From 483eb2f56657e8e7f419ab1a4fab8dce9ade8609 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 27 Apr 2024 20:24:20 +0200
Subject: Adding upstream version 14.2.21.

Signed-off-by: Daniel Baumann
---
 qa/.gitignore | 4 +
 qa/.teuthology_branch | 1 +
 qa/Makefile | 4 +
 qa/README | 64 +
 qa/archs/aarch64.yaml | 1 +
 qa/archs/armv7.yaml | 1 +
 qa/archs/i686.yaml | 1 +
 qa/archs/x86_64.yaml | 1 +
 qa/btrfs/.gitignore | 3 +
 qa/btrfs/Makefile | 11 +
 qa/btrfs/clone_range.c | 35 +
 qa/btrfs/create_async_snap.c | 34 +
 qa/btrfs/test_async_snap.c | 83 +
 qa/btrfs/test_rmdir_async_snap.c | 62 +
 qa/cephfs/.qa | 1 +
 qa/cephfs/begin.yaml | 10 +
 qa/cephfs/clusters/.qa | 1 +
 qa/cephfs/clusters/1-mds-1-client-coloc.yaml | 12 +
 qa/cephfs/clusters/1-mds-1-client-micro.yaml | 7 +
 qa/cephfs/clusters/1-mds-1-client.yaml | 13 +
 qa/cephfs/clusters/1-mds-2-client-coloc.yaml | 12 +
 qa/cephfs/clusters/1-mds-2-client-micro.yaml | 8 +
 qa/cephfs/clusters/1-mds-2-client.yaml | 14 +
 qa/cephfs/clusters/1-mds-3-client.yaml | 15 +
 qa/cephfs/clusters/1-mds-4-client-coloc.yaml | 12 +
 qa/cephfs/clusters/1-mds-4-client.yaml | 16 +
 qa/cephfs/clusters/1a3s-mds-1c-client.yaml | 12 +
 qa/cephfs/clusters/1a3s-mds-2c-client.yaml | 12 +
 qa/cephfs/clusters/3-mds.yaml | 17 +
 qa/cephfs/clusters/9-mds.yaml | 17 +
 qa/cephfs/clusters/fixed-2-ucephfs.yaml | 12 +
 qa/cephfs/conf/+ | 0
 qa/cephfs/conf/.qa | 1 +
 qa/cephfs/conf/client.yaml | 7 +
 qa/cephfs/conf/mds.yaml | 11 +
 qa/cephfs/conf/mon.yaml | 5 +
 qa/cephfs/conf/osd.yaml | 5 +
 qa/cephfs/mount/.qa | 1 +
 qa/cephfs/mount/fuse.yaml | 2 +
 qa/cephfs/mount/kclient/% | 0
 qa/cephfs/mount/kclient/.qa | 1 +
 qa/cephfs/mount/kclient/mount.yaml | 2 +
 qa/cephfs/mount/kclient/overrides/% | 0
 qa/cephfs/mount/kclient/overrides/.qa | 1 +
 qa/cephfs/mount/kclient/overrides/distro/.qa | 1 +
 qa/cephfs/mount/kclient/overrides/distro/rhel/% | 0
 qa/cephfs/mount/kclient/overrides/distro/rhel/.qa | 1 +
 .../kclient/overrides/distro/rhel/k-distro.yaml | 3 +
 .../kclient/overrides/distro/rhel/rhel_latest.yaml | 1 +
 .../mount/kclient/overrides/ms-die-on-skipped.yaml | 5 +
 qa/cephfs/objectstore-ec/.qa | 1 +
 qa/cephfs/objectstore-ec/bluestore-bitmap.yaml | 1 +
 .../objectstore-ec/bluestore-comp-ec-root.yaml | 28 +
 qa/cephfs/objectstore-ec/bluestore-comp.yaml | 23 +
 qa/cephfs/objectstore-ec/bluestore-ec-root.yaml | 42 +
 qa/cephfs/objectstore-ec/filestore-xfs.yaml | 15 +
 qa/cephfs/overrides/.qa | 1 +
 qa/cephfs/overrides/frag_enable.yaml | 9 +
 qa/cephfs/overrides/fuse/.qa | 1 +
 qa/cephfs/overrides/fuse/default-perm/% | 0
 qa/cephfs/overrides/fuse/default-perm/.qa | 1 +
 qa/cephfs/overrides/fuse/default-perm/no.yaml | 5 +
 qa/cephfs/overrides/fuse/default-perm/yes.yaml | 5 +
 qa/cephfs/overrides/log-config.yaml | 3 +
 qa/cephfs/overrides/osd-asserts.yaml | 5 +
 qa/cephfs/overrides/session_timeout.yaml | 4 +
 qa/cephfs/overrides/whitelist_health.yaml | 12 +
 .../overrides/whitelist_wrongly_marked_down.yaml | 9 +
 qa/cephfs/tasks/.qa | 1 +
 .../tasks/cfuse_workunit_suites_blogbench.yaml | 9 +
 qa/cephfs/tasks/cfuse_workunit_suites_dbench.yaml | 5 +
 qa/cephfs/tasks/cfuse_workunit_suites_ffsb.yaml | 17 +
 .../tasks/cfuse_workunit_suites_fsstress.yaml | 6 +
 qa/cephfs/tasks/cfuse_workunit_trivial_sync.yaml | 4 +
 qa/cephfs/tasks/libcephfs_interface_tests.yaml | 14 +
 qa/client/30_subdir_mount.sh | 23 +
 qa/client/common.sh | 58 +
 qa/client/gen-1774.sh | 2068 +++++
 qa/clusters/2-node-mgr.yaml | 10 +
 qa/clusters/extra-client.yaml | 14 +
 qa/clusters/fixed-1.yaml | 14 +
 qa/clusters/fixed-2.yaml | 12 +
 qa/clusters/fixed-3-cephfs.yaml | 16 +
qa/clusters/fixed-3.yaml | 13 + qa/clusters/fixed-4.yaml | 10 + qa/config/rados.yaml | 10 + qa/crontab/teuthology-cronjobs | 182 + qa/debug/buildpackages.yaml | 6 + qa/debug/mds_client.yaml | 9 + qa/debug/mgr.yaml | 17 + qa/debug/openstack-15G.yaml | 3 + qa/debug/openstack-30G.yaml | 3 + qa/distros/a-supported-distro.yaml | 1 + qa/distros/all/centos.yaml | 1 + qa/distros/all/centos_6.3.yaml | 2 + qa/distros/all/centos_6.4.yaml | 2 + qa/distros/all/centos_6.5.yaml | 2 + qa/distros/all/centos_7.0.yaml | 2 + qa/distros/all/centos_7.1.yaml | 2 + qa/distros/all/centos_7.2.yaml | 2 + qa/distros/all/centos_7.3.yaml | 2 + qa/distros/all/centos_7.4.yaml | 2 + qa/distros/all/centos_7.5.yaml | 2 + qa/distros/all/centos_7.6.yaml | 2 + qa/distros/all/centos_7.8.yaml | 2 + qa/distros/all/debian_6.0.yaml | 2 + qa/distros/all/debian_7.0.yaml | 2 + qa/distros/all/debian_8.0.yaml | 2 + qa/distros/all/fedora_17.yaml | 2 + qa/distros/all/fedora_18.yaml | 2 + qa/distros/all/fedora_19.yaml | 2 + qa/distros/all/opensuse_12.2.yaml | 2 + qa/distros/all/opensuse_13.2.yaml | 2 + qa/distros/all/opensuse_15.0.yaml | 2 + qa/distros/all/opensuse_42.1.yaml | 2 + qa/distros/all/opensuse_42.2.yaml | 2 + qa/distros/all/opensuse_42.3.yaml | 2 + qa/distros/all/rhel_6.3.yaml | 2 + qa/distros/all/rhel_6.4.yaml | 2 + qa/distros/all/rhel_6.5.yaml | 2 + qa/distros/all/rhel_7.0.yaml | 2 + qa/distros/all/rhel_7.5.yaml | 2 + qa/distros/all/rhel_7.6.yaml | 2 + qa/distros/all/rhel_7.8.yaml | 2 + qa/distros/all/rhel_7.9.yaml | 2 + qa/distros/all/rhel_7.yaml | 1 + qa/distros/all/sle_12.2.yaml | 2 + qa/distros/all/ubuntu_12.04.yaml | 2 + qa/distros/all/ubuntu_12.10.yaml | 2 + qa/distros/all/ubuntu_14.04.yaml | 2 + qa/distros/all/ubuntu_14.04_aarch64.yaml | 3 + qa/distros/all/ubuntu_14.04_i686.yaml | 3 + qa/distros/all/ubuntu_16.04.yaml | 2 + qa/distros/all/ubuntu_18.04.yaml | 2 + qa/distros/supported-all-distro/centos_latest.yaml | 1 + qa/distros/supported-all-distro/rhel_7.yaml | 1 + qa/distros/supported-all-distro/ubuntu_16.04.yaml | 1 + qa/distros/supported-all-distro/ubuntu_latest.yaml | 1 + .../supported-random-distro$/centos_latest.yaml | 1 + qa/distros/supported-random-distro$/rhel_7.yaml | 1 + .../supported-random-distro$/ubuntu_16.04.yaml | 1 + .../supported-random-distro$/ubuntu_latest.yaml | 1 + qa/distros/supported/centos_latest.yaml | 1 + qa/distros/supported/rhel_latest.yaml | 1 + qa/distros/supported/ubuntu_latest.yaml | 1 + qa/erasure-code/ec-feature-plugins-v2.yaml | 98 + qa/erasure-code/ec-feature-plugins-v3.yaml | 98 + qa/erasure-code/ec-rados-default.yaml | 19 + qa/erasure-code/ec-rados-parallel.yaml | 20 + qa/erasure-code/ec-rados-plugin=clay-k=4-m=2.yaml | 25 + qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml | 26 + .../ec-rados-plugin=jerasure-k=2-m=1.yaml | 25 + .../ec-rados-plugin=jerasure-k=3-m=1.yaml | 31 + .../ec-rados-plugin=jerasure-k=4-m=2.yaml | 25 + .../ec-rados-plugin=lrc-k=4-m=2-l=3.yaml | 25 + .../ec-rados-plugin=shec-k=4-m=3-c=2.yaml | 25 + qa/erasure-code/ec-rados-sequential.yaml | 20 + qa/find-used-ports.sh | 3 + qa/libceph/Makefile | 11 + qa/libceph/trivial_libceph.c | 69 + qa/loopall.sh | 28 + qa/machine_types/schedule_rados_ovh.sh | 37 + qa/machine_types/schedule_subset.sh | 49 + qa/machine_types/vps.yaml | 14 + qa/mds/test_anchortable.sh | 27 + qa/mds/test_mdstable_failures.sh | 14 + qa/mon/bootstrap/host.sh | 29 + qa/mon/bootstrap/initial_members.sh | 39 + qa/mon/bootstrap/initial_members_asok.sh | 66 + qa/mon/bootstrap/simple.sh | 36 + qa/mon/bootstrap/simple_expand.sh | 60 + 
qa/mon/bootstrap/simple_expand_monmap.sh | 44 + qa/mon/bootstrap/simple_single_expand.sh | 54 + qa/mon/bootstrap/simple_single_expand2.sh | 40 + qa/mon/bootstrap/single_host.sh | 29 + qa/mon/bootstrap/single_host_multi.sh | 39 + qa/msgr/async-v1only.yaml | 7 + qa/msgr/async-v2only.yaml | 7 + qa/msgr/async.yaml | 5 + qa/msgr/random.yaml | 7 + qa/msgr/simple.yaml | 7 + qa/nightlies/cron_wrapper | 53 + qa/objectstore/bluestore-bitmap.yaml | 43 + qa/objectstore/bluestore-comp-lz4.yaml | 24 + qa/objectstore/bluestore-comp-snappy.yaml | 24 + qa/objectstore/bluestore-comp-zlib.yaml | 24 + qa/objectstore/bluestore-comp-zstd.yaml | 24 + qa/objectstore/bluestore-hybrid.yaml | 40 + qa/objectstore/bluestore-stupid.yaml | 43 + qa/objectstore/filestore-xfs.yaml | 15 + qa/objectstore_cephfs/bluestore-bitmap.yaml | 1 + qa/objectstore_cephfs/filestore-xfs.yaml | 1 + qa/overrides/2-size-1-min-size.yaml | 6 + qa/overrides/2-size-2-min-size.yaml | 8 + qa/overrides/3-size-2-min-size.yaml | 8 + qa/overrides/more-active-recovery.yaml | 6 + qa/overrides/no_client_pidfile.yaml | 5 + qa/overrides/short_pg_log.yaml | 7 + qa/overrides/whitelist_wrongly_marked_down.yaml | 10 + qa/packages/packages.yaml | 50 + qa/qa_scripts/cephscrub.sh | 30 + qa/qa_scripts/openstack/README | 32 + qa/qa_scripts/openstack/ceph_install.sh | 11 + .../openstack/ceph_install_w_ansible/README | 32 + .../ceph_install_w_ansible/ceph_install.sh | 39 + .../openstack/ceph_install_w_ansible/config | 5 + .../openstack/ceph_install_w_ansible/copy_func.sh | 1 + .../ceph_install_w_ansible/execs/cdn_setup.sh | 20 + .../ceph_install_w_ansible/execs/ceph_ansible.sh | 36 + .../execs/edit_ansible_hosts.sh | 17 + .../execs/edit_groupvars_osds.sh | 13 + .../ceph_install_w_ansible/multi_action.sh | 19 + .../openstack/ceph_install_w_ansible/repolocs.sh | 8 + .../openstack/ceph_install_w_ansible/staller.sh | 15 + .../openstack/ceph_install_w_ansible/talknice.sh | 29 + qa/qa_scripts/openstack/connectceph.sh | 44 + qa/qa_scripts/openstack/copy_func.sh | 22 + qa/qa_scripts/openstack/execs/ceph-pool-create.sh | 34 + qa/qa_scripts/openstack/execs/ceph_cluster.sh | 50 + qa/qa_scripts/openstack/execs/libvirt-secret.sh | 19 + .../openstack/execs/openstack-preinstall.sh | 17 + qa/qa_scripts/openstack/execs/run_openstack.sh | 23 + qa/qa_scripts/openstack/execs/start_openstack.sh | 15 + qa/qa_scripts/openstack/files/cinder.template.conf | 3481 +++++++ .../openstack/files/glance-api.template.conf | 1590 ++++ qa/qa_scripts/openstack/files/kilo.template.conf | 1077 +++ qa/qa_scripts/openstack/files/nova.template.conf | 3698 ++++++++ qa/qa_scripts/openstack/fix_conf_file.sh | 28 + qa/qa_scripts/openstack/image_create.sh | 16 + qa/qa_scripts/openstack/openstack.sh | 28 + qa/qa_scripts/openstack/packstack.sh | 20 + qa/rbd/common.sh | 103 + qa/rbd/krbd_blkroset.t | 364 + qa/rbd/krbd_deep_flatten.t | 329 + qa/rbd/krbd_discard.t | 398 + qa/rbd/krbd_discard_4M.t | 330 + qa/rbd/krbd_discard_512b.t | 416 + qa/rbd/krbd_discard_granularity.t | 40 + qa/rbd/krbd_get_features.t | 31 + qa/rbd/krbd_huge_image.t | 41 + qa/rbd/krbd_msgr_segments.t | 85 + qa/rbd/krbd_parent_overlap.t | 64 + qa/rbd/krbd_whole_object_zeroout.t | 143 + qa/rbd/krbd_zeroout.t | 422 + qa/rbd/rbd.sh | 50 + qa/releases/infernalis.yaml | 5 + qa/releases/jewel.yaml | 6 + qa/releases/kraken.yaml | 4 + qa/releases/luminous-with-mgr.yaml | 11 + qa/releases/luminous.yaml | 21 + qa/releases/mimic.yaml | 6 + qa/releases/nautilus.yaml | 6 + qa/rgw_frontend/beast.yaml | 3 + qa/rgw_frontend/civetweb.yaml | 3 + 
qa/rgw_pool_type/ec-profile.yaml | 10 + qa/rgw_pool_type/ec.yaml | 5 + qa/rgw_pool_type/replicated.yaml | 3 + qa/run-standalone.sh | 148 + qa/run_xfstests-obsolete.sh | 458 + qa/run_xfstests.sh | 323 + qa/run_xfstests_qemu.sh | 29 + qa/runallonce.sh | 25 + qa/runoncfuse.sh | 8 + qa/runonkclient.sh | 9 + qa/setup-chroot.sh | 65 + qa/standalone/README | 23 + qa/standalone/ceph-helpers.sh | 2285 +++++ qa/standalone/crush/crush-choose-args.sh | 243 + qa/standalone/crush/crush-classes.sh | 237 + .../erasure-code/test-erasure-code-plugins.sh | 118 + qa/standalone/erasure-code/test-erasure-code.sh | 333 + qa/standalone/erasure-code/test-erasure-eio.sh | 670 ++ qa/standalone/mgr/balancer.sh | 221 + qa/standalone/misc/network-ping.sh | 145 + qa/standalone/misc/ok-to-stop.sh | 289 + qa/standalone/misc/rados-striper.sh | 101 + qa/standalone/misc/test-ceph-helpers.sh | 21 + qa/standalone/mon/misc.sh | 262 + qa/standalone/mon/mkfs.sh | 193 + qa/standalone/mon/mon-bind.sh | 147 + qa/standalone/mon/mon-created-time.sh | 54 + qa/standalone/mon/mon-handle-forward.sh | 64 + qa/standalone/mon/mon-last-epoch-clean.sh | 307 + qa/standalone/mon/mon-osdmap-prune.sh | 57 + qa/standalone/mon/mon-ping.sh | 46 + qa/standalone/mon/mon-scrub.sh | 49 + qa/standalone/mon/mon-seesaw.sh | 72 + qa/standalone/mon/msgr-v2-transition.sh | 82 + qa/standalone/mon/osd-crush.sh | 239 + qa/standalone/mon/osd-erasure-code-profile.sh | 240 + qa/standalone/mon/osd-pool-create.sh | 328 + qa/standalone/mon/osd-pool-df.sh | 75 + qa/standalone/mon/test_pool_quota.sh | 63 + qa/standalone/osd/bad-inc-map.sh | 62 + qa/standalone/osd/divergent-priors.sh | 840 ++ qa/standalone/osd/ec-error-rollforward.sh | 66 + qa/standalone/osd/osd-backfill-prio.sh | 519 ++ qa/standalone/osd/osd-backfill-recovery-log.sh | 136 + qa/standalone/osd/osd-backfill-space.sh | 1175 +++ qa/standalone/osd/osd-backfill-stats.sh | 753 ++ qa/standalone/osd/osd-bench.sh | 96 + qa/standalone/osd/osd-bluefs-volume-ops.sh | 346 + qa/standalone/osd/osd-config.sh | 97 + qa/standalone/osd/osd-copy-from.sh | 68 + qa/standalone/osd/osd-dup.sh | 83 + qa/standalone/osd/osd-fast-mark-down.sh | 116 + qa/standalone/osd/osd-force-create-pg.sh | 52 + qa/standalone/osd/osd-markdown.sh | 131 + qa/standalone/osd/osd-reactivate.sh | 56 + qa/standalone/osd/osd-recovery-prio.sh | 515 ++ qa/standalone/osd/osd-recovery-space.sh | 175 + qa/standalone/osd/osd-recovery-stats.sh | 512 ++ qa/standalone/osd/osd-rep-recov-eio.sh | 476 + qa/standalone/osd/osd-reuse-id.sh | 52 + qa/standalone/osd/pg-split-merge.sh | 204 + qa/standalone/osd/repro_long_log.sh | 152 + qa/standalone/scrub/osd-recovery-scrub.sh | 132 + qa/standalone/scrub/osd-scrub-dump.sh | 173 + qa/standalone/scrub/osd-scrub-repair.sh | 6231 +++++++++++++ qa/standalone/scrub/osd-scrub-snaps.sh | 1274 +++ qa/standalone/scrub/osd-scrub-test.sh | 319 + qa/standalone/scrub/osd-unexpected-clone.sh | 89 + qa/standalone/special/ceph_objectstore_tool.py | 2080 +++++ qa/standalone/special/test-failure.sh | 48 + qa/suites/.qa | 1 + qa/suites/big/.qa | 1 + qa/suites/big/rados-thrash/% | 0 qa/suites/big/rados-thrash/.qa | 1 + qa/suites/big/rados-thrash/ceph/.qa | 1 + qa/suites/big/rados-thrash/ceph/ceph.yaml | 3 + qa/suites/big/rados-thrash/clusters/.qa | 1 + qa/suites/big/rados-thrash/clusters/big.yaml | 68 + qa/suites/big/rados-thrash/clusters/medium.yaml | 22 + qa/suites/big/rados-thrash/clusters/small.yaml | 6 + qa/suites/big/rados-thrash/objectstore | 1 + qa/suites/big/rados-thrash/openstack.yaml | 8 + qa/suites/big/rados-thrash/thrashers/.qa | 1 
+ qa/suites/big/rados-thrash/thrashers/default.yaml | 11 + qa/suites/big/rados-thrash/workloads/.qa | 1 + .../rados-thrash/workloads/snaps-few-objects.yaml | 13 + qa/suites/buildpackages/.qa | 1 + qa/suites/buildpackages/any/% | 0 qa/suites/buildpackages/any/.qa | 1 + qa/suites/buildpackages/any/distros | 1 + qa/suites/buildpackages/any/tasks/.qa | 1 + qa/suites/buildpackages/any/tasks/release.yaml | 8 + qa/suites/buildpackages/tests/% | 0 qa/suites/buildpackages/tests/.qa | 1 + qa/suites/buildpackages/tests/distros | 1 + qa/suites/buildpackages/tests/tasks/.qa | 1 + qa/suites/buildpackages/tests/tasks/release.yaml | 20 + qa/suites/ceph-ansible/.qa | 1 + qa/suites/ceph-ansible/smoke/.qa | 1 + qa/suites/ceph-ansible/smoke/basic/% | 0 qa/suites/ceph-ansible/smoke/basic/.qa | 1 + qa/suites/ceph-ansible/smoke/basic/0-clusters/.qa | 1 + .../smoke/basic/0-clusters/3-node.yaml | 12 + .../smoke/basic/0-clusters/4-node.yaml | 13 + qa/suites/ceph-ansible/smoke/basic/1-distros/.qa | 1 + .../smoke/basic/1-distros/centos_latest.yaml | 1 + .../smoke/basic/1-distros/ubuntu_latest.yaml | 1 + qa/suites/ceph-ansible/smoke/basic/2-ceph/.qa | 1 + .../smoke/basic/2-ceph/ceph_ansible.yaml | 35 + qa/suites/ceph-ansible/smoke/basic/3-config/.qa | 1 + .../basic/3-config/bluestore_with_dmcrypt.yaml | 8 + .../smoke/basic/3-config/dmcrypt_off.yaml | 7 + .../smoke/basic/3-config/dmcrypt_on.yaml | 7 + qa/suites/ceph-ansible/smoke/basic/4-tasks/.qa | 1 + .../smoke/basic/4-tasks/ceph-admin-commands.yaml | 7 + .../smoke/basic/4-tasks/rbd_import_export.yaml | 7 + .../ceph-ansible/smoke/basic/4-tasks/rest.yaml | 15 + qa/suites/ceph-deploy/% | 0 qa/suites/ceph-deploy/.qa | 1 + qa/suites/ceph-deploy/cluster/.qa | 1 + qa/suites/ceph-deploy/cluster/4node.yaml | 15 + qa/suites/ceph-deploy/config/.qa | 1 + .../ceph-deploy/config/ceph_volume_bluestore.yaml | 7 + .../config/ceph_volume_bluestore_dmcrypt.yaml | 8 + .../config/ceph_volume_dmcrypt_off.yaml | 3 + .../ceph-deploy/config/ceph_volume_filestore.yaml | 4 + qa/suites/ceph-deploy/distros/.qa | 1 + qa/suites/ceph-deploy/distros/centos_latest.yaml | 1 + qa/suites/ceph-deploy/distros/ubuntu_latest.yaml | 1 + qa/suites/ceph-deploy/python_versions/.qa | 1 + .../ceph-deploy/python_versions/python_2.yaml | 3 + .../ceph-deploy/python_versions/python_3.yaml | 3 + qa/suites/ceph-deploy/tasks/.qa | 1 + .../ceph-deploy/tasks/ceph-admin-commands.yaml | 12 + qa/suites/ceph-deploy/tasks/rbd_import_export.yaml | 9 + qa/suites/cephmetrics/% | 0 qa/suites/cephmetrics/.qa | 1 + qa/suites/cephmetrics/0-clusters/.qa | 1 + qa/suites/cephmetrics/0-clusters/3-node.yaml | 11 + qa/suites/cephmetrics/1-distros/.qa | 1 + qa/suites/cephmetrics/1-distros/centos_latest.yaml | 1 + qa/suites/cephmetrics/1-distros/ubuntu_latest.yaml | 1 + qa/suites/cephmetrics/2-ceph/.qa | 1 + qa/suites/cephmetrics/2-ceph/ceph_ansible.yaml | 32 + qa/suites/cephmetrics/3-ceph-config/.qa | 1 + .../3-ceph-config/bluestore_with_dmcrypt.yaml | 8 + .../3-ceph-config/bluestore_without_dmcrypt.yaml | 8 + .../cephmetrics/3-ceph-config/dmcrypt_off.yaml | 7 + .../cephmetrics/3-ceph-config/dmcrypt_on.yaml | 7 + qa/suites/cephmetrics/4-epel/.qa | 1 + qa/suites/cephmetrics/4-epel/no_epel.yaml | 7 + qa/suites/cephmetrics/4-epel/use_epel.yaml | 7 + qa/suites/cephmetrics/5-containers/.qa | 1 + .../cephmetrics/5-containers/containerized.yaml | 10 + .../cephmetrics/5-containers/no_containers.yaml | 10 + qa/suites/cephmetrics/6-tasks/.qa | 1 + qa/suites/cephmetrics/6-tasks/cephmetrics.yaml | 4 + qa/suites/dummy/% | 0 qa/suites/dummy/.qa | 1 + 
qa/suites/dummy/all/.qa | 1 + qa/suites/dummy/all/nop.yaml | 6 + qa/suites/experimental/.qa | 1 + qa/suites/experimental/multimds/% | 0 qa/suites/experimental/multimds/.qa | 1 + qa/suites/experimental/multimds/clusters/.qa | 1 + .../experimental/multimds/clusters/7-multimds.yaml | 8 + qa/suites/experimental/multimds/tasks/.qa | 1 + .../multimds/tasks/fsstress_thrash_subtrees.yaml | 15 + qa/suites/fs/.qa | 1 + qa/suites/fs/32bits/% | 0 qa/suites/fs/32bits/.qa | 1 + qa/suites/fs/32bits/begin.yaml | 1 + qa/suites/fs/32bits/clusters/.qa | 1 + qa/suites/fs/32bits/clusters/fixed-2-ucephfs.yaml | 1 + qa/suites/fs/32bits/conf | 1 + qa/suites/fs/32bits/mount/.qa | 1 + qa/suites/fs/32bits/mount/fuse.yaml | 1 + qa/suites/fs/32bits/objectstore-ec | 1 + qa/suites/fs/32bits/overrides/+ | 0 qa/suites/fs/32bits/overrides/.qa | 1 + qa/suites/fs/32bits/overrides/faked-ino.yaml | 5 + qa/suites/fs/32bits/overrides/frag_enable.yaml | 1 + .../fs/32bits/overrides/whitelist_health.yaml | 1 + .../overrides/whitelist_wrongly_marked_down.yaml | 1 + qa/suites/fs/32bits/supported-random-distros$ | 1 + qa/suites/fs/32bits/tasks/.qa | 1 + .../tasks/cfuse_workunit_suites_fsstress.yaml | 1 + .../fs/32bits/tasks/cfuse_workunit_suites_pjd.yaml | 12 + qa/suites/fs/basic_functional/% | 0 qa/suites/fs/basic_functional/.qa | 1 + qa/suites/fs/basic_functional/begin.yaml | 1 + qa/suites/fs/basic_functional/clusters/.qa | 1 + .../clusters/1-mds-4-client-coloc.yaml | 1 + qa/suites/fs/basic_functional/conf | 1 + qa/suites/fs/basic_functional/mount/.qa | 1 + qa/suites/fs/basic_functional/mount/fuse.yaml | 1 + qa/suites/fs/basic_functional/objectstore/.qa | 1 + .../objectstore/bluestore-bitmap.yaml | 1 + .../objectstore/bluestore-ec-root.yaml | 1 + qa/suites/fs/basic_functional/overrides/+ | 0 qa/suites/fs/basic_functional/overrides/.qa | 1 + .../fs/basic_functional/overrides/frag_enable.yaml | 1 + .../overrides/no_client_pidfile.yaml | 1 + .../overrides/whitelist_health.yaml | 1 + .../overrides/whitelist_wrongly_marked_down.yaml | 1 + .../fs/basic_functional/supported-random-distros$ | 1 + qa/suites/fs/basic_functional/tasks/.qa | 1 + qa/suites/fs/basic_functional/tasks/admin.yaml | 11 + .../fs/basic_functional/tasks/alternate-pool.yaml | 20 + .../fs/basic_functional/tasks/asok_dump_tree.yaml | 4 + .../fs/basic_functional/tasks/auto-repair.yaml | 13 + qa/suites/fs/basic_functional/tasks/backtrace.yaml | 5 + qa/suites/fs/basic_functional/tasks/cap-flush.yaml | 8 + .../fs/basic_functional/tasks/cephfs-shell.yaml | 8 + .../basic_functional/tasks/cephfs_scrub_tests.yaml | 19 + .../tasks/cfuse_workunit_quota.yaml | 5 + .../fs/basic_functional/tasks/client-limits.yaml | 19 + .../fs/basic_functional/tasks/client-readahad.yaml | 4 + .../fs/basic_functional/tasks/client-recovery.yaml | 17 + qa/suites/fs/basic_functional/tasks/damage.yaml | 27 + qa/suites/fs/basic_functional/tasks/data-scan.yaml | 20 + .../fs/basic_functional/tasks/forward-scrub.yaml | 14 + qa/suites/fs/basic_functional/tasks/fragment.yaml | 5 + .../fs/basic_functional/tasks/journal-repair.yaml | 14 + .../basic_functional/tasks/libcephfs_python.yaml | 10 + qa/suites/fs/basic_functional/tasks/mds-flush.yaml | 5 + qa/suites/fs/basic_functional/tasks/mds-full.yaml | 37 + .../basic_functional/tasks/mds_creation_retry.yaml | 6 + .../fs/basic_functional/tasks/openfiletable.yaml | 5 + qa/suites/fs/basic_functional/tasks/pool-perm.yaml | 5 + qa/suites/fs/basic_functional/tasks/quota.yaml | 5 + qa/suites/fs/basic_functional/tasks/sessionmap/+ | 0 
qa/suites/fs/basic_functional/tasks/sessionmap/.qa | 1 + .../tasks/sessionmap/sessionmap.yaml | 10 + qa/suites/fs/basic_functional/tasks/strays.yaml | 5 + .../tasks/test_journal_migration.yaml | 5 + .../fs/basic_functional/tasks/volume-client/% | 0 .../fs/basic_functional/tasks/volume-client/.qa | 1 + .../basic_functional/tasks/volume-client/task/.qa | 1 + .../tasks/volume-client/task/test/+ | 0 .../tasks/volume-client/task/test/.qa | 1 + .../tasks/volume-client/task/test/test.yaml | 8 + qa/suites/fs/basic_functional/tasks/volumes.yaml | 20 + qa/suites/fs/basic_workload/% | 0 qa/suites/fs/basic_workload/.qa | 1 + qa/suites/fs/basic_workload/begin.yaml | 1 + qa/suites/fs/basic_workload/clusters/.qa | 1 + .../basic_workload/clusters/fixed-2-ucephfs.yaml | 1 + qa/suites/fs/basic_workload/conf | 1 + qa/suites/fs/basic_workload/inline/.qa | 1 + qa/suites/fs/basic_workload/inline/no.yaml | 0 qa/suites/fs/basic_workload/inline/yes.yaml | 4 + qa/suites/fs/basic_workload/mount/.qa | 1 + qa/suites/fs/basic_workload/mount/fuse.yaml | 1 + qa/suites/fs/basic_workload/objectstore-ec | 1 + qa/suites/fs/basic_workload/omap_limit/.qa | 1 + qa/suites/fs/basic_workload/omap_limit/10.yaml | 5 + qa/suites/fs/basic_workload/omap_limit/10000.yaml | 5 + qa/suites/fs/basic_workload/overrides/+ | 0 qa/suites/fs/basic_workload/overrides/.qa | 1 + .../fs/basic_workload/overrides/frag_enable.yaml | 1 + .../basic_workload/overrides/session_timeout.yaml | 1 + .../basic_workload/overrides/whitelist_health.yaml | 1 + .../overrides/whitelist_wrongly_marked_down.yaml | 1 + .../fs/basic_workload/supported-random-distros$ | 1 + qa/suites/fs/basic_workload/tasks/.qa | 1 + .../tasks/cfuse_workunit_kernel_untar_build.yaml | 14 + .../basic_workload/tasks/cfuse_workunit_misc.yaml | 10 + .../tasks/cfuse_workunit_misc_test_o_trunc.yaml | 5 + .../tasks/cfuse_workunit_norstats.yaml | 15 + .../tasks/cfuse_workunit_suites_blogbench.yaml | 1 + .../tasks/cfuse_workunit_suites_dbench.yaml | 1 + .../tasks/cfuse_workunit_suites_ffsb.yaml | 1 + .../tasks/cfuse_workunit_suites_fsstress.yaml | 1 + .../tasks/cfuse_workunit_suites_fsx.yaml | 9 + .../tasks/cfuse_workunit_suites_fsync.yaml | 5 + .../tasks/cfuse_workunit_suites_iogen.yaml | 6 + .../tasks/cfuse_workunit_suites_iozone.yaml | 5 + .../tasks/cfuse_workunit_suites_pjd.yaml | 12 + .../cfuse_workunit_suites_truncate_delay.yaml | 14 + .../tasks/cfuse_workunit_trivial_sync.yaml | 1 + .../tasks/libcephfs_interface_tests.yaml | 1 + qa/suites/fs/bugs/.qa | 1 + qa/suites/fs/bugs/client_trim_caps/% | 0 qa/suites/fs/bugs/client_trim_caps/.qa | 1 + qa/suites/fs/bugs/client_trim_caps/begin.yaml | 1 + qa/suites/fs/bugs/client_trim_caps/clusters/.qa | 1 + .../client_trim_caps/clusters/small-cluster.yaml | 11 + qa/suites/fs/bugs/client_trim_caps/conf | 1 + qa/suites/fs/bugs/client_trim_caps/objectstore/.qa | 1 + .../objectstore/bluestore-bitmap.yaml | 1 + qa/suites/fs/bugs/client_trim_caps/overrides/+ | 0 qa/suites/fs/bugs/client_trim_caps/overrides/.qa | 1 + .../client_trim_caps/overrides/frag_enable.yaml | 1 + .../overrides/no_client_pidfile.yaml | 1 + .../overrides/whitelist_health.yaml | 1 + .../overrides/whitelist_wrongly_marked_down.yaml | 1 + qa/suites/fs/bugs/client_trim_caps/tasks/.qa | 1 + .../bugs/client_trim_caps/tasks/trim-i22073.yaml | 20 + qa/suites/fs/multiclient/% | 0 qa/suites/fs/multiclient/.qa | 1 + qa/suites/fs/multiclient/begin.yaml | 1 + qa/suites/fs/multiclient/clusters/.qa | 1 + .../fs/multiclient/clusters/1-mds-2-client.yaml | 1 + .../fs/multiclient/clusters/1-mds-3-client.yaml 
| 1 + qa/suites/fs/multiclient/conf | 1 + qa/suites/fs/multiclient/distros/.qa | 1 + .../fs/multiclient/distros/ubuntu_latest.yaml | 1 + qa/suites/fs/multiclient/mount/.qa | 1 + qa/suites/fs/multiclient/mount/fuse.yaml | 1 + .../fs/multiclient/mount/kclient.yaml.disabled | 7 + qa/suites/fs/multiclient/objectstore-ec | 1 + qa/suites/fs/multiclient/overrides/+ | 0 qa/suites/fs/multiclient/overrides/.qa | 1 + .../fs/multiclient/overrides/frag_enable.yaml | 1 + .../fs/multiclient/overrides/whitelist_health.yaml | 1 + .../overrides/whitelist_wrongly_marked_down.yaml | 1 + qa/suites/fs/multiclient/tasks/.qa | 1 + .../fs/multiclient/tasks/cephfs_misc_tests.yaml | 13 + .../fs/multiclient/tasks/fsx-mpi.yaml.disabled | 17 + .../fs/multiclient/tasks/ior-shared-file.yaml | 23 + qa/suites/fs/multiclient/tasks/mdtest.yaml | 20 + qa/suites/fs/multifs/% | 0 qa/suites/fs/multifs/.qa | 1 + qa/suites/fs/multifs/begin.yaml | 1 + qa/suites/fs/multifs/clusters/.qa | 1 + .../fs/multifs/clusters/1a3s-mds-2c-client.yaml | 1 + qa/suites/fs/multifs/conf | 1 + qa/suites/fs/multifs/mount/.qa | 1 + qa/suites/fs/multifs/mount/fuse.yaml | 1 + qa/suites/fs/multifs/objectstore-ec | 1 + qa/suites/fs/multifs/overrides/+ | 0 qa/suites/fs/multifs/overrides/.qa | 1 + qa/suites/fs/multifs/overrides/frag_enable.yaml | 1 + qa/suites/fs/multifs/overrides/mon-debug.yaml | 5 + .../fs/multifs/overrides/whitelist_health.yaml | 1 + .../overrides/whitelist_wrongly_marked_down.yaml | 1 + qa/suites/fs/multifs/supported-random-distros$ | 1 + qa/suites/fs/multifs/tasks/.qa | 1 + qa/suites/fs/multifs/tasks/failover.yaml | 14 + qa/suites/fs/permission/% | 0 qa/suites/fs/permission/.qa | 1 + qa/suites/fs/permission/begin.yaml | 1 + qa/suites/fs/permission/clusters/.qa | 1 + .../fs/permission/clusters/fixed-2-ucephfs.yaml | 1 + qa/suites/fs/permission/conf | 1 + qa/suites/fs/permission/mount/.qa | 1 + qa/suites/fs/permission/mount/fuse.yaml | 1 + qa/suites/fs/permission/objectstore-ec | 1 + qa/suites/fs/permission/overrides/+ | 0 qa/suites/fs/permission/overrides/.qa | 1 + qa/suites/fs/permission/overrides/frag_enable.yaml | 1 + .../fs/permission/overrides/whitelist_health.yaml | 1 + .../overrides/whitelist_wrongly_marked_down.yaml | 1 + qa/suites/fs/permission/supported-random-distros$ | 1 + qa/suites/fs/permission/tasks/.qa | 1 + .../fs/permission/tasks/cfuse_workunit_misc.yaml | 12 + .../tasks/cfuse_workunit_suites_pjd.yaml | 13 + qa/suites/fs/snaps/% | 0 qa/suites/fs/snaps/.qa | 1 + qa/suites/fs/snaps/begin.yaml | 1 + qa/suites/fs/snaps/clusters/.qa | 1 + qa/suites/fs/snaps/clusters/fixed-2-ucephfs.yaml | 1 + qa/suites/fs/snaps/conf | 1 + qa/suites/fs/snaps/mount/.qa | 1 + qa/suites/fs/snaps/mount/fuse.yaml | 1 + qa/suites/fs/snaps/objectstore-ec | 1 + qa/suites/fs/snaps/overrides/+ | 0 qa/suites/fs/snaps/overrides/.qa | 1 + qa/suites/fs/snaps/overrides/frag_enable.yaml | 1 + qa/suites/fs/snaps/overrides/whitelist_health.yaml | 1 + .../overrides/whitelist_wrongly_marked_down.yaml | 1 + qa/suites/fs/snaps/supported-random-distros$ | 1 + qa/suites/fs/snaps/tasks/.qa | 1 + qa/suites/fs/snaps/tasks/snaptests.yaml | 5 + qa/suites/fs/thrash/% | 0 qa/suites/fs/thrash/.qa | 1 + qa/suites/fs/thrash/begin.yaml | 1 + qa/suites/fs/thrash/ceph-thrash/.qa | 1 + qa/suites/fs/thrash/ceph-thrash/default.yaml | 7 + qa/suites/fs/thrash/clusters/.qa | 1 + .../fs/thrash/clusters/1-mds-1-client-coloc.yaml | 1 + qa/suites/fs/thrash/conf | 1 + qa/suites/fs/thrash/mount/.qa | 1 + qa/suites/fs/thrash/mount/fuse.yaml | 1 + qa/suites/fs/thrash/msgr-failures/.qa | 1 + 
qa/suites/fs/thrash/msgr-failures/none.yaml | 0 .../fs/thrash/msgr-failures/osd-mds-delay.yaml | 10 + qa/suites/fs/thrash/objectstore-ec | 1 + qa/suites/fs/thrash/overrides/+ | 0 qa/suites/fs/thrash/overrides/.qa | 1 + qa/suites/fs/thrash/overrides/frag_enable.yaml | 1 + qa/suites/fs/thrash/overrides/session_timeout.yaml | 1 + .../fs/thrash/overrides/whitelist_health.yaml | 1 + .../overrides/whitelist_wrongly_marked_down.yaml | 1 + qa/suites/fs/thrash/supported-random-distros$ | 1 + qa/suites/fs/thrash/tasks/.qa | 1 + .../fs/thrash/tasks/cfuse_workunit_snaptests.yaml | 5 + .../tasks/cfuse_workunit_suites_fsstress.yaml | 1 + .../fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml | 12 + .../thrash/tasks/cfuse_workunit_trivial_sync.yaml | 1 + qa/suites/fs/traceless/% | 0 qa/suites/fs/traceless/.qa | 1 + qa/suites/fs/traceless/begin.yaml | 1 + qa/suites/fs/traceless/clusters/.qa | 1 + .../fs/traceless/clusters/fixed-2-ucephfs.yaml | 1 + qa/suites/fs/traceless/conf | 1 + qa/suites/fs/traceless/mount/.qa | 1 + qa/suites/fs/traceless/mount/fuse.yaml | 1 + qa/suites/fs/traceless/objectstore-ec | 1 + qa/suites/fs/traceless/overrides/+ | 0 qa/suites/fs/traceless/overrides/.qa | 1 + qa/suites/fs/traceless/overrides/frag_enable.yaml | 1 + .../fs/traceless/overrides/whitelist_health.yaml | 1 + .../overrides/whitelist_wrongly_marked_down.yaml | 1 + qa/suites/fs/traceless/supported-random-distros$ | 1 + qa/suites/fs/traceless/tasks/.qa | 1 + .../tasks/cfuse_workunit_suites_blogbench.yaml | 1 + .../tasks/cfuse_workunit_suites_dbench.yaml | 1 + .../tasks/cfuse_workunit_suites_ffsb.yaml | 1 + .../tasks/cfuse_workunit_suites_fsstress.yaml | 1 + qa/suites/fs/traceless/traceless/.qa | 1 + qa/suites/fs/traceless/traceless/50pc.yaml | 5 + qa/suites/fs/upgrade/.qa | 1 + qa/suites/fs/upgrade/featureful_client/.qa | 1 + .../fs/upgrade/featureful_client/old_client/% | 0 .../fs/upgrade/featureful_client/old_client/.qa | 1 + .../old_client/bluestore-bitmap.yaml | 1 + .../featureful_client/old_client/clusters/.qa | 1 + .../old_client/clusters/1-mds-2-client-micro.yaml | 1 + .../fs/upgrade/featureful_client/old_client/conf | 1 + .../featureful_client/old_client/overrides/% | 0 .../featureful_client/old_client/overrides/.qa | 1 + .../old_client/overrides/frag_enable.yaml | 1 + .../old_client/overrides/multimds/no.yaml | 4 + .../old_client/overrides/multimds/yes.yaml | 4 + .../old_client/overrides/whitelist_health.yaml | 1 + .../overrides/whitelist_wrongly_marked_down.yaml | 1 + .../upgrade/featureful_client/old_client/tasks/% | 0 .../upgrade/featureful_client/old_client/tasks/.qa | 1 + .../old_client/tasks/0-luminous.yaml | 41 + .../old_client/tasks/1-client.yaml | 8 + .../old_client/tasks/2-upgrade.yaml | 56 + .../old_client/tasks/3-compat_client/mimic.yaml | 10 + .../old_client/tasks/3-compat_client/no.yaml | 6 + .../fs/upgrade/featureful_client/upgraded_client/% | 0 .../upgrade/featureful_client/upgraded_client/.qa | 1 + .../upgraded_client/bluestore-bitmap.yaml | 1 + .../featureful_client/upgraded_client/clusters/.qa | 1 + .../clusters/1-mds-2-client-micro.yaml | 1 + .../upgrade/featureful_client/upgraded_client/conf | 1 + .../featureful_client/upgraded_client/overrides/% | 0 .../upgraded_client/overrides/.qa | 1 + .../upgraded_client/overrides/frag_enable.yaml | 1 + .../upgraded_client/overrides/multimds/no.yaml | 4 + .../upgraded_client/overrides/multimds/yes.yaml | 4 + .../overrides/whitelist_health.yaml | 1 + .../overrides/whitelist_wrongly_marked_down.yaml | 1 + .../featureful_client/upgraded_client/tasks/% | 0 
.../featureful_client/upgraded_client/tasks/.qa | 1 + .../upgraded_client/tasks/0-luminous.yaml | 41 + .../upgraded_client/tasks/1-client.yaml | 11 + .../upgraded_client/tasks/2-upgrade.yaml | 56 + .../upgraded_client/tasks/3-client-upgrade.yaml | 14 + .../upgraded_client/tasks/4-compat_client.yaml | 13 + .../upgraded_client/tasks/5-client-sanity.yaml | 6 + qa/suites/fs/upgrade/snaps/% | 0 qa/suites/fs/upgrade/snaps/.qa | 1 + qa/suites/fs/upgrade/snaps/clusters/.qa | 1 + qa/suites/fs/upgrade/snaps/clusters/3-mds.yaml | 1 + qa/suites/fs/upgrade/snaps/conf | 1 + qa/suites/fs/upgrade/snaps/objectstore-ec | 1 + qa/suites/fs/upgrade/snaps/overrides/% | 0 qa/suites/fs/upgrade/snaps/overrides/.qa | 1 + .../fs/upgrade/snaps/overrides/frag_enable.yaml | 1 + qa/suites/fs/upgrade/snaps/overrides/multimds/.qa | 1 + .../fs/upgrade/snaps/overrides/multimds/no.yaml | 3 + .../fs/upgrade/snaps/overrides/multimds/yes.yaml | 3 + .../upgrade/snaps/overrides/whitelist_health.yaml | 1 + .../upgrade/snaps/overrides/whitelist_rstat.yaml | 8 + .../overrides/whitelist_wrongly_marked_down.yaml | 1 + qa/suites/fs/upgrade/snaps/tasks/% | 0 qa/suites/fs/upgrade/snaps/tasks/.qa | 1 + qa/suites/fs/upgrade/snaps/tasks/0-luminous.yaml | 41 + qa/suites/fs/upgrade/snaps/tasks/1-client.yaml | 13 + qa/suites/fs/upgrade/snaps/tasks/2-upgrade.yaml | 19 + qa/suites/fs/upgrade/snaps/tasks/3-sanity.yaml | 10 + .../fs/upgrade/snaps/tasks/4-client-upgrade/.qa | 1 + .../upgrade/snaps/tasks/4-client-upgrade/no.yaml | 0 .../upgrade/snaps/tasks/4-client-upgrade/yes.yaml | 10 + .../fs/upgrade/snaps/tasks/5-client-sanity.yaml | 10 + .../fs/upgrade/snaps/tasks/6-snap-upgrade.yaml | 16 + .../fs/upgrade/snaps/tasks/7-client-sanity.yaml | 1 + qa/suites/fs/upgrade/volumes/.qa | 1 + qa/suites/fs/upgrade/volumes/import-legacy/% | 0 qa/suites/fs/upgrade/volumes/import-legacy/.qa | 1 + .../volumes/import-legacy/bluestore-bitmap.yaml | 1 + .../fs/upgrade/volumes/import-legacy/clusters/.qa | 1 + .../clusters/1-mds-2-client-micro.yaml | 7 + qa/suites/fs/upgrade/volumes/import-legacy/conf | 1 + .../fs/upgrade/volumes/import-legacy/overrides/+ | 0 .../fs/upgrade/volumes/import-legacy/overrides/.qa | 1 + .../import-legacy/overrides/frag_enable.yaml | 1 + .../volumes/import-legacy/overrides/pg-warn.yaml | 5 + .../import-legacy/overrides/whitelist_health.yaml | 1 + .../overrides/whitelist_wrongly_marked_down.yaml | 1 + qa/suites/fs/upgrade/volumes/import-legacy/tasks/% | 0 .../fs/upgrade/volumes/import-legacy/tasks/.qa | 1 + .../volumes/import-legacy/tasks/0-mimic.yaml | 42 + .../volumes/import-legacy/tasks/1-client.yaml | 33 + .../volumes/import-legacy/tasks/2-upgrade.yaml | 54 + .../volumes/import-legacy/tasks/3-verify.yaml | 25 + .../volumes/import-legacy/ubuntu_18.04.yaml | 1 + qa/suites/fs/verify/% | 0 qa/suites/fs/verify/.qa | 1 + qa/suites/fs/verify/begin.yaml | 1 + qa/suites/fs/verify/centos_latest.yaml | 1 + qa/suites/fs/verify/clusters/.qa | 1 + qa/suites/fs/verify/clusters/fixed-2-ucephfs.yaml | 1 + qa/suites/fs/verify/conf | 1 + qa/suites/fs/verify/mount/.qa | 1 + qa/suites/fs/verify/mount/fuse.yaml | 1 + qa/suites/fs/verify/objectstore-ec | 1 + qa/suites/fs/verify/overrides/+ | 0 qa/suites/fs/verify/overrides/.qa | 1 + qa/suites/fs/verify/overrides/frag_enable.yaml | 1 + qa/suites/fs/verify/overrides/mon-debug.yaml | 6 + qa/suites/fs/verify/overrides/session_timeout.yaml | 1 + .../fs/verify/overrides/whitelist_health.yaml | 1 + .../overrides/whitelist_wrongly_marked_down.yaml | 1 + qa/suites/fs/verify/tasks/.qa | 1 + 
.../verify/tasks/cfuse_workunit_suites_dbench.yaml | 1 + .../tasks/cfuse_workunit_suites_fsstress.yaml | 1 + qa/suites/fs/verify/validater/.qa | 1 + qa/suites/fs/verify/validater/lockdep.yaml | 5 + qa/suites/fs/verify/validater/valgrind.yaml | 29 + qa/suites/hadoop/.qa | 1 + qa/suites/hadoop/basic/% | 0 qa/suites/hadoop/basic/.qa | 1 + qa/suites/hadoop/basic/clusters/.qa | 1 + qa/suites/hadoop/basic/clusters/fixed-3.yaml | 13 + qa/suites/hadoop/basic/distros/.qa | 1 + qa/suites/hadoop/basic/distros/ubuntu_latest.yaml | 1 + qa/suites/hadoop/basic/filestore-xfs.yaml | 1 + qa/suites/hadoop/basic/tasks/.qa | 1 + qa/suites/hadoop/basic/tasks/repl.yaml | 8 + qa/suites/hadoop/basic/tasks/terasort.yaml | 10 + qa/suites/hadoop/basic/tasks/wordcount.yaml | 8 + qa/suites/kcephfs/.qa | 1 + qa/suites/kcephfs/cephfs/% | 0 qa/suites/kcephfs/cephfs/.qa | 1 + qa/suites/kcephfs/cephfs/begin.yaml | 1 + qa/suites/kcephfs/cephfs/clusters/.qa | 1 + .../kcephfs/cephfs/clusters/1-mds-1-client.yaml | 1 + qa/suites/kcephfs/cephfs/conf | 1 + qa/suites/kcephfs/cephfs/inline/.qa | 1 + qa/suites/kcephfs/cephfs/inline/no.yaml | 0 qa/suites/kcephfs/cephfs/inline/yes.yaml | 4 + qa/suites/kcephfs/cephfs/kclient | 1 + qa/suites/kcephfs/cephfs/objectstore-ec | 1 + qa/suites/kcephfs/cephfs/overrides/+ | 0 qa/suites/kcephfs/cephfs/overrides/.qa | 1 + .../kcephfs/cephfs/overrides/frag_enable.yaml | 1 + qa/suites/kcephfs/cephfs/overrides/log-config.yaml | 1 + .../kcephfs/cephfs/overrides/osd-asserts.yaml | 1 + .../kcephfs/cephfs/overrides/whitelist_health.yaml | 1 + .../overrides/whitelist_wrongly_marked_down.yaml | 1 + qa/suites/kcephfs/cephfs/tasks/.qa | 1 + .../cephfs/tasks/kclient_workunit_direct_io.yaml | 6 + .../tasks/kclient_workunit_kernel_untar_build.yaml | 5 + .../cephfs/tasks/kclient_workunit_misc.yaml | 5 + .../cephfs/tasks/kclient_workunit_o_trunc.yaml | 6 + .../cephfs/tasks/kclient_workunit_snaps.yaml | 5 + .../tasks/kclient_workunit_suites_dbench.yaml | 5 + .../cephfs/tasks/kclient_workunit_suites_ffsb.yaml | 10 + .../tasks/kclient_workunit_suites_fsstress.yaml | 5 + .../cephfs/tasks/kclient_workunit_suites_fsx.yaml | 5 + .../tasks/kclient_workunit_suites_fsync.yaml | 5 + .../tasks/kclient_workunit_suites_iozone.yaml | 5 + .../cephfs/tasks/kclient_workunit_suites_pjd.yaml | 6 + .../tasks/kclient_workunit_trivial_sync.yaml | 4 + qa/suites/kcephfs/mixed-clients/% | 0 qa/suites/kcephfs/mixed-clients/.qa | 1 + qa/suites/kcephfs/mixed-clients/begin.yaml | 1 + qa/suites/kcephfs/mixed-clients/clusters/.qa | 1 + .../mixed-clients/clusters/1-mds-2-client.yaml | 1 + qa/suites/kcephfs/mixed-clients/conf | 1 + qa/suites/kcephfs/mixed-clients/kclient-overrides | 1 + qa/suites/kcephfs/mixed-clients/objectstore-ec | 1 + qa/suites/kcephfs/mixed-clients/overrides/+ | 0 qa/suites/kcephfs/mixed-clients/overrides/.qa | 1 + .../mixed-clients/overrides/frag_enable.yaml | 1 + .../mixed-clients/overrides/log-config.yaml | 1 + .../mixed-clients/overrides/osd-asserts.yaml | 1 + .../mixed-clients/overrides/whitelist_health.yaml | 1 + .../overrides/whitelist_wrongly_marked_down.yaml | 1 + qa/suites/kcephfs/mixed-clients/tasks/.qa | 1 + .../kernel_cfuse_workunits_dbench_iozone.yaml | 18 + ...ernel_cfuse_workunits_untarbuild_blogbench.yaml | 18 + qa/suites/kcephfs/recovery/% | 0 qa/suites/kcephfs/recovery/.qa | 1 + qa/suites/kcephfs/recovery/begin.yaml | 1 + qa/suites/kcephfs/recovery/clusters/.qa | 1 + .../kcephfs/recovery/clusters/1-mds-4-client.yaml | 1 + qa/suites/kcephfs/recovery/conf | 1 + qa/suites/kcephfs/recovery/kclient | 1 + 
qa/suites/kcephfs/recovery/objectstore-ec | 1 + qa/suites/kcephfs/recovery/overrides/+ | 0 qa/suites/kcephfs/recovery/overrides/.qa | 1 + .../kcephfs/recovery/overrides/frag_enable.yaml | 1 + .../kcephfs/recovery/overrides/log-config.yaml | 1 + .../kcephfs/recovery/overrides/osd-asserts.yaml | 1 + .../recovery/overrides/whitelist_health.yaml | 1 + .../overrides/whitelist_wrongly_marked_down.yaml | 1 + qa/suites/kcephfs/recovery/tasks/.qa | 1 + qa/suites/kcephfs/recovery/tasks/auto-repair.yaml | 13 + qa/suites/kcephfs/recovery/tasks/backtrace.yaml | 5 + .../kcephfs/recovery/tasks/client-limits.yaml | 20 + .../kcephfs/recovery/tasks/client-recovery.yaml | 15 + qa/suites/kcephfs/recovery/tasks/damage.yaml | 27 + qa/suites/kcephfs/recovery/tasks/data-scan.yaml | 19 + qa/suites/kcephfs/recovery/tasks/failover.yaml | 12 + .../kcephfs/recovery/tasks/forward-scrub.yaml | 14 + .../kcephfs/recovery/tasks/journal-repair.yaml | 14 + qa/suites/kcephfs/recovery/tasks/mds-flush.yaml | 5 + qa/suites/kcephfs/recovery/tasks/mds-full.yaml | 29 + qa/suites/kcephfs/recovery/tasks/pool-perm.yaml | 5 + qa/suites/kcephfs/recovery/tasks/sessionmap.yaml | 10 + qa/suites/kcephfs/recovery/tasks/strays.yaml | 5 + .../kcephfs/recovery/tasks/volume-client.yaml | 9 + qa/suites/kcephfs/thrash/% | 0 qa/suites/kcephfs/thrash/.qa | 1 + qa/suites/kcephfs/thrash/begin.yaml | 1 + qa/suites/kcephfs/thrash/clusters/.qa | 1 + .../kcephfs/thrash/clusters/1-mds-1-client.yaml | 1 + qa/suites/kcephfs/thrash/conf | 1 + qa/suites/kcephfs/thrash/kclient | 1 + qa/suites/kcephfs/thrash/objectstore-ec | 1 + qa/suites/kcephfs/thrash/overrides/+ | 0 qa/suites/kcephfs/thrash/overrides/.qa | 1 + .../kcephfs/thrash/overrides/frag_enable.yaml | 1 + qa/suites/kcephfs/thrash/overrides/log-config.yaml | 1 + .../kcephfs/thrash/overrides/osd-asserts.yaml | 1 + .../thrash/overrides/thrash-health-whitelist.yaml | 1 + .../kcephfs/thrash/overrides/whitelist_health.yaml | 1 + .../overrides/whitelist_wrongly_marked_down.yaml | 1 + qa/suites/kcephfs/thrash/thrashers/.qa | 1 + qa/suites/kcephfs/thrash/thrashers/default.yaml | 7 + qa/suites/kcephfs/thrash/thrashers/mds.yaml | 7 + qa/suites/kcephfs/thrash/thrashers/mon.yaml | 9 + qa/suites/kcephfs/thrash/workloads/.qa | 1 + .../workloads/kclient_workunit_suites_ffsb.yaml | 13 + .../workloads/kclient_workunit_suites_iozone.yaml | 5 + qa/suites/krbd/.qa | 1 + qa/suites/krbd/basic/% | 0 qa/suites/krbd/basic/.qa | 1 + qa/suites/krbd/basic/bluestore-bitmap.yaml | 1 + qa/suites/krbd/basic/ceph/.qa | 1 + qa/suites/krbd/basic/ceph/ceph.yaml | 3 + qa/suites/krbd/basic/clusters/.qa | 1 + qa/suites/krbd/basic/clusters/fixed-1.yaml | 1 + qa/suites/krbd/basic/conf.yaml | 7 + qa/suites/krbd/basic/ms_mode/.qa | 1 + qa/suites/krbd/basic/ms_mode/crc.yaml | 5 + qa/suites/krbd/basic/ms_mode/legacy.yaml | 5 + qa/suites/krbd/basic/ms_mode/secure.yaml | 5 + qa/suites/krbd/basic/tasks/.qa | 1 + qa/suites/krbd/basic/tasks/krbd_deep_flatten.yaml | 5 + qa/suites/krbd/basic/tasks/krbd_discard.yaml | 9 + qa/suites/krbd/basic/tasks/krbd_huge_image.yaml | 5 + qa/suites/krbd/basic/tasks/krbd_msgr_segments.yaml | 5 + .../krbd/basic/tasks/krbd_parent_overlap.yaml | 5 + qa/suites/krbd/basic/tasks/krbd_read_only.yaml | 6 + .../basic/tasks/krbd_whole_object_zeroout.yaml | 5 + qa/suites/krbd/fsx/% | 0 qa/suites/krbd/fsx/.qa | 1 + qa/suites/krbd/fsx/ceph/.qa | 1 + qa/suites/krbd/fsx/ceph/ceph.yaml | 3 + qa/suites/krbd/fsx/clusters/.qa | 1 + qa/suites/krbd/fsx/clusters/3-node.yaml | 14 + qa/suites/krbd/fsx/conf.yaml | 5 + 
qa/suites/krbd/fsx/ms_mode$/.qa | 1 + qa/suites/krbd/fsx/ms_mode$/crc.yaml | 5 + qa/suites/krbd/fsx/ms_mode$/legacy.yaml | 5 + qa/suites/krbd/fsx/ms_mode$/prefer-crc.yaml | 5 + qa/suites/krbd/fsx/ms_mode$/secure.yaml | 5 + qa/suites/krbd/fsx/objectstore/.qa | 1 + .../krbd/fsx/objectstore/bluestore-bitmap.yaml | 1 + qa/suites/krbd/fsx/objectstore/filestore-xfs.yaml | 1 + qa/suites/krbd/fsx/striping/.qa | 1 + qa/suites/krbd/fsx/striping/default/% | 0 qa/suites/krbd/fsx/striping/default/.qa | 1 + .../krbd/fsx/striping/default/msgr-failures/.qa | 1 + .../fsx/striping/default/msgr-failures/few.yaml | 7 + .../fsx/striping/default/msgr-failures/many.yaml | 7 + .../striping/default/randomized-striping-off.yaml | 3 + qa/suites/krbd/fsx/striping/fancy/% | 0 qa/suites/krbd/fsx/striping/fancy/.qa | 1 + .../krbd/fsx/striping/fancy/msgr-failures/.qa | 1 + .../krbd/fsx/striping/fancy/msgr-failures/few.yaml | 7 + .../fsx/striping/fancy/randomized-striping-on.yaml | 3 + qa/suites/krbd/fsx/tasks/.qa | 1 + qa/suites/krbd/fsx/tasks/fsx-1-client.yaml | 10 + qa/suites/krbd/fsx/tasks/fsx-3-client.yaml | 10 + qa/suites/krbd/rbd-nomount/% | 0 qa/suites/krbd/rbd-nomount/.qa | 1 + qa/suites/krbd/rbd-nomount/bluestore-bitmap.yaml | 1 + qa/suites/krbd/rbd-nomount/clusters/.qa | 1 + qa/suites/krbd/rbd-nomount/clusters/fixed-3.yaml | 1 + qa/suites/krbd/rbd-nomount/conf.yaml | 7 + qa/suites/krbd/rbd-nomount/install/.qa | 1 + qa/suites/krbd/rbd-nomount/install/ceph.yaml | 3 + qa/suites/krbd/rbd-nomount/ms_mode/.qa | 1 + qa/suites/krbd/rbd-nomount/ms_mode/crc.yaml | 5 + qa/suites/krbd/rbd-nomount/ms_mode/legacy.yaml | 5 + qa/suites/krbd/rbd-nomount/ms_mode/secure.yaml | 5 + qa/suites/krbd/rbd-nomount/msgr-failures/.qa | 1 + qa/suites/krbd/rbd-nomount/msgr-failures/few.yaml | 7 + qa/suites/krbd/rbd-nomount/msgr-failures/many.yaml | 7 + qa/suites/krbd/rbd-nomount/tasks/.qa | 1 + .../krbd/rbd-nomount/tasks/krbd_data_pool.yaml | 5 + .../rbd-nomount/tasks/krbd_exclusive_option.yaml | 5 + .../krbd/rbd-nomount/tasks/krbd_fallocate.yaml | 5 + .../tasks/krbd_latest_osdmap_on_map.yaml | 5 + .../krbd/rbd-nomount/tasks/krbd_namespaces.yaml | 5 + .../rbd-nomount/tasks/krbd_udev_enumerate.yaml | 5 + .../tasks/krbd_udev_netlink_enobufs.yaml | 10 + .../krbd/rbd-nomount/tasks/krbd_udev_netns.yaml | 5 + .../krbd/rbd-nomount/tasks/krbd_udev_symlinks.yaml | 5 + .../krbd/rbd-nomount/tasks/rbd_concurrent.yaml | 10 + .../krbd/rbd-nomount/tasks/rbd_huge_tickets.yaml | 5 + .../krbd/rbd-nomount/tasks/rbd_image_read.yaml | 15 + qa/suites/krbd/rbd-nomount/tasks/rbd_kernel.yaml | 5 + .../rbd-nomount/tasks/rbd_map_snapshot_io.yaml | 5 + .../krbd/rbd-nomount/tasks/rbd_map_unmap.yaml | 5 + .../krbd/rbd-nomount/tasks/rbd_simple_big.yaml | 6 + qa/suites/krbd/rbd/% | 0 qa/suites/krbd/rbd/.qa | 1 + qa/suites/krbd/rbd/bluestore-bitmap.yaml | 1 + qa/suites/krbd/rbd/clusters/.qa | 1 + qa/suites/krbd/rbd/clusters/fixed-3.yaml | 1 + qa/suites/krbd/rbd/conf.yaml | 7 + qa/suites/krbd/rbd/ms_mode/.qa | 1 + qa/suites/krbd/rbd/ms_mode/crc.yaml | 5 + qa/suites/krbd/rbd/ms_mode/legacy.yaml | 5 + qa/suites/krbd/rbd/ms_mode/secure.yaml | 5 + qa/suites/krbd/rbd/msgr-failures/.qa | 1 + qa/suites/krbd/rbd/msgr-failures/few.yaml | 7 + qa/suites/krbd/rbd/msgr-failures/many.yaml | 7 + qa/suites/krbd/rbd/tasks/.qa | 1 + qa/suites/krbd/rbd/tasks/rbd_fio.yaml | 11 + .../rbd/tasks/rbd_workunit_kernel_untar_build.yaml | 12 + .../krbd/rbd/tasks/rbd_workunit_suites_dbench.yaml | 9 + .../krbd/rbd/tasks/rbd_workunit_suites_ffsb.yaml | 10 + .../rbd/tasks/rbd_workunit_suites_fsstress.yaml 
| 9 + .../tasks/rbd_workunit_suites_fsstress_ext4.yaml | 10 + .../krbd/rbd/tasks/rbd_workunit_suites_fsx.yaml | 9 + .../krbd/rbd/tasks/rbd_workunit_suites_iozone.yaml | 10 + .../krbd/rbd/tasks/rbd_workunit_trivial_sync.yaml | 8 + qa/suites/krbd/singleton/% | 0 qa/suites/krbd/singleton/.qa | 1 + qa/suites/krbd/singleton/bluestore-bitmap.yaml | 1 + qa/suites/krbd/singleton/conf.yaml | 7 + qa/suites/krbd/singleton/ms_mode$/.qa | 1 + qa/suites/krbd/singleton/ms_mode$/crc.yaml | 5 + qa/suites/krbd/singleton/ms_mode$/legacy.yaml | 5 + qa/suites/krbd/singleton/ms_mode$/prefer-crc.yaml | 5 + qa/suites/krbd/singleton/ms_mode$/secure.yaml | 5 + qa/suites/krbd/singleton/msgr-failures/.qa | 1 + qa/suites/krbd/singleton/msgr-failures/few.yaml | 7 + qa/suites/krbd/singleton/msgr-failures/many.yaml | 7 + qa/suites/krbd/singleton/tasks/.qa | 1 + qa/suites/krbd/singleton/tasks/rbd_xfstests.yaml | 38 + qa/suites/krbd/thrash/% | 0 qa/suites/krbd/thrash/.qa | 1 + qa/suites/krbd/thrash/bluestore-bitmap.yaml | 1 + qa/suites/krbd/thrash/ceph/.qa | 1 + qa/suites/krbd/thrash/ceph/ceph.yaml | 3 + qa/suites/krbd/thrash/clusters/.qa | 1 + qa/suites/krbd/thrash/clusters/fixed-3.yaml | 1 + qa/suites/krbd/thrash/conf.yaml | 7 + qa/suites/krbd/thrash/ms_mode$/.qa | 1 + qa/suites/krbd/thrash/ms_mode$/crc.yaml | 5 + qa/suites/krbd/thrash/ms_mode$/legacy.yaml | 5 + qa/suites/krbd/thrash/ms_mode$/prefer-crc.yaml | 5 + qa/suites/krbd/thrash/ms_mode$/secure.yaml | 5 + qa/suites/krbd/thrash/thrashers/.qa | 1 + qa/suites/krbd/thrash/thrashers/backoff.yaml | 15 + qa/suites/krbd/thrash/thrashers/mon-thrasher.yaml | 8 + qa/suites/krbd/thrash/thrashers/pggrow.yaml | 10 + qa/suites/krbd/thrash/thrashers/upmap.yaml | 17 + qa/suites/krbd/thrash/thrashosds-health.yaml | 1 + qa/suites/krbd/thrash/workloads/.qa | 1 + qa/suites/krbd/thrash/workloads/rbd_fio.yaml | 11 + .../thrash/workloads/rbd_workunit_suites_ffsb.yaml | 8 + qa/suites/krbd/unmap/% | 0 qa/suites/krbd/unmap/.qa | 1 + qa/suites/krbd/unmap/ceph/.qa | 1 + qa/suites/krbd/unmap/ceph/ceph.yaml | 14 + qa/suites/krbd/unmap/clusters/.qa | 1 + qa/suites/krbd/unmap/clusters/separate-client.yaml | 16 + qa/suites/krbd/unmap/conf.yaml | 5 + qa/suites/krbd/unmap/filestore-xfs.yaml | 1 + qa/suites/krbd/unmap/kernels/.qa | 1 + qa/suites/krbd/unmap/kernels/pre-single-major.yaml | 10 + qa/suites/krbd/unmap/kernels/single-major-off.yaml | 6 + qa/suites/krbd/unmap/kernels/single-major-on.yaml | 6 + qa/suites/krbd/unmap/tasks/.qa | 1 + qa/suites/krbd/unmap/tasks/unmap.yaml | 5 + qa/suites/krbd/wac/.qa | 1 + qa/suites/krbd/wac/sysfs/% | 0 qa/suites/krbd/wac/sysfs/.qa | 1 + qa/suites/krbd/wac/sysfs/bluestore-bitmap.yaml | 1 + qa/suites/krbd/wac/sysfs/ceph/.qa | 1 + qa/suites/krbd/wac/sysfs/ceph/ceph.yaml | 3 + qa/suites/krbd/wac/sysfs/clusters/.qa | 1 + qa/suites/krbd/wac/sysfs/clusters/fixed-1.yaml | 1 + qa/suites/krbd/wac/sysfs/conf.yaml | 7 + qa/suites/krbd/wac/sysfs/tasks/.qa | 1 + qa/suites/krbd/wac/sysfs/tasks/stable_writes.yaml | 5 + qa/suites/krbd/wac/wac/% | 0 qa/suites/krbd/wac/wac/.qa | 1 + qa/suites/krbd/wac/wac/bluestore-bitmap.yaml | 1 + qa/suites/krbd/wac/wac/ceph/.qa | 1 + qa/suites/krbd/wac/wac/ceph/ceph.yaml | 3 + qa/suites/krbd/wac/wac/clusters/.qa | 1 + qa/suites/krbd/wac/wac/clusters/fixed-3.yaml | 1 + qa/suites/krbd/wac/wac/conf.yaml | 7 + qa/suites/krbd/wac/wac/tasks/.qa | 1 + qa/suites/krbd/wac/wac/tasks/wac.yaml | 11 + qa/suites/krbd/wac/wac/verify/.qa | 1 + qa/suites/krbd/wac/wac/verify/many-resets.yaml | 12 + qa/suites/krbd/wac/wac/verify/no-resets.yaml | 5 + 
qa/suites/marginal/.qa | 1 + qa/suites/marginal/basic/% | 0 qa/suites/marginal/basic/.qa | 1 + qa/suites/marginal/basic/clusters/.qa | 1 + qa/suites/marginal/basic/clusters/fixed-3.yaml | 4 + qa/suites/marginal/basic/tasks/.qa | 1 + .../tasks/kclient_workunit_suites_blogbench.yaml | 8 + .../basic/tasks/kclient_workunit_suites_fsx.yaml | 8 + qa/suites/marginal/fs-misc/% | 0 qa/suites/marginal/fs-misc/.qa | 1 + qa/suites/marginal/fs-misc/clusters/.qa | 1 + .../marginal/fs-misc/clusters/two_clients.yaml | 4 + qa/suites/marginal/fs-misc/tasks/.qa | 1 + qa/suites/marginal/fs-misc/tasks/locktest.yaml | 5 + qa/suites/marginal/mds_restart/% | 0 qa/suites/marginal/mds_restart/.qa | 1 + qa/suites/marginal/mds_restart/clusters/.qa | 1 + .../marginal/mds_restart/clusters/one_mds.yaml | 4 + qa/suites/marginal/mds_restart/tasks/.qa | 1 + .../tasks/restart-workunit-backtraces.yaml | 11 + qa/suites/marginal/multimds/% | 0 qa/suites/marginal/multimds/.qa | 1 + qa/suites/marginal/multimds/clusters/.qa | 1 + .../marginal/multimds/clusters/3-node-3-mds.yaml | 5 + .../marginal/multimds/clusters/3-node-9-mds.yaml | 5 + qa/suites/marginal/multimds/mounts/.qa | 1 + qa/suites/marginal/multimds/mounts/ceph-fuse.yaml | 7 + qa/suites/marginal/multimds/mounts/kclient.yaml | 4 + qa/suites/marginal/multimds/tasks/.qa | 1 + .../marginal/multimds/tasks/workunit_misc.yaml | 5 + .../multimds/tasks/workunit_suites_blogbench.yaml | 5 + .../multimds/tasks/workunit_suites_dbench.yaml | 5 + .../multimds/tasks/workunit_suites_fsstress.yaml | 5 + .../multimds/tasks/workunit_suites_fsync.yaml | 5 + .../multimds/tasks/workunit_suites_pjd.yaml | 11 + .../tasks/workunit_suites_truncate_delay.yaml | 15 + qa/suites/marginal/multimds/thrash/.qa | 1 + qa/suites/marginal/multimds/thrash/exports.yaml | 5 + qa/suites/marginal/multimds/thrash/normal.yaml | 0 qa/suites/mixed-clients/.qa | 1 + qa/suites/mixed-clients/basic/.qa | 1 + qa/suites/mixed-clients/basic/clusters/.qa | 1 + .../mixed-clients/basic/clusters/fixed-3.yaml | 4 + qa/suites/mixed-clients/basic/objectstore | 1 + qa/suites/mixed-clients/basic/tasks/.qa | 1 + .../kernel_cfuse_workunits_dbench_iozone.yaml | 26 + ...ernel_cfuse_workunits_untarbuild_blogbench.yaml | 26 + qa/suites/multimds/.qa | 1 + qa/suites/multimds/basic/% | 0 qa/suites/multimds/basic/.qa | 1 + .../multimds/basic/0-supported-random-distro$ | 1 + qa/suites/multimds/basic/begin.yaml | 1 + qa/suites/multimds/basic/clusters/.qa | 1 + qa/suites/multimds/basic/clusters/3-mds.yaml | 1 + qa/suites/multimds/basic/clusters/9-mds.yaml | 1 + qa/suites/multimds/basic/conf | 1 + qa/suites/multimds/basic/inline | 1 + qa/suites/multimds/basic/mount | 1 + qa/suites/multimds/basic/objectstore-ec | 1 + qa/suites/multimds/basic/overrides/% | 0 qa/suites/multimds/basic/overrides/.qa | 1 + qa/suites/multimds/basic/overrides/basic | 1 + .../basic/overrides/fuse-default-perm-no.yaml | 1 + qa/suites/multimds/basic/q_check_counter/.qa | 1 + .../basic/q_check_counter/check_counter.yaml | 8 + qa/suites/multimds/basic/tasks/.qa | 1 + .../multimds/basic/tasks/cephfs_test_exports.yaml | 5 + .../basic/tasks/cephfs_test_snapshots.yaml | 13 + .../tasks/cfuse_workunit_kernel_untar_build.yaml | 10 + .../multimds/basic/tasks/cfuse_workunit_misc.yaml | 6 + .../basic/tasks/cfuse_workunit_norstats.yaml | 11 + .../tasks/cfuse_workunit_suites_blogbench.yaml | 1 + .../basic/tasks/cfuse_workunit_suites_dbench.yaml | 1 + .../basic/tasks/cfuse_workunit_suites_ffsb.yaml | 1 + .../tasks/cfuse_workunit_suites_fsstress.yaml | 1 + 
.../basic/tasks/cfuse_workunit_suites_fsx.yaml | 5 + .../basic/tasks/cfuse_workunit_suites_pjd.yaml | 12 + qa/suites/multimds/thrash/% | 0 qa/suites/multimds/thrash/.qa | 1 + .../multimds/thrash/0-supported-random-distro$ | 1 + qa/suites/multimds/thrash/begin.yaml | 1 + qa/suites/multimds/thrash/ceph-thrash | 1 + qa/suites/multimds/thrash/clusters/.qa | 1 + .../multimds/thrash/clusters/3-mds-2-standby.yaml | 4 + .../multimds/thrash/clusters/9-mds-3-standby.yaml | 4 + qa/suites/multimds/thrash/conf | 1 + qa/suites/multimds/thrash/mount | 1 + qa/suites/multimds/thrash/msgr-failures | 1 + qa/suites/multimds/thrash/objectstore-ec | 1 + qa/suites/multimds/thrash/overrides/% | 0 qa/suites/multimds/thrash/overrides/.qa | 1 + .../thrash/overrides/fuse-default-perm-no.yaml | 1 + qa/suites/multimds/thrash/overrides/thrash | 1 + .../multimds/thrash/overrides/thrash_debug.yaml | 7 + qa/suites/multimds/thrash/tasks/.qa | 1 + .../tasks/cfuse_workunit_suites_fsstress.yaml | 1 + .../thrash/tasks/cfuse_workunit_suites_pjd.yaml | 1 + qa/suites/multimds/verify/% | 0 qa/suites/multimds/verify/.qa | 1 + qa/suites/multimds/verify/begin.yaml | 1 + qa/suites/multimds/verify/centos_latest.yaml | 1 + qa/suites/multimds/verify/clusters/.qa | 1 + qa/suites/multimds/verify/clusters/3-mds.yaml | 1 + qa/suites/multimds/verify/clusters/9-mds.yaml | 1 + qa/suites/multimds/verify/conf | 1 + qa/suites/multimds/verify/mount | 1 + qa/suites/multimds/verify/objectstore-ec | 1 + qa/suites/multimds/verify/overrides/% | 0 qa/suites/multimds/verify/overrides/.qa | 1 + .../verify/overrides/fuse-default-perm-no.yaml | 1 + qa/suites/multimds/verify/overrides/verify | 1 + qa/suites/multimds/verify/tasks | 1 + qa/suites/multimds/verify/validater | 1 + qa/suites/perf-basic/% | 0 qa/suites/perf-basic/.qa | 1 + qa/suites/perf-basic/ceph.yaml | 24 + qa/suites/perf-basic/objectstore/.qa | 1 + qa/suites/perf-basic/objectstore/bluestore.yaml | 15 + .../perf-basic/objectstore/filestore-xfs.yaml | 15 + qa/suites/perf-basic/settings/.qa | 1 + qa/suites/perf-basic/settings/optimized.yaml | 80 + qa/suites/perf-basic/supported-all-distro | 1 + qa/suites/perf-basic/workloads/.qa | 1 + .../perf-basic/workloads/cosbench_64K_write.yaml | 31 + .../perf-basic/workloads/fio_4K_rand_write.yaml | 30 + .../perf-basic/workloads/radosbench_4K_write.yaml | 29 + qa/suites/powercycle/.qa | 1 + qa/suites/powercycle/osd/% | 0 qa/suites/powercycle/osd/.qa | 1 + qa/suites/powercycle/osd/clusters/.qa | 1 + .../powercycle/osd/clusters/3osd-1per-target.yaml | 5 + qa/suites/powercycle/osd/objectstore | 1 + qa/suites/powercycle/osd/powercycle/.qa | 1 + qa/suites/powercycle/osd/powercycle/default.yaml | 10 + qa/suites/powercycle/osd/supported-all-distro | 1 + qa/suites/powercycle/osd/tasks/.qa | 1 + .../osd/tasks/admin_socket_objecter_requests.yaml | 13 + .../tasks/cfuse_workunit_kernel_untar_build.yaml | 12 + .../powercycle/osd/tasks/cfuse_workunit_misc.yaml | 7 + .../osd/tasks/cfuse_workunit_suites_ffsb.yaml | 14 + .../osd/tasks/cfuse_workunit_suites_fsstress.yaml | 6 + .../osd/tasks/cfuse_workunit_suites_fsx.yaml | 7 + .../osd/tasks/cfuse_workunit_suites_fsync.yaml | 12 + .../osd/tasks/cfuse_workunit_suites_pjd.yaml | 12 + .../cfuse_workunit_suites_truncate_delay.yaml | 15 + .../powercycle/osd/tasks/rados_api_tests.yaml | 15 + qa/suites/powercycle/osd/tasks/radosbench.yaml | 38 + qa/suites/powercycle/osd/tasks/readwrite.yaml | 9 + .../powercycle/osd/tasks/snaps-few-objects.yaml | 13 + .../powercycle/osd/tasks/snaps-many-objects.yaml | 13 + 
qa/suites/powercycle/osd/thrashosds-health.yaml | 1 + qa/suites/powercycle/osd/whitelist_health.yaml | 7 + qa/suites/rados/.qa | 1 + qa/suites/rados/basic/% | 0 qa/suites/rados/basic/.qa | 1 + qa/suites/rados/basic/ceph.yaml | 13 + qa/suites/rados/basic/clusters/+ | 0 qa/suites/rados/basic/clusters/.qa | 1 + qa/suites/rados/basic/clusters/fixed-2.yaml | 1 + qa/suites/rados/basic/clusters/openstack.yaml | 4 + qa/suites/rados/basic/msgr | 1 + qa/suites/rados/basic/msgr-failures/.qa | 1 + qa/suites/rados/basic/msgr-failures/few.yaml | 7 + qa/suites/rados/basic/msgr-failures/many.yaml | 7 + qa/suites/rados/basic/objectstore | 1 + qa/suites/rados/basic/rados.yaml | 1 + qa/suites/rados/basic/supported-random-distro$ | 1 + qa/suites/rados/basic/tasks/.qa | 1 + qa/suites/rados/basic/tasks/rados_api_tests.yaml | 24 + qa/suites/rados/basic/tasks/rados_cls_all.yaml | 13 + qa/suites/rados/basic/tasks/rados_python.yaml | 15 + .../rados/basic/tasks/rados_stress_watch.yaml | 11 + qa/suites/rados/basic/tasks/rados_striper.yaml | 7 + .../basic/tasks/rados_workunit_loadgen_big.yaml | 11 + .../basic/tasks/rados_workunit_loadgen_mix.yaml | 11 + .../tasks/rados_workunit_loadgen_mostlyread.yaml | 11 + qa/suites/rados/basic/tasks/readwrite.yaml | 17 + qa/suites/rados/basic/tasks/repair_test.yaml | 31 + qa/suites/rados/basic/tasks/rgw_snaps.yaml | 41 + qa/suites/rados/basic/tasks/scrub_test.yaml | 30 + qa/suites/rados/dashboard/% | 0 qa/suites/rados/dashboard/.qa | 1 + qa/suites/rados/dashboard/clusters/+ | 0 qa/suites/rados/dashboard/clusters/.qa | 1 + qa/suites/rados/dashboard/clusters/2-node-mgr.yaml | 1 + qa/suites/rados/dashboard/debug/.qa | 1 + qa/suites/rados/dashboard/debug/mgr.yaml | 1 + qa/suites/rados/dashboard/objectstore | 1 + qa/suites/rados/dashboard/supported-random-distro$ | 1 + qa/suites/rados/dashboard/tasks/.qa | 1 + qa/suites/rados/dashboard/tasks/dashboard.yaml | 51 + qa/suites/rados/mgr/% | 0 qa/suites/rados/mgr/.qa | 1 + qa/suites/rados/mgr/clusters/+ | 0 qa/suites/rados/mgr/clusters/.qa | 1 + qa/suites/rados/mgr/clusters/2-node-mgr.yaml | 1 + qa/suites/rados/mgr/debug/.qa | 1 + qa/suites/rados/mgr/debug/mgr.yaml | 1 + qa/suites/rados/mgr/objectstore | 1 + qa/suites/rados/mgr/supported-random-distro$ | 1 + qa/suites/rados/mgr/tasks/.qa | 1 + qa/suites/rados/mgr/tasks/crash.yaml | 17 + qa/suites/rados/mgr/tasks/failover.yaml | 16 + qa/suites/rados/mgr/tasks/insights.yaml | 19 + qa/suites/rados/mgr/tasks/module_selftest.yaml | 25 + qa/suites/rados/mgr/tasks/orchestrator_cli.yaml | 18 + qa/suites/rados/mgr/tasks/progress.yaml | 24 + qa/suites/rados/mgr/tasks/prometheus.yaml | 16 + qa/suites/rados/mgr/tasks/ssh_orchestrator.yaml | 18 + qa/suites/rados/mgr/tasks/workunits.yaml | 16 + qa/suites/rados/monthrash/% | 0 qa/suites/rados/monthrash/.qa | 1 + qa/suites/rados/monthrash/ceph.yaml | 25 + qa/suites/rados/monthrash/clusters/.qa | 1 + qa/suites/rados/monthrash/clusters/3-mons.yaml | 7 + qa/suites/rados/monthrash/clusters/9-mons.yaml | 7 + qa/suites/rados/monthrash/msgr | 1 + qa/suites/rados/monthrash/msgr-failures/.qa | 1 + qa/suites/rados/monthrash/msgr-failures/few.yaml | 7 + .../rados/monthrash/msgr-failures/mon-delay.yaml | 13 + qa/suites/rados/monthrash/objectstore | 1 + qa/suites/rados/monthrash/rados.yaml | 1 + qa/suites/rados/monthrash/supported-random-distro$ | 1 + qa/suites/rados/monthrash/thrashers/.qa | 1 + .../rados/monthrash/thrashers/force-sync-many.yaml | 12 + qa/suites/rados/monthrash/thrashers/many.yaml | 16 + qa/suites/rados/monthrash/thrashers/one.yaml | 9 + 
qa/suites/rados/monthrash/thrashers/sync-many.yaml | 14 + qa/suites/rados/monthrash/thrashers/sync.yaml | 13 + qa/suites/rados/monthrash/workloads/.qa | 1 + .../monthrash/workloads/pool-create-delete.yaml | 58 + .../rados/monthrash/workloads/rados_5925.yaml | 9 + .../rados/monthrash/workloads/rados_api_tests.yaml | 26 + .../workloads/rados_mon_osdmap_prune.yaml | 22 + .../monthrash/workloads/rados_mon_workunits.yaml | 17 + .../monthrash/workloads/snaps-few-objects.yaml | 13 + qa/suites/rados/multimon/% | 0 qa/suites/rados/multimon/.qa | 1 + qa/suites/rados/multimon/clusters/.qa | 1 + qa/suites/rados/multimon/clusters/21.yaml | 8 + qa/suites/rados/multimon/clusters/3.yaml | 7 + qa/suites/rados/multimon/clusters/6.yaml | 7 + qa/suites/rados/multimon/clusters/9.yaml | 8 + qa/suites/rados/multimon/msgr | 1 + qa/suites/rados/multimon/msgr-failures/.qa | 1 + qa/suites/rados/multimon/msgr-failures/few.yaml | 7 + qa/suites/rados/multimon/msgr-failures/many.yaml | 8 + qa/suites/rados/multimon/no_pools.yaml | 3 + qa/suites/rados/multimon/objectstore | 1 + qa/suites/rados/multimon/rados.yaml | 1 + qa/suites/rados/multimon/supported-random-distro$ | 1 + qa/suites/rados/multimon/tasks/.qa | 1 + .../rados/multimon/tasks/mon_clock_no_skews.yaml | 11 + .../rados/multimon/tasks/mon_clock_with_skews.yaml | 24 + qa/suites/rados/multimon/tasks/mon_recovery.yaml | 10 + qa/suites/rados/objectstore/% | 0 qa/suites/rados/objectstore/.qa | 1 + qa/suites/rados/objectstore/backends/.qa | 1 + .../rados/objectstore/backends/alloc-hint.yaml | 22 + .../backends/ceph_objectstore_tool.yaml | 25 + .../rados/objectstore/backends/filejournal.yaml | 13 + .../backends/filestore-idempotent-aio-journal.yaml | 14 + .../objectstore/backends/filestore-idempotent.yaml | 11 + .../rados/objectstore/backends/fusestore.yaml | 9 + .../rados/objectstore/backends/keyvaluedb.yaml | 8 + .../objectstore/backends/objectcacher-stress.yaml | 14 + .../rados/objectstore/backends/objectstore.yaml | 12 + .../rados/objectstore/supported-random-distro$ | 1 + qa/suites/rados/perf/% | 0 qa/suites/rados/perf/.qa | 1 + qa/suites/rados/perf/ceph.yaml | 14 + qa/suites/rados/perf/distros/ubuntu_16.04.yaml | 1 + qa/suites/rados/perf/distros/ubuntu_latest.yaml | 1 + qa/suites/rados/perf/objectstore | 1 + qa/suites/rados/perf/openstack.yaml | 4 + qa/suites/rados/perf/settings/.qa | 1 + qa/suites/rados/perf/settings/optimized.yaml | 76 + qa/suites/rados/perf/workloads/.qa | 1 + .../perf/workloads/cosbench_64K_read_write.yaml | 26 + .../rados/perf/workloads/cosbench_64K_write.yaml | 26 + .../rados/perf/workloads/fio_4K_rand_read.yaml | 25 + qa/suites/rados/perf/workloads/fio_4K_rand_rw.yaml | 25 + .../rados/perf/workloads/fio_4M_rand_read.yaml | 25 + qa/suites/rados/perf/workloads/fio_4M_rand_rw.yaml | 25 + .../rados/perf/workloads/fio_4M_rand_write.yaml | 25 + .../perf/workloads/radosbench_4K_rand_read.yaml | 25 + .../perf/workloads/radosbench_4K_seq_read.yaml | 24 + .../perf/workloads/radosbench_4M_rand_read.yaml | 25 + .../perf/workloads/radosbench_4M_seq_read.yaml | 24 + .../rados/perf/workloads/radosbench_4M_write.yaml | 24 + qa/suites/rados/perf/workloads/sample_fio.yaml | 25 + .../rados/perf/workloads/sample_radosbench.yaml | 24 + qa/suites/rados/rest/% | 0 qa/suites/rados/rest/.qa | 1 + qa/suites/rados/rest/mgr-restful.yaml | 29 + qa/suites/rados/rest/supported-random-distro$ | 1 + qa/suites/rados/singleton-bluestore/% | 0 qa/suites/rados/singleton-bluestore/.qa | 1 + qa/suites/rados/singleton-bluestore/all/.qa | 1 + 
.../rados/singleton-bluestore/all/cephtool.yaml | 44 + qa/suites/rados/singleton-bluestore/msgr | 1 + .../rados/singleton-bluestore/msgr-failures/.qa | 1 + .../singleton-bluestore/msgr-failures/few.yaml | 7 + .../singleton-bluestore/msgr-failures/many.yaml | 7 + .../rados/singleton-bluestore/objectstore/.qa | 1 + .../objectstore/bluestore-bitmap.yaml | 1 + .../objectstore/bluestore-comp-lz4.yaml | 1 + .../objectstore/bluestore-comp-snappy.yaml | 1 + qa/suites/rados/singleton-bluestore/rados.yaml | 1 + .../singleton-bluestore/supported-random-distro$ | 1 + qa/suites/rados/singleton-flat/.qa | 1 + qa/suites/rados/singleton-flat/valgrind-leaks.yaml | 36 + qa/suites/rados/singleton-nomsgr/% | 0 qa/suites/rados/singleton-nomsgr/.qa | 1 + qa/suites/rados/singleton-nomsgr/all/.qa | 1 + .../singleton-nomsgr/all/admin_socket_output.yaml | 24 + qa/suites/rados/singleton-nomsgr/all/balancer.yaml | 10 + .../rados/singleton-nomsgr/all/cache-fs-trunc.yaml | 52 + .../singleton-nomsgr/all/ceph-kvstore-tool.yaml | 21 + .../rados/singleton-nomsgr/all/ceph-post-file.yaml | 12 + .../singleton-nomsgr/all/export-after-evict.yaml | 38 + .../rados/singleton-nomsgr/all/full-tiering.yaml | 38 + .../singleton-nomsgr/all/health-warnings.yaml | 20 + .../all/large-omap-object-warnings.yaml | 27 + .../all/lazy_omap_stats_output.yaml | 16 + .../singleton-nomsgr/all/librados_hello_world.yaml | 22 + qa/suites/rados/singleton-nomsgr/all/msgr.yaml | 21 + .../all/multi-backfill-reject.yaml | 48 + .../rados/singleton-nomsgr/all/pool-access.yaml | 13 + .../all/recovery-unfound-found.yaml | 58 + qa/suites/rados/singleton-nomsgr/rados.yaml | 1 + .../singleton-nomsgr/supported-random-distro$ | 1 + qa/suites/rados/singleton/% | 0 qa/suites/rados/singleton/.qa | 1 + qa/suites/rados/singleton/all/.qa | 1 + qa/suites/rados/singleton/all/admin-socket.yaml | 26 + qa/suites/rados/singleton/all/deduptool.yaml | 26 + .../rados/singleton/all/divergent_priors.yaml | 26 + .../rados/singleton/all/divergent_priors2.yaml | 26 + qa/suites/rados/singleton/all/dump-stuck.yaml | 19 + qa/suites/rados/singleton/all/ec-lost-unfound.yaml | 26 + .../singleton/all/erasure-code-nonregression.yaml | 17 + .../rados/singleton/all/lost-unfound-delete.yaml | 25 + qa/suites/rados/singleton/all/lost-unfound.yaml | 25 + .../singleton/all/max-pg-per-osd.from-mon.yaml | 27 + .../singleton/all/max-pg-per-osd.from-primary.yaml | 32 + .../singleton/all/max-pg-per-osd.from-replica.yaml | 32 + qa/suites/rados/singleton/all/mon-auth-caps.yaml | 17 + .../rados/singleton/all/mon-config-key-caps.yaml | 17 + qa/suites/rados/singleton/all/mon-config-keys.yaml | 20 + qa/suites/rados/singleton/all/mon-config.yaml | 20 + .../all/mon-memory-target-compliance.yaml.disabled | 152 + qa/suites/rados/singleton/all/osd-backfill.yaml | 26 + .../singleton/all/osd-recovery-incomplete.yaml | 28 + qa/suites/rados/singleton/all/osd-recovery.yaml | 30 + qa/suites/rados/singleton/all/peer.yaml | 25 + .../singleton/all/pg-autoscaler-progress-off.yaml | 42 + qa/suites/rados/singleton/all/pg-autoscaler.yaml | 38 + .../singleton/all/pg-removal-interruption.yaml | 34 + qa/suites/rados/singleton/all/radostool.yaml | 26 + qa/suites/rados/singleton/all/random-eio.yaml | 44 + qa/suites/rados/singleton/all/rebuild-mondb.yaml | 32 + .../rados/singleton/all/recovery-preemption.yaml | 57 + .../rados/singleton/all/resolve_stuck_peering.yaml | 17 + qa/suites/rados/singleton/all/test-crash.yaml | 15 + .../all/test_envlibrados_for_rocksdb.yaml | 19 + .../rados/singleton/all/thrash-backfill-full.yaml | 50 + 
qa/suites/rados/singleton/all/thrash-eio.yaml | 47 + qa/suites/rados/singleton/all/thrash-rados/+ | 0 qa/suites/rados/singleton/all/thrash-rados/.qa | 1 + .../singleton/all/thrash-rados/thrash-rados.yaml | 27 + .../all/thrash-rados/thrashosds-health.yaml | 1 + .../all/thrash_cache_writeback_proxy_none.yaml | 70 + .../singleton/all/watch-notify-same-primary.yaml | 32 + qa/suites/rados/singleton/msgr | 1 + qa/suites/rados/singleton/msgr-failures/.qa | 1 + qa/suites/rados/singleton/msgr-failures/few.yaml | 7 + qa/suites/rados/singleton/msgr-failures/many.yaml | 11 + qa/suites/rados/singleton/objectstore | 1 + qa/suites/rados/singleton/rados.yaml | 1 + qa/suites/rados/singleton/supported-random-distro$ | 1 + qa/suites/rados/standalone/% | 0 qa/suites/rados/standalone/.qa | 1 + .../rados/standalone/supported-random-distro$ | 1 + qa/suites/rados/standalone/workloads/.qa | 1 + qa/suites/rados/standalone/workloads/crush.yaml | 18 + .../rados/standalone/workloads/erasure-code.yaml | 18 + qa/suites/rados/standalone/workloads/mgr.yaml | 18 + qa/suites/rados/standalone/workloads/misc.yaml | 18 + qa/suites/rados/standalone/workloads/mon.yaml | 18 + qa/suites/rados/standalone/workloads/osd.yaml | 18 + qa/suites/rados/standalone/workloads/scrub.yaml | 18 + qa/suites/rados/thrash-erasure-code-big/% | 0 qa/suites/rados/thrash-erasure-code-big/.qa | 1 + qa/suites/rados/thrash-erasure-code-big/ceph.yaml | 1 + qa/suites/rados/thrash-erasure-code-big/cluster/+ | 0 .../rados/thrash-erasure-code-big/cluster/.qa | 1 + .../thrash-erasure-code-big/cluster/12-osds.yaml | 4 + .../thrash-erasure-code-big/cluster/openstack.yaml | 4 + .../rados/thrash-erasure-code-big/msgr-failures | 1 + .../rados/thrash-erasure-code-big/objectstore | 1 + qa/suites/rados/thrash-erasure-code-big/rados.yaml | 1 + .../thrash-erasure-code-big/recovery-overrides | 1 + .../supported-random-distro$ | 1 + .../rados/thrash-erasure-code-big/thrashers/.qa | 1 + .../thrash-erasure-code-big/thrashers/careful.yaml | 20 + .../thrash-erasure-code-big/thrashers/default.yaml | 19 + .../thrashers/fastread.yaml | 20 + .../thrash-erasure-code-big/thrashers/mapgap.yaml | 21 + .../thrashers/morepggrow.yaml | 16 + .../thrash-erasure-code-big/thrashers/pggrow.yaml | 15 + .../thrash-erasure-code-big/thrashosds-health.yaml | 1 + .../rados/thrash-erasure-code-big/workloads/.qa | 1 + .../ec-rados-plugin=jerasure-k=4-m=2.yaml | 1 + .../workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml | 1 + qa/suites/rados/thrash-erasure-code-isa/% | 0 qa/suites/rados/thrash-erasure-code-isa/.qa | 1 + qa/suites/rados/thrash-erasure-code-isa/arch/.qa | 1 + .../rados/thrash-erasure-code-isa/arch/x86_64.yaml | 1 + qa/suites/rados/thrash-erasure-code-isa/ceph.yaml | 1 + qa/suites/rados/thrash-erasure-code-isa/clusters | 1 + .../rados/thrash-erasure-code-isa/msgr-failures | 1 + .../rados/thrash-erasure-code-isa/objectstore | 1 + qa/suites/rados/thrash-erasure-code-isa/rados.yaml | 1 + .../thrash-erasure-code-isa/recovery-overrides | 1 + .../supported-random-distro$ | 1 + qa/suites/rados/thrash-erasure-code-isa/thrashers | 1 + .../thrash-erasure-code-isa/thrashosds-health.yaml | 1 + .../rados/thrash-erasure-code-isa/workloads/.qa | 1 + .../workloads/ec-rados-plugin=isa-k=2-m=1.yaml | 1 + qa/suites/rados/thrash-erasure-code-overwrites/% | 0 qa/suites/rados/thrash-erasure-code-overwrites/.qa | 1 + .../bluestore-bitmap.yaml | 1 + .../rados/thrash-erasure-code-overwrites/ceph.yaml | 1 + .../rados/thrash-erasure-code-overwrites/clusters | 1 + .../rados/thrash-erasure-code-overwrites/fast | 1 + 
.../thrash-erasure-code-overwrites/msgr-failures | 1 + .../thrash-erasure-code-overwrites/rados.yaml | 1 + .../recovery-overrides | 1 + .../supported-random-distro$ | 1 + .../rados/thrash-erasure-code-overwrites/thrashers | 1 + .../thrashosds-health.yaml | 1 + .../thrash-erasure-code-overwrites/workloads/.qa | 1 + .../ec-pool-snaps-few-objects-overwrites.yaml | 23 + .../ec-small-objects-fast-read-overwrites.yaml | 29 + .../workloads/ec-small-objects-overwrites.yaml | 28 + .../workloads/ec-snaps-few-objects-overwrites.yaml | 22 + qa/suites/rados/thrash-erasure-code-shec/% | 0 qa/suites/rados/thrash-erasure-code-shec/.qa | 1 + qa/suites/rados/thrash-erasure-code-shec/ceph.yaml | 1 + .../rados/thrash-erasure-code-shec/clusters/+ | 0 .../rados/thrash-erasure-code-shec/clusters/.qa | 1 + .../thrash-erasure-code-shec/clusters/fixed-4.yaml | 1 + .../clusters/openstack.yaml | 4 + .../rados/thrash-erasure-code-shec/msgr-failures | 1 + .../rados/thrash-erasure-code-shec/objectstore | 1 + .../rados/thrash-erasure-code-shec/rados.yaml | 1 + .../thrash-erasure-code-shec/recovery-overrides | 1 + .../supported-random-distro$ | 1 + .../rados/thrash-erasure-code-shec/thrashers/.qa | 1 + .../thrashers/careful.yaml | 20 + .../thrashers/default.yaml | 19 + .../thrashosds-health.yaml | 1 + .../rados/thrash-erasure-code-shec/workloads/.qa | 1 + .../ec-rados-plugin=shec-k=4-m=3-c=2.yaml | 1 + qa/suites/rados/thrash-erasure-code/% | 0 qa/suites/rados/thrash-erasure-code/.qa | 1 + qa/suites/rados/thrash-erasure-code/ceph.yaml | 3 + qa/suites/rados/thrash-erasure-code/clusters | 1 + qa/suites/rados/thrash-erasure-code/fast/.qa | 1 + qa/suites/rados/thrash-erasure-code/fast/fast.yaml | 5 + .../rados/thrash-erasure-code/fast/normal.yaml | 0 qa/suites/rados/thrash-erasure-code/msgr-failures | 1 + qa/suites/rados/thrash-erasure-code/objectstore | 1 + qa/suites/rados/thrash-erasure-code/rados.yaml | 1 + .../rados/thrash-erasure-code/recovery-overrides | 1 + .../thrash-erasure-code/supported-random-distro$ | 1 + qa/suites/rados/thrash-erasure-code/thrashers/.qa | 1 + .../thrash-erasure-code/thrashers/careful.yaml | 19 + .../thrash-erasure-code/thrashers/default.yaml | 18 + .../thrash-erasure-code/thrashers/fastread.yaml | 20 + .../thrash-erasure-code/thrashers/morepggrow.yaml | 16 + .../thrash-erasure-code/thrashers/pggrow.yaml | 16 + .../thrash-erasure-code/thrashosds-health.yaml | 1 + qa/suites/rados/thrash-erasure-code/workloads/.qa | 1 + .../workloads/ec-rados-plugin=clay-k=4-m=2.yaml | 1 + .../ec-rados-plugin=jerasure-k=2-m=1.yaml | 1 + .../ec-rados-plugin=jerasure-k=3-m=1.yaml | 1 + .../workloads/ec-radosbench.yaml | 27 + .../workloads/ec-small-objects-fast-read.yaml | 21 + .../workloads/ec-small-objects-many-deletes.yaml | 14 + .../workloads/ec-small-objects.yaml | 20 + qa/suites/rados/thrash-old-clients/% | 0 qa/suites/rados/thrash-old-clients/.qa | 1 + .../0-size-min-size-overrides/.qa | 1 + .../2-size-2-min-size.yaml | 1 + .../3-size-2-min-size.yaml | 1 + qa/suites/rados/thrash-old-clients/1-install/.qa | 1 + .../rados/thrash-old-clients/1-install/hammer.yaml | 29 + .../rados/thrash-old-clients/1-install/jewel.yaml | 19 + .../thrash-old-clients/1-install/luminous.yaml | 15 + qa/suites/rados/thrash-old-clients/backoff/.qa | 1 + .../rados/thrash-old-clients/backoff/normal.yaml | 0 .../rados/thrash-old-clients/backoff/peering.yaml | 5 + .../backoff/peering_and_degraded.yaml | 6 + qa/suites/rados/thrash-old-clients/ceph.yaml | 7 + qa/suites/rados/thrash-old-clients/clusters/+ | 0 
qa/suites/rados/thrash-old-clients/clusters/.qa | 1 + .../thrash-old-clients/clusters/openstack.yaml | 4 + .../clusters/three-plus-one.yaml | 14 + qa/suites/rados/thrash-old-clients/d-balancer/.qa | 1 + .../d-balancer/crush-compat.yaml | 6 + .../rados/thrash-old-clients/d-balancer/off.yaml | 0 qa/suites/rados/thrash-old-clients/distro$/.qa | 1 + .../thrash-old-clients/distro$/centos_latest.yaml | 1 + .../thrash-old-clients/distro$/ubuntu_16.04.yaml | 1 + .../rados/thrash-old-clients/msgr-failures/.qa | 1 + .../msgr-failures/fastclose.yaml | 8 + .../thrash-old-clients/msgr-failures/few.yaml | 9 + .../msgr-failures/osd-delay.yaml | 11 + qa/suites/rados/thrash-old-clients/msgr/.qa | 1 + .../thrash-old-clients/msgr/async-v1only.yaml | 1 + qa/suites/rados/thrash-old-clients/msgr/async.yaml | 1 + .../rados/thrash-old-clients/msgr/random.yaml | 1 + .../rados/thrash-old-clients/msgr/simple.yaml | 1 + qa/suites/rados/thrash-old-clients/rados.yaml | 1 + qa/suites/rados/thrash-old-clients/thrashers/.qa | 1 + .../thrash-old-clients/thrashers/careful.yaml | 25 + .../thrash-old-clients/thrashers/default.yaml | 24 + .../rados/thrash-old-clients/thrashers/mapgap.yaml | 26 + .../thrash-old-clients/thrashers/morepggrow.yaml | 22 + .../rados/thrash-old-clients/thrashers/none.yaml | 0 .../rados/thrash-old-clients/thrashers/pggrow.yaml | 24 + .../thrash-old-clients/thrashosds-health.yaml | 1 + qa/suites/rados/thrash-old-clients/workloads/.qa | 1 + .../thrash-old-clients/workloads/cache-snaps.yaml | 34 + .../thrash-old-clients/workloads/radosbench.yaml | 41 + .../thrash-old-clients/workloads/rbd_cls.yaml | 7 + .../workloads/snaps-few-objects.yaml | 13 + .../thrash-old-clients/workloads/test_rbd_api.yaml | 8 + qa/suites/rados/thrash/% | 0 qa/suites/rados/thrash/.qa | 1 + .../rados/thrash/0-size-min-size-overrides/.qa | 1 + .../2-size-2-min-size.yaml | 1 + .../3-size-2-min-size.yaml | 1 + qa/suites/rados/thrash/1-pg-log-overrides/.qa | 1 + .../thrash/1-pg-log-overrides/normal_pg_log.yaml | 0 .../thrash/1-pg-log-overrides/short_pg_log.yaml | 1 + qa/suites/rados/thrash/2-recovery-overrides/$ | 0 qa/suites/rados/thrash/2-recovery-overrides/.qa | 1 + .../rados/thrash/2-recovery-overrides/default.yaml | 0 .../2-recovery-overrides/more-active-recovery.yaml | 1 + qa/suites/rados/thrash/backoff/.qa | 1 + qa/suites/rados/thrash/backoff/normal.yaml | 0 qa/suites/rados/thrash/backoff/peering.yaml | 5 + .../rados/thrash/backoff/peering_and_degraded.yaml | 6 + qa/suites/rados/thrash/ceph.yaml | 3 + qa/suites/rados/thrash/clusters/+ | 0 qa/suites/rados/thrash/clusters/.qa | 1 + qa/suites/rados/thrash/clusters/fixed-2.yaml | 1 + qa/suites/rados/thrash/clusters/openstack.yaml | 4 + .../thrash/crc-failures/bad_map_crc_failure.yaml | 7 + qa/suites/rados/thrash/crc-failures/default.yaml | 0 qa/suites/rados/thrash/d-balancer/.qa | 1 + .../rados/thrash/d-balancer/crush-compat.yaml | 6 + qa/suites/rados/thrash/d-balancer/off.yaml | 0 qa/suites/rados/thrash/d-balancer/upmap.yaml | 7 + qa/suites/rados/thrash/msgr | 1 + qa/suites/rados/thrash/msgr-failures/.qa | 1 + .../rados/thrash/msgr-failures/fastclose.yaml | 8 + qa/suites/rados/thrash/msgr-failures/few.yaml | 9 + .../rados/thrash/msgr-failures/osd-delay.yaml | 11 + qa/suites/rados/thrash/objectstore | 1 + qa/suites/rados/thrash/rados.yaml | 1 + qa/suites/rados/thrash/supported-random-distro$ | 1 + qa/suites/rados/thrash/thrashers/.qa | 1 + qa/suites/rados/thrash/thrashers/careful.yaml | 26 + qa/suites/rados/thrash/thrashers/default.yaml | 26 + 
qa/suites/rados/thrash/thrashers/mapgap.yaml | 27 + qa/suites/rados/thrash/thrashers/morepggrow.yaml | 22 + qa/suites/rados/thrash/thrashers/none.yaml | 0 qa/suites/rados/thrash/thrashers/pggrow.yaml | 24 + qa/suites/rados/thrash/thrashosds-health.yaml | 1 + qa/suites/rados/thrash/workloads/.qa | 1 + .../workloads/admin_socket_objecter_requests.yaml | 13 + .../rados/thrash/workloads/cache-agent-big.yaml | 36 + .../rados/thrash/workloads/cache-agent-small.yaml | 34 + .../workloads/cache-pool-snaps-readproxy.yaml | 39 + .../rados/thrash/workloads/cache-pool-snaps.yaml | 44 + qa/suites/rados/thrash/workloads/cache-snaps.yaml | 39 + qa/suites/rados/thrash/workloads/cache.yaml | 36 + .../thrash/workloads/pool-snaps-few-objects.yaml | 18 + .../rados/thrash/workloads/rados_api_tests.yaml | 20 + .../workloads/radosbench-high-concurrency.yaml | 49 + qa/suites/rados/thrash/workloads/radosbench.yaml | 33 + qa/suites/rados/thrash/workloads/redirect.yaml | 15 + .../thrash/workloads/redirect_promote_tests.yaml | 14 + .../thrash/workloads/redirect_set_object.yaml | 13 + .../rados/thrash/workloads/set-chunks-read.yaml | 13 + .../rados/thrash/workloads/small-objects.yaml | 24 + .../rados/thrash/workloads/snaps-few-objects.yaml | 13 + .../thrash/workloads/write_fadvise_dontneed.yaml | 8 + qa/suites/rados/upgrade/.qa | 1 + qa/suites/rados/upgrade/mimic-x-singleton | 1 + qa/suites/rados/verify/% | 0 qa/suites/rados/verify/.qa | 1 + qa/suites/rados/verify/ceph.yaml | 13 + qa/suites/rados/verify/clusters/+ | 0 qa/suites/rados/verify/clusters/.qa | 1 + qa/suites/rados/verify/clusters/fixed-2.yaml | 1 + qa/suites/rados/verify/clusters/openstack.yaml | 4 + qa/suites/rados/verify/d-thrash/.qa | 1 + qa/suites/rados/verify/d-thrash/default/+ | 0 qa/suites/rados/verify/d-thrash/default/.qa | 1 + .../rados/verify/d-thrash/default/default.yaml | 11 + .../verify/d-thrash/default/thrashosds-health.yaml | 1 + qa/suites/rados/verify/d-thrash/none.yaml | 0 qa/suites/rados/verify/msgr | 1 + qa/suites/rados/verify/msgr-failures/.qa | 1 + qa/suites/rados/verify/msgr-failures/few.yaml | 7 + qa/suites/rados/verify/objectstore | 1 + qa/suites/rados/verify/rados.yaml | 1 + qa/suites/rados/verify/tasks/.qa | 1 + qa/suites/rados/verify/tasks/mon_recovery.yaml | 10 + qa/suites/rados/verify/tasks/rados_api_tests.yaml | 30 + qa/suites/rados/verify/tasks/rados_cls_all.yaml | 13 + qa/suites/rados/verify/validater/.qa | 1 + qa/suites/rados/verify/validater/lockdep.yaml | 5 + qa/suites/rados/verify/validater/valgrind.yaml | 32 + qa/suites/rbd/.qa | 1 + qa/suites/rbd/basic/% | 0 qa/suites/rbd/basic/.qa | 1 + qa/suites/rbd/basic/base/.qa | 1 + qa/suites/rbd/basic/base/install.yaml | 3 + qa/suites/rbd/basic/cachepool/.qa | 1 + qa/suites/rbd/basic/cachepool/none.yaml | 0 qa/suites/rbd/basic/cachepool/small.yaml | 17 + qa/suites/rbd/basic/clusters/+ | 0 qa/suites/rbd/basic/clusters/.qa | 1 + qa/suites/rbd/basic/clusters/fixed-1.yaml | 1 + qa/suites/rbd/basic/clusters/openstack.yaml | 4 + qa/suites/rbd/basic/msgr-failures/.qa | 1 + qa/suites/rbd/basic/msgr-failures/few.yaml | 7 + qa/suites/rbd/basic/objectstore | 1 + qa/suites/rbd/basic/supported-random-distro$ | 1 + qa/suites/rbd/basic/tasks/.qa | 1 + .../rbd/basic/tasks/rbd_api_tests_old_format.yaml | 11 + qa/suites/rbd/basic/tasks/rbd_cls_tests.yaml | 7 + qa/suites/rbd/basic/tasks/rbd_lock_and_fence.yaml | 5 + .../tasks/rbd_python_api_tests_old_format.yaml | 10 + qa/suites/rbd/cli/% | 0 qa/suites/rbd/cli/.qa | 1 + qa/suites/rbd/cli/base/.qa | 1 + qa/suites/rbd/cli/base/install.yaml | 3 + 
qa/suites/rbd/cli/clusters | 1 + qa/suites/rbd/cli/features/.qa | 1 + qa/suites/rbd/cli/features/defaults.yaml | 5 + qa/suites/rbd/cli/features/journaling.yaml | 5 + qa/suites/rbd/cli/features/layering.yaml | 5 + qa/suites/rbd/cli/msgr-failures/.qa | 1 + qa/suites/rbd/cli/msgr-failures/few.yaml | 7 + qa/suites/rbd/cli/objectstore | 1 + qa/suites/rbd/cli/pool/.qa | 1 + qa/suites/rbd/cli/pool/ec-data-pool.yaml | 27 + qa/suites/rbd/cli/pool/none.yaml | 0 qa/suites/rbd/cli/pool/replicated-data-pool.yaml | 11 + qa/suites/rbd/cli/pool/small-cache-pool.yaml | 17 + qa/suites/rbd/cli/supported-random-distro$ | 1 + qa/suites/rbd/cli/workloads/.qa | 1 + qa/suites/rbd/cli/workloads/rbd_cli_generic.yaml | 5 + qa/suites/rbd/cli/workloads/rbd_cli_groups.yaml | 5 + .../rbd/cli/workloads/rbd_cli_import_export.yaml | 5 + qa/suites/rbd/cli_v1/% | 0 qa/suites/rbd/cli_v1/.qa | 1 + qa/suites/rbd/cli_v1/base/.qa | 1 + qa/suites/rbd/cli_v1/base/install.yaml | 3 + qa/suites/rbd/cli_v1/clusters | 1 + qa/suites/rbd/cli_v1/features/.qa | 1 + qa/suites/rbd/cli_v1/features/format-1.yaml | 5 + qa/suites/rbd/cli_v1/msgr-failures/.qa | 1 + qa/suites/rbd/cli_v1/msgr-failures/few.yaml | 7 + qa/suites/rbd/cli_v1/objectstore | 1 + qa/suites/rbd/cli_v1/pool/.qa | 1 + qa/suites/rbd/cli_v1/pool/none.yaml | 0 qa/suites/rbd/cli_v1/pool/small-cache-pool.yaml | 17 + qa/suites/rbd/cli_v1/supported-random-distro$ | 1 + qa/suites/rbd/cli_v1/workloads/.qa | 1 + .../rbd/cli_v1/workloads/rbd_cli_generic.yaml | 5 + .../cli_v1/workloads/rbd_cli_import_export.yaml | 5 + qa/suites/rbd/librbd/% | 0 qa/suites/rbd/librbd/.qa | 1 + qa/suites/rbd/librbd/cache/.qa | 1 + qa/suites/rbd/librbd/cache/none.yaml | 6 + qa/suites/rbd/librbd/cache/writeback.yaml | 6 + qa/suites/rbd/librbd/cache/writethrough.yaml | 7 + qa/suites/rbd/librbd/clusters/+ | 0 qa/suites/rbd/librbd/clusters/.qa | 1 + qa/suites/rbd/librbd/clusters/fixed-3.yaml | 1 + qa/suites/rbd/librbd/clusters/openstack.yaml | 4 + qa/suites/rbd/librbd/config/.qa | 1 + qa/suites/rbd/librbd/config/copy-on-read.yaml | 5 + qa/suites/rbd/librbd/config/none.yaml | 0 .../rbd/librbd/config/permit-partial-discard.yaml | 5 + qa/suites/rbd/librbd/msgr-failures/.qa | 1 + qa/suites/rbd/librbd/msgr-failures/few.yaml | 7 + qa/suites/rbd/librbd/objectstore | 1 + qa/suites/rbd/librbd/pool/.qa | 1 + qa/suites/rbd/librbd/pool/ec-data-pool.yaml | 24 + qa/suites/rbd/librbd/pool/none.yaml | 0 .../rbd/librbd/pool/replicated-data-pool.yaml | 11 + qa/suites/rbd/librbd/pool/small-cache-pool.yaml | 17 + qa/suites/rbd/librbd/supported-random-distro$ | 1 + qa/suites/rbd/librbd/workloads/.qa | 1 + qa/suites/rbd/librbd/workloads/c_api_tests.yaml | 13 + .../workloads/c_api_tests_with_defaults.yaml | 13 + .../workloads/c_api_tests_with_journaling.yaml | 13 + qa/suites/rbd/librbd/workloads/fsx.yaml | 4 + .../rbd/librbd/workloads/python_api_tests.yaml | 7 + .../workloads/python_api_tests_with_defaults.yaml | 7 + .../python_api_tests_with_journaling.yaml | 7 + qa/suites/rbd/librbd/workloads/rbd_fio.yaml | 10 + qa/suites/rbd/maintenance/% | 0 qa/suites/rbd/maintenance/.qa | 1 + qa/suites/rbd/maintenance/base/.qa | 1 + qa/suites/rbd/maintenance/base/install.yaml | 3 + qa/suites/rbd/maintenance/clusters/+ | 0 qa/suites/rbd/maintenance/clusters/.qa | 1 + qa/suites/rbd/maintenance/clusters/fixed-3.yaml | 1 + qa/suites/rbd/maintenance/clusters/openstack.yaml | 1 + qa/suites/rbd/maintenance/objectstore | 1 + qa/suites/rbd/maintenance/qemu/.qa | 1 + qa/suites/rbd/maintenance/qemu/xfstests.yaml | 14 + 
qa/suites/rbd/maintenance/supported-random-distro$ | 1 + qa/suites/rbd/maintenance/workloads/.qa | 1 + .../maintenance/workloads/dynamic_features.yaml | 8 + .../workloads/dynamic_features_no_cache.yaml | 13 + .../maintenance/workloads/rebuild_object_map.yaml | 8 + qa/suites/rbd/mirror-thrash/% | 0 qa/suites/rbd/mirror-thrash/.qa | 1 + qa/suites/rbd/mirror-thrash/base/.qa | 1 + qa/suites/rbd/mirror-thrash/base/install.yaml | 9 + qa/suites/rbd/mirror-thrash/cluster/+ | 0 qa/suites/rbd/mirror-thrash/cluster/.qa | 1 + qa/suites/rbd/mirror-thrash/cluster/2-node.yaml | 31 + qa/suites/rbd/mirror-thrash/cluster/openstack.yaml | 4 + qa/suites/rbd/mirror-thrash/msgr-failures | 1 + qa/suites/rbd/mirror-thrash/objectstore | 1 + qa/suites/rbd/mirror-thrash/policy/.qa | 1 + qa/suites/rbd/mirror-thrash/policy/none.yaml | 5 + qa/suites/rbd/mirror-thrash/policy/simple.yaml | 5 + qa/suites/rbd/mirror-thrash/rbd-mirror/.qa | 1 + .../mirror-thrash/rbd-mirror/four-per-cluster.yaml | 31 + .../rbd/mirror-thrash/supported-random-distro$ | 1 + qa/suites/rbd/mirror-thrash/users/.qa | 1 + qa/suites/rbd/mirror-thrash/users/mirror.yaml | 23 + qa/suites/rbd/mirror-thrash/workloads/.qa | 1 + .../workloads/rbd-mirror-fsx-workunit.yaml | 33 + .../workloads/rbd-mirror-stress-workunit.yaml | 13 + .../workloads/rbd-mirror-workunit.yaml | 12 + qa/suites/rbd/mirror/% | 0 qa/suites/rbd/mirror/.qa | 1 + qa/suites/rbd/mirror/base | 1 + qa/suites/rbd/mirror/cluster | 1 + qa/suites/rbd/mirror/msgr-failures | 1 + qa/suites/rbd/mirror/objectstore | 1 + qa/suites/rbd/mirror/supported-random-distro$ | 1 + qa/suites/rbd/mirror/users | 1 + qa/suites/rbd/mirror/workloads/.qa | 1 + .../workloads/rbd-mirror-bootstrap-workunit.yaml | 11 + .../mirror/workloads/rbd-mirror-ha-workunit.yaml | 26 + .../workloads/rbd-mirror-workunit-config-key.yaml | 12 + .../workloads/rbd-mirror-workunit-policy-none.yaml | 16 + .../rbd-mirror-workunit-policy-simple.yaml | 16 + qa/suites/rbd/nbd/% | 0 qa/suites/rbd/nbd/.qa | 1 + qa/suites/rbd/nbd/base | 1 + qa/suites/rbd/nbd/cluster/+ | 0 qa/suites/rbd/nbd/cluster/.qa | 1 + qa/suites/rbd/nbd/cluster/fixed-3.yaml | 4 + qa/suites/rbd/nbd/cluster/openstack.yaml | 1 + qa/suites/rbd/nbd/msgr-failures | 1 + qa/suites/rbd/nbd/objectstore | 1 + qa/suites/rbd/nbd/thrashers | 1 + qa/suites/rbd/nbd/thrashosds-health.yaml | 1 + qa/suites/rbd/nbd/workloads/.qa | 1 + qa/suites/rbd/nbd/workloads/rbd_fsx_nbd.yaml | 15 + qa/suites/rbd/nbd/workloads/rbd_nbd.yaml | 10 + qa/suites/rbd/qemu/% | 0 qa/suites/rbd/qemu/.qa | 1 + qa/suites/rbd/qemu/cache/.qa | 1 + qa/suites/rbd/qemu/cache/none.yaml | 6 + qa/suites/rbd/qemu/cache/writeback.yaml | 6 + qa/suites/rbd/qemu/cache/writethrough.yaml | 7 + qa/suites/rbd/qemu/clusters/+ | 0 qa/suites/rbd/qemu/clusters/.qa | 1 + qa/suites/rbd/qemu/clusters/fixed-3.yaml | 1 + qa/suites/rbd/qemu/clusters/openstack.yaml | 8 + qa/suites/rbd/qemu/features/.qa | 1 + qa/suites/rbd/qemu/features/defaults.yaml | 5 + qa/suites/rbd/qemu/features/journaling.yaml | 5 + qa/suites/rbd/qemu/msgr-failures/.qa | 1 + qa/suites/rbd/qemu/msgr-failures/few.yaml | 8 + qa/suites/rbd/qemu/objectstore | 1 + qa/suites/rbd/qemu/pool/.qa | 1 + qa/suites/rbd/qemu/pool/ec-cache-pool.yaml | 21 + qa/suites/rbd/qemu/pool/ec-data-pool.yaml | 24 + qa/suites/rbd/qemu/pool/none.yaml | 0 qa/suites/rbd/qemu/pool/replicated-data-pool.yaml | 11 + qa/suites/rbd/qemu/pool/small-cache-pool.yaml | 17 + qa/suites/rbd/qemu/supported-random-distro$ | 1 + qa/suites/rbd/qemu/workloads/.qa | 1 + qa/suites/rbd/qemu/workloads/qemu_bonnie.yaml | 6 + 
qa/suites/rbd/qemu/workloads/qemu_fsstress.yaml | 6 + .../rbd/qemu/workloads/qemu_iozone.yaml.disabled | 6 + qa/suites/rbd/qemu/workloads/qemu_xfstests.yaml | 8 + qa/suites/rbd/singleton-bluestore/% | 0 qa/suites/rbd/singleton-bluestore/.qa | 1 + qa/suites/rbd/singleton-bluestore/all/.qa | 1 + .../rbd/singleton-bluestore/all/issue-20295.yaml | 14 + qa/suites/rbd/singleton-bluestore/objectstore/.qa | 1 + .../objectstore/bluestore-bitmap.yaml | 1 + .../objectstore/bluestore-comp-snappy.yaml | 1 + qa/suites/rbd/singleton-bluestore/openstack.yaml | 4 + .../singleton-bluestore/supported-random-distro$ | 1 + qa/suites/rbd/singleton/% | 0 qa/suites/rbd/singleton/.qa | 1 + qa/suites/rbd/singleton/all/.qa | 1 + qa/suites/rbd/singleton/all/admin_socket.yaml | 9 + qa/suites/rbd/singleton/all/formatted-output.yaml | 10 + qa/suites/rbd/singleton/all/merge_diff.yaml | 9 + qa/suites/rbd/singleton/all/permissions.yaml | 9 + .../rbd/singleton/all/qemu-iotests-no-cache.yaml | 13 + .../rbd/singleton/all/qemu-iotests-writeback.yaml | 13 + .../singleton/all/qemu-iotests-writethrough.yaml | 14 + .../rbd/singleton/all/rbd-vs-unmanaged-snaps.yaml | 14 + qa/suites/rbd/singleton/all/rbd_mirror.yaml | 13 + qa/suites/rbd/singleton/all/rbd_tasks.yaml | 13 + qa/suites/rbd/singleton/all/rbdmap_RBDMAPFILE.yaml | 7 + .../rbd/singleton/all/read-flags-no-cache.yaml | 12 + .../rbd/singleton/all/read-flags-writeback.yaml | 12 + .../rbd/singleton/all/read-flags-writethrough.yaml | 13 + qa/suites/rbd/singleton/all/snap-diff.yaml | 10 + qa/suites/rbd/singleton/all/verify_pool.yaml | 9 + qa/suites/rbd/singleton/objectstore | 1 + qa/suites/rbd/singleton/openstack.yaml | 4 + qa/suites/rbd/singleton/supported-random-distro$ | 1 + qa/suites/rbd/thrash/% | 0 qa/suites/rbd/thrash/.qa | 1 + qa/suites/rbd/thrash/base/.qa | 1 + qa/suites/rbd/thrash/base/install.yaml | 3 + qa/suites/rbd/thrash/clusters/+ | 0 qa/suites/rbd/thrash/clusters/.qa | 1 + qa/suites/rbd/thrash/clusters/fixed-2.yaml | 1 + qa/suites/rbd/thrash/clusters/openstack.yaml | 8 + qa/suites/rbd/thrash/msgr-failures/.qa | 1 + qa/suites/rbd/thrash/msgr-failures/few.yaml | 7 + qa/suites/rbd/thrash/objectstore | 1 + qa/suites/rbd/thrash/supported-random-distro$ | 1 + qa/suites/rbd/thrash/thrashers/.qa | 1 + qa/suites/rbd/thrash/thrashers/cache.yaml | 21 + qa/suites/rbd/thrash/thrashers/default.yaml | 8 + qa/suites/rbd/thrash/thrashosds-health.yaml | 1 + qa/suites/rbd/thrash/workloads/.qa | 1 + qa/suites/rbd/thrash/workloads/journal.yaml | 5 + qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml | 13 + .../workloads/rbd_api_tests_copy_on_read.yaml | 16 + .../thrash/workloads/rbd_api_tests_journaling.yaml | 13 + .../thrash/workloads/rbd_api_tests_no_locking.yaml | 13 + .../thrash/workloads/rbd_fsx_cache_writeback.yaml | 9 + .../workloads/rbd_fsx_cache_writethrough.yaml | 10 + .../rbd/thrash/workloads/rbd_fsx_copy_on_read.yaml | 10 + .../rbd/thrash/workloads/rbd_fsx_deep_copy.yaml | 5 + .../rbd/thrash/workloads/rbd_fsx_journal.yaml | 5 + .../rbd/thrash/workloads/rbd_fsx_nocache.yaml | 9 + .../rbd/thrash/workloads/rbd_fsx_rate_limit.yaml | 11 + qa/suites/rbd/valgrind/% | 0 qa/suites/rbd/valgrind/.qa | 1 + qa/suites/rbd/valgrind/base/.qa | 1 + qa/suites/rbd/valgrind/base/install.yaml | 3 + qa/suites/rbd/valgrind/centos_latest.yaml | 1 + qa/suites/rbd/valgrind/clusters | 1 + qa/suites/rbd/valgrind/objectstore | 1 + qa/suites/rbd/valgrind/validator/.qa | 1 + qa/suites/rbd/valgrind/validator/memcheck.yaml | 12 + qa/suites/rbd/valgrind/workloads/.qa | 1 + 
qa/suites/rbd/valgrind/workloads/c_api_tests.yaml | 13 + .../workloads/c_api_tests_with_defaults.yaml | 13 + .../workloads/c_api_tests_with_journaling.yaml | 13 + qa/suites/rbd/valgrind/workloads/fsx.yaml | 4 + .../rbd/valgrind/workloads/python_api_tests.yaml | 7 + .../workloads/python_api_tests_with_defaults.yaml | 7 + .../python_api_tests_with_journaling.yaml | 7 + qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml | 11 + qa/suites/rgw/.qa | 1 + qa/suites/rgw/hadoop-s3a/% | 0 qa/suites/rgw/hadoop-s3a/.qa | 1 + qa/suites/rgw/hadoop-s3a/clusters/.qa | 1 + qa/suites/rgw/hadoop-s3a/clusters/fixed-2.yaml | 1 + qa/suites/rgw/hadoop-s3a/hadoop/.qa | 1 + qa/suites/rgw/hadoop-s3a/hadoop/default.yaml | 1 + qa/suites/rgw/hadoop-s3a/hadoop/v32.yaml | 3 + qa/suites/rgw/hadoop-s3a/s3a-hadoop.yaml | 11 + qa/suites/rgw/hadoop-s3a/supported-random-distro$ | 1 + qa/suites/rgw/multifs/% | 0 qa/suites/rgw/multifs/.qa | 1 + qa/suites/rgw/multifs/clusters/.qa | 1 + qa/suites/rgw/multifs/clusters/fixed-2.yaml | 1 + qa/suites/rgw/multifs/frontend/.qa | 1 + qa/suites/rgw/multifs/frontend/civetweb.yaml | 1 + qa/suites/rgw/multifs/objectstore | 1 + qa/suites/rgw/multifs/overrides.yaml | 10 + qa/suites/rgw/multifs/rgw_pool_type | 1 + qa/suites/rgw/multifs/tasks/.qa | 1 + qa/suites/rgw/multifs/tasks/rgw_bucket_quota.yaml | 10 + .../rgw/multifs/tasks/rgw_multipart_upload.yaml | 10 + qa/suites/rgw/multifs/tasks/rgw_ragweed.yaml | 19 + qa/suites/rgw/multifs/tasks/rgw_readwrite.yaml | 17 + qa/suites/rgw/multifs/tasks/rgw_roundtrip.yaml | 17 + qa/suites/rgw/multifs/tasks/rgw_s3tests.yaml | 13 + qa/suites/rgw/multifs/tasks/rgw_swift.yaml | 8 + qa/suites/rgw/multifs/tasks/rgw_user_quota.yaml | 10 + qa/suites/rgw/multisite/% | 0 qa/suites/rgw/multisite/.qa | 1 + qa/suites/rgw/multisite/clusters.yaml | 3 + qa/suites/rgw/multisite/frontend | 1 + qa/suites/rgw/multisite/omap_limits.yaml | 10 + qa/suites/rgw/multisite/overrides.yaml | 15 + qa/suites/rgw/multisite/realms/.qa | 1 + .../multisite/realms/three-zone-plus-pubsub.yaml | 23 + qa/suites/rgw/multisite/realms/three-zone.yaml | 20 + qa/suites/rgw/multisite/realms/two-zonegroup.yaml | 27 + qa/suites/rgw/multisite/tasks/.qa | 1 + qa/suites/rgw/multisite/tasks/test_multi.yaml | 26 + qa/suites/rgw/multisite/valgrind.yaml | 19 + qa/suites/rgw/singleton/% | 0 qa/suites/rgw/singleton/.qa | 1 + qa/suites/rgw/singleton/all/.qa | 1 + qa/suites/rgw/singleton/all/radosgw-admin.yaml | 21 + qa/suites/rgw/singleton/frontend/.qa | 1 + qa/suites/rgw/singleton/frontend/civetweb.yaml | 1 + qa/suites/rgw/singleton/objectstore | 1 + qa/suites/rgw/singleton/overrides.yaml | 6 + qa/suites/rgw/singleton/rgw_pool_type | 1 + qa/suites/rgw/singleton/supported-random-distro$ | 1 + qa/suites/rgw/tempest/% | 0 qa/suites/rgw/tempest/.qa | 1 + qa/suites/rgw/tempest/clusters/.qa | 1 + qa/suites/rgw/tempest/clusters/fixed-1.yaml | 1 + qa/suites/rgw/tempest/frontend | 1 + qa/suites/rgw/tempest/tasks/.qa | 1 + qa/suites/rgw/tempest/tasks/rgw_tempest.yaml | 78 + qa/suites/rgw/tempest/ubuntu_latest.yaml | 1 + qa/suites/rgw/thrash/% | 0 qa/suites/rgw/thrash/.qa | 1 + qa/suites/rgw/thrash/civetweb.yaml | 3 + qa/suites/rgw/thrash/clusters/.qa | 1 + qa/suites/rgw/thrash/clusters/fixed-2.yaml | 1 + qa/suites/rgw/thrash/install.yaml | 5 + qa/suites/rgw/thrash/objectstore | 1 + qa/suites/rgw/thrash/thrasher/.qa | 1 + qa/suites/rgw/thrash/thrasher/default.yaml | 9 + qa/suites/rgw/thrash/thrashosds-health.yaml | 1 + qa/suites/rgw/thrash/workload/.qa | 1 + .../rgw/thrash/workload/rgw_bucket_quota.yaml | 7 + 
.../rgw/thrash/workload/rgw_multipart_upload.yaml | 7 + qa/suites/rgw/thrash/workload/rgw_readwrite.yaml | 14 + qa/suites/rgw/thrash/workload/rgw_roundtrip.yaml | 14 + qa/suites/rgw/thrash/workload/rgw_s3tests.yaml | 12 + qa/suites/rgw/thrash/workload/rgw_swift.yaml | 5 + qa/suites/rgw/thrash/workload/rgw_user_quota.yaml | 7 + qa/suites/rgw/tools/+ | 0 qa/suites/rgw/tools/.qa | 1 + qa/suites/rgw/tools/centos_latest.yaml | 1 + qa/suites/rgw/tools/cluster.yaml | 9 + qa/suites/rgw/tools/tasks.yaml | 19 + qa/suites/rgw/verify/% | 0 qa/suites/rgw/verify/.qa | 1 + qa/suites/rgw/verify/clusters/.qa | 1 + qa/suites/rgw/verify/clusters/fixed-2.yaml | 1 + qa/suites/rgw/verify/frontend | 1 + qa/suites/rgw/verify/msgr-failures/.qa | 1 + qa/suites/rgw/verify/msgr-failures/few.yaml | 7 + qa/suites/rgw/verify/objectstore | 1 + qa/suites/rgw/verify/overrides.yaml | 10 + qa/suites/rgw/verify/proto/.qa | 1 + qa/suites/rgw/verify/proto/http.yaml | 0 qa/suites/rgw/verify/proto/https.yaml | 20 + qa/suites/rgw/verify/rgw_pool_type | 1 + .../rgw/verify/striping$/stripe-equals-chunk.yaml | 7 + .../striping$/stripe-greater-than-chunk.yaml | 7 + qa/suites/rgw/verify/tasks/+ | 0 qa/suites/rgw/verify/tasks/.qa | 1 + qa/suites/rgw/verify/tasks/0-install.yaml | 21 + qa/suites/rgw/verify/tasks/cls.yaml | 8 + qa/suites/rgw/verify/tasks/ragweed.yaml | 6 + qa/suites/rgw/verify/tasks/s3tests.yaml | 5 + qa/suites/rgw/verify/tasks/swift.yaml | 8 + qa/suites/rgw/verify/validater/.qa | 1 + qa/suites/rgw/verify/validater/lockdep.yaml | 7 + qa/suites/rgw/verify/validater/valgrind.yaml | 21 + qa/suites/samba/% | 0 qa/suites/samba/.qa | 1 + qa/suites/samba/clusters/.qa | 1 + qa/suites/samba/clusters/samba-basic.yaml | 7 + qa/suites/samba/install/.qa | 1 + qa/suites/samba/install/install.yaml | 9 + qa/suites/samba/mount/.qa | 1 + qa/suites/samba/mount/fuse.yaml | 6 + qa/suites/samba/mount/kclient.yaml | 14 + qa/suites/samba/mount/native.yaml | 2 + qa/suites/samba/mount/noceph.yaml | 5 + qa/suites/samba/objectstore | 1 + qa/suites/samba/workload/.qa | 1 + qa/suites/samba/workload/cifs-dbench.yaml | 8 + qa/suites/samba/workload/cifs-fsstress.yaml | 8 + .../samba/workload/cifs-kernel-build.yaml.disabled | 9 + qa/suites/samba/workload/smbtorture.yaml | 39 + qa/suites/smoke/.qa | 1 + qa/suites/smoke/basic/% | 0 qa/suites/smoke/basic/.qa | 1 + qa/suites/smoke/basic/clusters/+ | 0 qa/suites/smoke/basic/clusters/.qa | 1 + qa/suites/smoke/basic/clusters/fixed-3-cephfs.yaml | 1 + qa/suites/smoke/basic/clusters/openstack.yaml | 8 + qa/suites/smoke/basic/objectstore/.qa | 1 + .../smoke/basic/objectstore/bluestore-bitmap.yaml | 1 + qa/suites/smoke/basic/tasks/.qa | 1 + .../tasks/cfuse_workunit_suites_blogbench.yaml | 9 + .../tasks/cfuse_workunit_suites_fsstress.yaml | 8 + .../basic/tasks/cfuse_workunit_suites_iozone.yaml | 8 + .../basic/tasks/cfuse_workunit_suites_pjd.yaml | 18 + .../basic/tasks/kclient_workunit_direct_io.yaml | 13 + .../tasks/kclient_workunit_suites_dbench.yaml | 14 + .../tasks/kclient_workunit_suites_fsstress.yaml | 14 + .../basic/tasks/kclient_workunit_suites_pjd.yaml | 14 + .../basic/tasks/libcephfs_interface_tests.yaml | 17 + qa/suites/smoke/basic/tasks/mon_thrash.yaml | 36 + qa/suites/smoke/basic/tasks/rados_api_tests.yaml | 30 + qa/suites/smoke/basic/tasks/rados_bench.yaml | 47 + qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml | 51 + qa/suites/smoke/basic/tasks/rados_cls_all.yaml | 16 + qa/suites/smoke/basic/tasks/rados_ec_snaps.yaml | 41 + qa/suites/smoke/basic/tasks/rados_python.yaml | 16 + 
.../basic/tasks/rados_workunit_loadgen_mix.yaml | 13 + qa/suites/smoke/basic/tasks/rbd_api_tests.yaml | 18 + .../smoke/basic/tasks/rbd_cli_import_export.yaml | 11 + qa/suites/smoke/basic/tasks/rbd_fsx.yaml | 30 + .../smoke/basic/tasks/rbd_python_api_tests.yaml | 10 + .../basic/tasks/rbd_workunit_suites_iozone.yaml | 17 + qa/suites/smoke/basic/tasks/rgw_ec_s3tests.yaml | 20 + qa/suites/smoke/basic/tasks/rgw_s3tests.yaml | 16 + qa/suites/smoke/basic/tasks/rgw_swift.yaml | 9 + qa/suites/stress/.qa | 1 + qa/suites/stress/bench/% | 0 qa/suites/stress/bench/.qa | 1 + qa/suites/stress/bench/clusters/.qa | 1 + .../stress/bench/clusters/fixed-3-cephfs.yaml | 1 + qa/suites/stress/bench/tasks/.qa | 1 + .../stress/bench/tasks/cfuse_workunit_snaps.yaml | 8 + .../bench/tasks/kclient_workunit_suites_fsx.yaml | 8 + qa/suites/stress/thrash/% | 0 qa/suites/stress/thrash/.qa | 1 + qa/suites/stress/thrash/clusters/.qa | 1 + qa/suites/stress/thrash/clusters/16-osd.yaml | 18 + .../stress/thrash/clusters/3-osd-1-machine.yaml | 3 + qa/suites/stress/thrash/clusters/8-osd.yaml | 10 + qa/suites/stress/thrash/thrashers/.qa | 1 + qa/suites/stress/thrash/thrashers/default.yaml | 7 + qa/suites/stress/thrash/thrashers/fast.yaml | 9 + qa/suites/stress/thrash/thrashers/more-down.yaml | 8 + qa/suites/stress/thrash/workloads/.qa | 1 + .../stress/thrash/workloads/bonnie_cfuse.yaml | 6 + .../stress/thrash/workloads/iozone_cfuse.yaml | 6 + qa/suites/stress/thrash/workloads/radosbench.yaml | 4 + qa/suites/stress/thrash/workloads/readwrite.yaml | 9 + qa/suites/teuthology/.qa | 1 + qa/suites/teuthology/buildpackages/% | 0 qa/suites/teuthology/buildpackages/.qa | 1 + .../teuthology/buildpackages/supported-all-distro | 1 + qa/suites/teuthology/buildpackages/tasks/.qa | 1 + .../teuthology/buildpackages/tasks/branch.yaml | 10 + .../teuthology/buildpackages/tasks/default.yaml | 14 + qa/suites/teuthology/buildpackages/tasks/tag.yaml | 11 + qa/suites/teuthology/ceph/% | 0 qa/suites/teuthology/ceph/.qa | 1 + qa/suites/teuthology/ceph/clusters/.qa | 1 + qa/suites/teuthology/ceph/clusters/single.yaml | 2 + qa/suites/teuthology/ceph/distros | 1 + qa/suites/teuthology/ceph/tasks/.qa | 1 + qa/suites/teuthology/ceph/tasks/teuthology.yaml | 3 + qa/suites/teuthology/integration.yaml | 2 + qa/suites/teuthology/multi-cluster/% | 0 qa/suites/teuthology/multi-cluster/.qa | 1 + qa/suites/teuthology/multi-cluster/all/.qa | 1 + qa/suites/teuthology/multi-cluster/all/ceph.yaml | 25 + .../teuthology/multi-cluster/all/thrashosds.yaml | 21 + .../teuthology/multi-cluster/all/upgrade.yaml | 51 + .../teuthology/multi-cluster/all/workunit.yaml | 23 + qa/suites/teuthology/no-ceph/% | 0 qa/suites/teuthology/no-ceph/.qa | 1 + qa/suites/teuthology/no-ceph/clusters/.qa | 1 + qa/suites/teuthology/no-ceph/clusters/single.yaml | 2 + qa/suites/teuthology/no-ceph/tasks/.qa | 1 + qa/suites/teuthology/no-ceph/tasks/teuthology.yaml | 2 + qa/suites/teuthology/nop/% | 0 qa/suites/teuthology/nop/.qa | 1 + qa/suites/teuthology/nop/all/.qa | 1 + qa/suites/teuthology/nop/all/nop.yaml | 3 + qa/suites/teuthology/rgw/% | 0 qa/suites/teuthology/rgw/.qa | 1 + qa/suites/teuthology/rgw/distros | 1 + qa/suites/teuthology/rgw/tasks/.qa | 1 + .../teuthology/rgw/tasks/s3tests-civetweb.yaml | 24 + .../teuthology/rgw/tasks/s3tests-fastcgi.yaml | 24 + qa/suites/teuthology/rgw/tasks/s3tests-fcgi.yaml | 26 + qa/suites/teuthology/workunits/.qa | 1 + qa/suites/teuthology/workunits/yes.yaml | 8 + qa/suites/tgt/.qa | 1 + qa/suites/tgt/basic/% | 1 + qa/suites/tgt/basic/.qa | 1 + 
qa/suites/tgt/basic/clusters/.qa | 1 + qa/suites/tgt/basic/clusters/fixed-3.yaml | 4 + qa/suites/tgt/basic/msgr-failures/.qa | 1 + qa/suites/tgt/basic/msgr-failures/few.yaml | 7 + qa/suites/tgt/basic/msgr-failures/many.yaml | 7 + qa/suites/tgt/basic/tasks/.qa | 1 + qa/suites/tgt/basic/tasks/blogbench.yaml | 9 + qa/suites/tgt/basic/tasks/bonnie.yaml | 9 + qa/suites/tgt/basic/tasks/dbench-short.yaml | 9 + qa/suites/tgt/basic/tasks/dbench.yaml | 9 + qa/suites/tgt/basic/tasks/ffsb.yaml | 9 + qa/suites/tgt/basic/tasks/fio.yaml | 9 + qa/suites/tgt/basic/tasks/fsstress.yaml | 9 + qa/suites/tgt/basic/tasks/fsx.yaml | 9 + qa/suites/tgt/basic/tasks/fsync-tester.yaml | 9 + qa/suites/tgt/basic/tasks/iogen.yaml | 9 + qa/suites/tgt/basic/tasks/iozone-sync.yaml | 9 + qa/suites/tgt/basic/tasks/iozone.yaml | 9 + qa/suites/tgt/basic/tasks/pjd.yaml | 9 + .../nautilus-client-x/.qa | 1 + .../nautilus-client-x/rbd/% | 0 .../nautilus-client-x/rbd/.qa | 1 + .../nautilus-client-x/rbd/0-cluster/+ | 0 .../nautilus-client-x/rbd/0-cluster/.qa | 1 + .../nautilus-client-x/rbd/0-cluster/openstack.yaml | 4 + .../nautilus-client-x/rbd/0-cluster/start.yaml | 21 + .../nautilus-client-x/rbd/1-install/.qa | 1 + .../rbd/1-install/nautilus-client-x.yaml | 11 + .../nautilus-client-x/rbd/2-features/.qa | 1 + .../nautilus-client-x/rbd/2-features/defaults.yaml | 6 + .../nautilus-client-x/rbd/2-features/layering.yaml | 6 + .../nautilus-client-x/rbd/3-workload/.qa | 1 + .../rbd/3-workload/rbd_notification_tests.yaml | 33 + .../nautilus-client-x/rbd/supported/.qa | 1 + .../rbd/supported/ubuntu_18.04.yaml | 1 + .../upgrade-clients/client-upgrade-nautilus/.qa | 1 + .../client-upgrade-nautilus/nautilus-client-x/.qa | 1 + .../nautilus-client-x/basic/% | 0 .../nautilus-client-x/basic/.qa | 1 + .../nautilus-client-x/basic/0-cluster/+ | 0 .../nautilus-client-x/basic/0-cluster/.qa | 1 + .../basic/0-cluster/openstack.yaml | 4 + .../nautilus-client-x/basic/0-cluster/start.yaml | 23 + .../nautilus-client-x/basic/1-install/.qa | 1 + .../basic/1-install/nautilus-client-x.yaml | 11 + .../nautilus-client-x/basic/2-workload/.qa | 1 + .../basic/2-workload/devstack-tempest-gate.yaml | 56 + .../nautilus-client-x/basic/supported/.qa | 1 + .../basic/supported/centos_7.6.yaml | 1 + .../basic/supported/rhel_7.6.yaml | 1 + .../basic/supported/ubuntu_16.04.yaml | 1 + .../basic/supported/ubuntu_18.04.yaml | 1 + .../nautilus-client-x/rbd/% | 0 .../nautilus-client-x/rbd/.qa | 1 + .../nautilus-client-x/rbd/0-cluster/+ | 0 .../nautilus-client-x/rbd/0-cluster/.qa | 1 + .../nautilus-client-x/rbd/0-cluster/openstack.yaml | 4 + .../nautilus-client-x/rbd/0-cluster/start.yaml | 21 + .../nautilus-client-x/rbd/1-install/.qa | 1 + .../rbd/1-install/nautilus-client-x.yaml | 11 + .../nautilus-client-x/rbd/2-features/.qa | 1 + .../nautilus-client-x/rbd/2-features/defaults.yaml | 6 + .../nautilus-client-x/rbd/2-features/layering.yaml | 6 + .../nautilus-client-x/rbd/3-workload/.qa | 1 + .../rbd/3-workload/rbd_notification_tests.yaml | 21 + .../nautilus-client-x/rbd/supported/.qa | 1 + .../rbd/supported/centos_7.6.yaml | 1 + .../nautilus-client-x/rbd/supported/rhel_7.6.yaml | 1 + .../rbd/supported/ubuntu_16.04.yaml | 1 + .../rbd/supported/ubuntu_18.04.yaml | 1 + qa/suites/upgrade/.qa | 1 + qa/suites/upgrade/luminous-x/.qa | 1 + qa/suites/upgrade/luminous-x/parallel/% | 0 qa/suites/upgrade/luminous-x/parallel/.qa | 1 + qa/suites/upgrade/luminous-x/parallel/0-cluster/+ | 0 .../upgrade/luminous-x/parallel/0-cluster/.qa | 1 + .../luminous-x/parallel/0-cluster/openstack.yaml | 4 + 
.../luminous-x/parallel/0-cluster/start.yaml | 50 + .../upgrade/luminous-x/parallel/1-ceph-install/.qa | 1 + .../parallel/1-ceph-install/luminous.yaml | 54 + .../1.1-pg-log-overrides/normal_pg_log.yaml | 1 + .../1.1-pg-log-overrides/short_pg_log.yaml | 6 + qa/suites/upgrade/luminous-x/parallel/2-workload/+ | 0 .../upgrade/luminous-x/parallel/2-workload/.qa | 1 + .../luminous-x/parallel/2-workload/blogbench.yaml | 14 + .../parallel/2-workload/ec-rados-default.yaml | 24 + .../luminous-x/parallel/2-workload/rados_api.yaml | 11 + .../parallel/2-workload/rados_loadgenbig.yaml | 11 + .../parallel/2-workload/rgw_ragweed_prepare.yaml | 14 + .../parallel/2-workload/test_rbd_api.yaml | 11 + .../parallel/2-workload/test_rbd_python.yaml | 11 + .../luminous-x/parallel/3-upgrade-sequence/.qa | 1 + .../parallel/3-upgrade-sequence/upgrade-all.yaml | 22 + .../3-upgrade-sequence/upgrade-mon-osd-mds.yaml | 50 + qa/suites/upgrade/luminous-x/parallel/4-msgr2.yaml | 5 + .../upgrade/luminous-x/parallel/4-nautilus.yaml | 1 + .../upgrade/luminous-x/parallel/5-final-workload/+ | 0 .../luminous-x/parallel/5-final-workload/.qa | 1 + .../parallel/5-final-workload/blogbench.yaml | 13 + .../5-final-workload/rados-snaps-few-objects.yaml | 17 + .../5-final-workload/rados_loadgenmix.yaml | 9 + .../5-final-workload/rados_mon_thrash.yaml | 18 + .../parallel/5-final-workload/rbd_cls.yaml | 9 + .../5-final-workload/rbd_import_export.yaml | 11 + .../luminous-x/parallel/5-final-workload/rgw.yaml | 8 + .../5-final-workload/rgw_ragweed_check.yaml | 11 + .../parallel/5-final-workload/rgw_swift.yaml | 10 + qa/suites/upgrade/luminous-x/parallel/objectstore | 1 + .../luminous-x/parallel/supported-all-distro | 1 + .../upgrade/luminous-x/stress-split-erasure-code/% | 0 .../luminous-x/stress-split-erasure-code/.qa | 1 + .../luminous-x/stress-split-erasure-code/0-cluster | 1 + .../stress-split-erasure-code/1-luminous-install | 1 + .../1.1-pg-log-overrides/normal_pg_log.yaml | 1 + .../1.1-pg-log-overrides/short_pg_log.yaml | 6 + .../stress-split-erasure-code/2-partial-upgrade | 1 + .../stress-split-erasure-code/3-thrash/.qa | 1 + .../3-thrash/default.yaml | 27 + .../stress-split-erasure-code/4-ec-workload.yaml | 22 + .../5-finish-upgrade.yaml | 1 + .../7-final-workload.yaml | 35 + .../stress-split-erasure-code/objectstore | 1 + .../stress-split-erasure-code/supported-all-distro | 1 + .../thrashosds-health.yaml | 1 + qa/suites/upgrade/luminous-x/stress-split/% | 0 qa/suites/upgrade/luminous-x/stress-split/.qa | 1 + .../upgrade/luminous-x/stress-split/0-cluster/+ | 0 .../upgrade/luminous-x/stress-split/0-cluster/.qa | 1 + .../stress-split/0-cluster/openstack.yaml | 6 + .../luminous-x/stress-split/0-cluster/start.yaml | 40 + .../luminous-x/stress-split/1-ceph-install/.qa | 1 + .../stress-split/1-ceph-install/luminous.yaml | 29 + .../1.1-pg-log-overrides/normal_pg_log.yaml | 1 + .../1.1-pg-log-overrides/short_pg_log.yaml | 6 + .../luminous-x/stress-split/2-partial-upgrade/.qa | 1 + .../stress-split/2-partial-upgrade/firsthalf.yaml | 14 + .../upgrade/luminous-x/stress-split/3-thrash/.qa | 1 + .../luminous-x/stress-split/3-thrash/default.yaml | 26 + .../upgrade/luminous-x/stress-split/4-workload/+ | 0 .../upgrade/luminous-x/stress-split/4-workload/.qa | 1 + .../stress-split/4-workload/radosbench.yaml | 52 + .../stress-split/4-workload/rbd-cls.yaml | 10 + .../stress-split/4-workload/rbd-import-export.yaml | 12 + .../stress-split/4-workload/rbd_api.yaml | 10 + .../stress-split/4-workload/readwrite.yaml | 16 + 
.../stress-split/4-workload/snaps-few-objects.yaml | 18 + .../luminous-x/stress-split/5-finish-upgrade.yaml | 15 + .../upgrade/luminous-x/stress-split/6-msgr2.yaml | 6 + .../luminous-x/stress-split/6-nautilus.yaml | 1 + .../luminous-x/stress-split/7-final-workload/+ | 0 .../luminous-x/stress-split/7-final-workload/.qa | 1 + .../stress-split/7-final-workload/rgw-swift.yaml | 12 + .../7-final-workload/snaps-many-objects.yaml | 16 + .../luminous-x/stress-split/objectstore/.qa | 1 + .../stress-split/objectstore/bluestore-bitmap.yaml | 1 + .../stress-split/objectstore/filestore-xfs.yaml | 1 + .../luminous-x/stress-split/supported-all-distro | 1 + .../luminous-x/stress-split/thrashosds-health.yaml | 1 + qa/suites/upgrade/mimic-x-singleton/% | 0 qa/suites/upgrade/mimic-x-singleton/.qa | 1 + qa/suites/upgrade/mimic-x-singleton/0-cluster/+ | 0 qa/suites/upgrade/mimic-x-singleton/0-cluster/.qa | 1 + .../mimic-x-singleton/0-cluster/openstack.yaml | 6 + .../upgrade/mimic-x-singleton/0-cluster/start.yaml | 37 + qa/suites/upgrade/mimic-x-singleton/1-install/.qa | 1 + .../upgrade/mimic-x-singleton/1-install/mimic.yaml | 25 + .../mimic-x-singleton/2-partial-upgrade/.qa | 1 + .../2-partial-upgrade/firsthalf.yaml | 18 + qa/suites/upgrade/mimic-x-singleton/3-thrash/.qa | 1 + .../mimic-x-singleton/3-thrash/default.yaml | 22 + qa/suites/upgrade/mimic-x-singleton/4-workload/+ | 0 qa/suites/upgrade/mimic-x-singleton/4-workload/.qa | 1 + .../mimic-x-singleton/4-workload/rbd-cls.yaml | 11 + .../4-workload/rbd-import-export.yaml | 13 + .../mimic-x-singleton/4-workload/readwrite.yaml | 17 + .../4-workload/snaps-few-objects.yaml | 19 + qa/suites/upgrade/mimic-x-singleton/5-workload/+ | 0 qa/suites/upgrade/mimic-x-singleton/5-workload/.qa | 1 + .../mimic-x-singleton/5-workload/radosbench.yaml | 41 + .../mimic-x-singleton/5-workload/rbd_api.yaml | 11 + .../mimic-x-singleton/6-finish-upgrade.yaml | 30 + .../upgrade/mimic-x-singleton/7-nautilus.yaml | 1 + qa/suites/upgrade/mimic-x-singleton/8-workload/+ | 0 qa/suites/upgrade/mimic-x-singleton/8-workload/.qa | 1 + .../mimic-x-singleton/8-workload/rbd-python.yaml | 9 + .../mimic-x-singleton/8-workload/rgw-swift.yaml | 12 + .../8-workload/snaps-many-objects.yaml | 16 + .../mimic-x-singleton/supported-random-distro$ | 1 + .../mimic-x-singleton/thrashosds-health.yaml | 1 + qa/suites/upgrade/mimic-x/.qa | 1 + qa/suites/upgrade/mimic-x/parallel/% | 0 qa/suites/upgrade/mimic-x/parallel/.qa | 1 + qa/suites/upgrade/mimic-x/parallel/0-cluster/+ | 0 qa/suites/upgrade/mimic-x/parallel/0-cluster/.qa | 1 + .../mimic-x/parallel/0-cluster/openstack.yaml | 4 + .../upgrade/mimic-x/parallel/0-cluster/start.yaml | 49 + .../upgrade/mimic-x/parallel/1-ceph-install/.qa | 1 + .../mimic-x/parallel/1-ceph-install/mimic.yaml | 55 + .../1.1-pg-log-overrides/normal_pg_log.yaml | 1 + .../1.1-pg-log-overrides/short_pg_log.yaml | 6 + qa/suites/upgrade/mimic-x/parallel/2-workload/+ | 0 qa/suites/upgrade/mimic-x/parallel/2-workload/.qa | 1 + .../mimic-x/parallel/2-workload/blogbench.yaml | 14 + .../parallel/2-workload/ec-rados-default.yaml | 24 + .../mimic-x/parallel/2-workload/rados_api.yaml | 11 + .../parallel/2-workload/rados_loadgenbig.yaml | 11 + .../parallel/2-workload/rgw_ragweed_prepare.yaml | 14 + .../mimic-x/parallel/2-workload/test_rbd_api.yaml | 11 + .../parallel/2-workload/test_rbd_python.yaml | 11 + .../mimic-x/parallel/3-upgrade-sequence/.qa | 1 + .../parallel/3-upgrade-sequence/upgrade-all.yaml | 22 + .../3-upgrade-sequence/upgrade-mon-osd-mds.yaml | 51 + 
qa/suites/upgrade/mimic-x/parallel/4-msgr2.yaml | 5 + qa/suites/upgrade/mimic-x/parallel/4-nautilus.yaml | 1 + .../upgrade/mimic-x/parallel/5-final-workload/+ | 0 .../upgrade/mimic-x/parallel/5-final-workload/.qa | 1 + .../parallel/5-final-workload/blogbench.yaml | 13 + .../5-final-workload/rados-snaps-few-objects.yaml | 17 + .../5-final-workload/rados_loadgenmix.yaml | 9 + .../5-final-workload/rados_mon_thrash.yaml | 18 + .../mimic-x/parallel/5-final-workload/rbd_cls.yaml | 9 + .../5-final-workload/rbd_import_export.yaml | 11 + .../mimic-x/parallel/5-final-workload/rgw.yaml | 8 + .../5-final-workload/rgw_ragweed_check.yaml | 11 + .../parallel/5-final-workload/rgw_swift.yaml | 10 + qa/suites/upgrade/mimic-x/parallel/objectstore | 1 + .../upgrade/mimic-x/parallel/supported-all-distro | 1 + .../upgrade/mimic-x/stress-split-erasure-code/% | 0 .../upgrade/mimic-x/stress-split-erasure-code/.qa | 1 + .../mimic-x/stress-split-erasure-code/0-cluster | 1 + .../stress-split-erasure-code/1-luminous-install | 1 + .../1.1-pg-log-overrides/normal_pg_log.yaml | 1 + .../1.1-pg-log-overrides/short_pg_log.yaml | 6 + .../stress-split-erasure-code/2-partial-upgrade | 1 + .../mimic-x/stress-split-erasure-code/3-thrash/.qa | 1 + .../3-thrash/default.yaml | 26 + .../stress-split-erasure-code/4-ec-workload.yaml | 22 + .../5-finish-upgrade.yaml | 1 + .../7-final-workload.yaml | 35 + .../mimic-x/stress-split-erasure-code/objectstore | 1 + .../stress-split-erasure-code/supported-all-distro | 1 + .../thrashosds-health.yaml | 1 + qa/suites/upgrade/mimic-x/stress-split/% | 0 qa/suites/upgrade/mimic-x/stress-split/.qa | 1 + qa/suites/upgrade/mimic-x/stress-split/0-cluster/+ | 0 .../upgrade/mimic-x/stress-split/0-cluster/.qa | 1 + .../mimic-x/stress-split/0-cluster/openstack.yaml | 6 + .../mimic-x/stress-split/0-cluster/start.yaml | 42 + .../mimic-x/stress-split/1-ceph-install/.qa | 1 + .../mimic-x/stress-split/1-ceph-install/mimic.yaml | 29 + .../1.1-pg-log-overrides/normal_pg_log.yaml | 1 + .../1.1-pg-log-overrides/short_pg_log.yaml | 6 + .../mimic-x/stress-split/2-partial-upgrade/.qa | 1 + .../stress-split/2-partial-upgrade/firsthalf.yaml | 14 + .../upgrade/mimic-x/stress-split/3-thrash/.qa | 1 + .../mimic-x/stress-split/3-thrash/default.yaml | 26 + .../upgrade/mimic-x/stress-split/4-workload/+ | 0 .../upgrade/mimic-x/stress-split/4-workload/.qa | 1 + .../stress-split/4-workload/radosbench.yaml | 52 + .../mimic-x/stress-split/4-workload/rbd-cls.yaml | 10 + .../stress-split/4-workload/rbd-import-export.yaml | 12 + .../mimic-x/stress-split/4-workload/rbd_api.yaml | 10 + .../mimic-x/stress-split/4-workload/readwrite.yaml | 16 + .../4-workload/rgw_ragweed_prepare.yaml | 14 + .../stress-split/4-workload/snaps-few-objects.yaml | 18 + .../mimic-x/stress-split/5-finish-upgrade.yaml | 14 + .../upgrade/mimic-x/stress-split/6-nautilus.yaml | 1 + .../upgrade/mimic-x/stress-split/6.1-msgr2.yaml | 5 + .../mimic-x/stress-split/7-final-workload/+ | 0 .../mimic-x/stress-split/7-final-workload/.qa | 1 + .../stress-split/7-final-workload/rbd-python.yaml | 10 + .../7-final-workload/rgw-swift-ragweed_check.yaml | 19 + .../7-final-workload/snaps-many-objects.yaml | 16 + .../upgrade/mimic-x/stress-split/objectstore/.qa | 1 + .../stress-split/objectstore/bluestore-bitmap.yaml | 1 + .../stress-split/objectstore/filestore-xfs.yaml | 1 + .../mimic-x/stress-split/supported-all-distro | 1 + .../mimic-x/stress-split/thrashosds-health.yaml | 1 + qa/suites/upgrade/nautilus-p2p/.qa | 1 + .../upgrade/nautilus-p2p/nautilus-p2p-parallel/% | 0 
.../point-to-point-upgrade.yaml | 182 + .../nautilus-p2p-parallel/supported-all-distro | 1 + .../nautilus-p2p/nautilus-p2p-stress-split/% | 0 .../nautilus-p2p/nautilus-p2p-stress-split/.qa | 1 + .../nautilus-p2p-stress-split/0-cluster/+ | 0 .../nautilus-p2p-stress-split/0-cluster/.qa | 1 + .../0-cluster/openstack.yaml | 6 + .../nautilus-p2p-stress-split/0-cluster/start.yaml | 35 + .../nautilus-p2p-stress-split/1-ceph-install/.qa | 1 + .../1-ceph-install/nautilus.yaml | 19 + .../1.1.short_pg_log.yaml | 1 + .../2-partial-upgrade/.qa | 1 + .../2-partial-upgrade/firsthalf.yaml | 13 + .../nautilus-p2p-stress-split/3-thrash/.qa | 1 + .../3-thrash/default.yaml | 27 + .../nautilus-p2p-stress-split/4-workload/+ | 0 .../nautilus-p2p-stress-split/4-workload/.qa | 1 + .../nautilus-p2p-stress-split/4-workload/fsx.yaml | 8 + .../4-workload/radosbench.yaml | 52 + .../4-workload/rbd-cls.yaml | 10 + .../4-workload/rbd-import-export.yaml | 12 + .../4-workload/rbd_api.yaml | 10 + .../4-workload/readwrite.yaml | 16 + .../4-workload/snaps-few-objects.yaml | 18 + .../5-finish-upgrade.yaml | 8 + .../nautilus-p2p-stress-split/7-final-workload/+ | 0 .../nautilus-p2p-stress-split/7-final-workload/.qa | 1 + .../7-final-workload/rbd-python.yaml | 10 + .../7-final-workload/rgw-swift.yaml | 12 + .../7-final-workload/snaps-many-objects.yaml | 16 + .../nautilus-p2p-stress-split/objectstore/.qa | 1 + .../objectstore/bluestore-bitmap.yaml | 1 + .../objectstore/default.yaml | 0 .../objectstore/filestore-xfs.yaml | 1 + .../nautilus-p2p-stress-split/supported-all-distro | 1 + .../thrashosds-health.yaml | 1 + qa/tasks/__init__.py | 6 + qa/tasks/admin_socket.py | 194 + qa/tasks/autotest.py | 168 + qa/tasks/aver.py | 67 + qa/tasks/blktrace.py | 96 + qa/tasks/boto.cfg.template | 2 + qa/tasks/cbt.py | 283 + qa/tasks/ceph.conf.template | 105 + qa/tasks/ceph.py | 1896 ++++ qa/tasks/ceph_client.py | 42 + qa/tasks/ceph_deploy.py | 932 ++ qa/tasks/ceph_fuse.py | 160 + qa/tasks/ceph_manager.py | 2642 ++++++ qa/tasks/ceph_objectstore_tool.py | 663 ++ qa/tasks/ceph_test_case.py | 203 + qa/tasks/cephfs/__init__.py | 0 qa/tasks/cephfs/cephfs_test_case.py | 324 + qa/tasks/cephfs/filesystem.py | 1386 +++ qa/tasks/cephfs/fuse_mount.py | 502 ++ qa/tasks/cephfs/kernel_mount.py | 260 + qa/tasks/cephfs/mount.py | 728 ++ qa/tasks/cephfs/test_admin.py | 229 + qa/tasks/cephfs/test_auto_repair.py | 90 + qa/tasks/cephfs/test_backtrace.py | 78 + qa/tasks/cephfs/test_cap_flush.py | 64 + qa/tasks/cephfs/test_cephfs_shell.py | 279 + qa/tasks/cephfs/test_client_limits.py | 330 + qa/tasks/cephfs/test_client_recovery.py | 633 ++ qa/tasks/cephfs/test_damage.py | 569 ++ qa/tasks/cephfs/test_data_scan.py | 695 ++ qa/tasks/cephfs/test_dump_tree.py | 66 + qa/tasks/cephfs/test_exports.py | 176 + qa/tasks/cephfs/test_failover.py | 638 ++ qa/tasks/cephfs/test_flush.py | 113 + qa/tasks/cephfs/test_forward_scrub.py | 298 + qa/tasks/cephfs/test_fragment.py | 229 + qa/tasks/cephfs/test_full.py | 398 + qa/tasks/cephfs/test_journal_migration.py | 100 + qa/tasks/cephfs/test_journal_repair.py | 447 + qa/tasks/cephfs/test_mantle.py | 109 + qa/tasks/cephfs/test_misc.py | 291 + qa/tasks/cephfs/test_openfiletable.py | 41 + qa/tasks/cephfs/test_pool_perm.py | 113 + qa/tasks/cephfs/test_quota.py | 106 + qa/tasks/cephfs/test_readahead.py | 31 + qa/tasks/cephfs/test_recovery_pool.py | 207 + qa/tasks/cephfs/test_scrub.py | 175 + qa/tasks/cephfs/test_scrub_checks.py | 405 + qa/tasks/cephfs/test_sessionmap.py | 236 + qa/tasks/cephfs/test_snapshots.py | 530 ++ 
qa/tasks/cephfs/test_strays.py | 973 ++ qa/tasks/cephfs/test_volume_client.py | 1765 ++++ qa/tasks/cephfs/test_volumes.py | 4435 +++++++++ qa/tasks/cephfs_test_runner.py | 209 + qa/tasks/cephfs_upgrade_snap.py | 45 + qa/tasks/check_counter.py | 98 + qa/tasks/cifs_mount.py | 137 + qa/tasks/cram.py | 151 + qa/tasks/create_verify_lfn_objects.py | 83 + qa/tasks/devstack.py | 379 + qa/tasks/die_on_err.py | 70 + qa/tasks/divergent_priors.py | 160 + qa/tasks/divergent_priors2.py | 192 + qa/tasks/dnsmasq.py | 170 + qa/tasks/dump_stuck.py | 161 + qa/tasks/ec_lost_unfound.py | 158 + qa/tasks/exec_on_cleanup.py | 61 + qa/tasks/filestore_idempotent.py | 83 + qa/tasks/fs.py | 66 + qa/tasks/kclient.py | 130 + qa/tasks/keystone.py | 397 + qa/tasks/locktest.py | 134 + qa/tasks/logrotate.conf | 13 + qa/tasks/lost_unfound.py | 176 + qa/tasks/manypools.py | 73 + qa/tasks/mds_creation_failure.py | 69 + qa/tasks/mds_pre_upgrade.py | 43 + qa/tasks/mds_thrash.py | 543 ++ qa/tasks/metadata.yaml | 2 + qa/tasks/mgr/__init__.py | 0 qa/tasks/mgr/dashboard/__init__.py | 0 qa/tasks/mgr/dashboard/helper.py | 574 ++ qa/tasks/mgr/dashboard/test_auth.py | 240 + qa/tasks/mgr/dashboard/test_cephfs.py | 70 + .../mgr/dashboard/test_cluster_configuration.py | 388 + .../mgr/dashboard/test_erasure_code_profile.py | 110 + qa/tasks/mgr/dashboard/test_ganesha.py | 168 + qa/tasks/mgr/dashboard/test_health.py | 305 + qa/tasks/mgr/dashboard/test_host.py | 31 + qa/tasks/mgr/dashboard/test_logs.py | 38 + qa/tasks/mgr/dashboard/test_mgr_module.py | 160 + qa/tasks/mgr/dashboard/test_monitor.py | 25 + qa/tasks/mgr/dashboard/test_osd.py | 157 + qa/tasks/mgr/dashboard/test_perf_counters.py | 71 + qa/tasks/mgr/dashboard/test_pool.py | 364 + qa/tasks/mgr/dashboard/test_rbd.py | 797 ++ qa/tasks/mgr/dashboard/test_rbd_mirroring.py | 177 + qa/tasks/mgr/dashboard/test_requests.py | 32 + qa/tasks/mgr/dashboard/test_rgw.py | 710 ++ qa/tasks/mgr/dashboard/test_role.py | 140 + qa/tasks/mgr/dashboard/test_settings.py | 65 + qa/tasks/mgr/dashboard/test_summary.py | 40 + qa/tasks/mgr/dashboard/test_user.py | 115 + qa/tasks/mgr/mgr_test_case.py | 204 + qa/tasks/mgr/test_crash.py | 108 + qa/tasks/mgr/test_dashboard.py | 140 + qa/tasks/mgr/test_failover.py | 148 + qa/tasks/mgr/test_insights.py | 203 + qa/tasks/mgr/test_module_selftest.py | 335 + qa/tasks/mgr/test_orchestrator_cli.py | 154 + qa/tasks/mgr/test_progress.py | 376 + qa/tasks/mgr/test_prometheus.py | 79 + qa/tasks/mgr/test_ssh_orchestrator.py | 23 + qa/tasks/mon_clock_skew_check.py | 73 + qa/tasks/mon_recovery.py | 80 + qa/tasks/mon_thrash.py | 343 + qa/tasks/multibench.py | 61 + qa/tasks/netem.py | 268 + qa/tasks/object_source_down.py | 101 + qa/tasks/omapbench.py | 85 + qa/tasks/openssl_keys.py | 227 + qa/tasks/osd_backfill.py | 104 + qa/tasks/osd_failsafe_enospc.py | 219 + qa/tasks/osd_max_pg_per_osd.py | 126 + qa/tasks/osd_recovery.py | 193 + qa/tasks/peer.py | 90 + qa/tasks/peering_speed_test.py | 87 + qa/tasks/populate_rbd_pool.py | 82 + qa/tasks/qemu.py | 580 ++ qa/tasks/rados.py | 272 + qa/tasks/radosbench.py | 140 + qa/tasks/radosbenchsweep.py | 223 + qa/tasks/radosgw_admin.py | 953 ++ qa/tasks/radosgw_admin_rest.py | 721 ++ qa/tasks/ragweed.py | 390 + qa/tasks/rbd.py | 628 ++ qa/tasks/rbd_fio.py | 224 + qa/tasks/rbd_fsx.py | 114 + qa/tasks/rbd_mirror.py | 119 + qa/tasks/rbd_mirror_thrash.py | 214 + qa/tasks/rebuild_mondb.py | 224 + qa/tasks/reg11184.py | 242 + qa/tasks/rep_lost_unfound_delete.py | 178 + qa/tasks/repair_test.py | 309 + qa/tasks/resolve_stuck_peering.py | 112 + 
qa/tasks/restart.py | 163 + qa/tasks/rgw.py | 357 + qa/tasks/rgw_logsocket.py | 165 + qa/tasks/rgw_multi | 1 + qa/tasks/rgw_multisite.py | 436 + qa/tasks/rgw_multisite_tests.py | 99 + qa/tasks/s3a_hadoop.py | 289 + qa/tasks/s3readwrite.py | 353 + qa/tasks/s3roundtrip.py | 326 + qa/tasks/s3tests.py | 424 + qa/tasks/samba.py | 247 + qa/tasks/scrub.py | 117 + qa/tasks/scrub_test.py | 403 + qa/tasks/swift.py | 256 + qa/tasks/systemd.py | 135 + qa/tasks/tempest.py | 284 + qa/tasks/tests/__init__.py | 0 qa/tasks/tests/test_devstack.py | 48 + qa/tasks/tests/test_radosgw_admin.py | 35 + qa/tasks/teuthology_integration.py | 19 + qa/tasks/tgt.py | 177 + qa/tasks/thrash_pool_snaps.py | 61 + qa/tasks/thrashosds-health.yaml | 15 + qa/tasks/thrashosds.py | 219 + qa/tasks/tox.py | 50 + qa/tasks/userdata_setup.yaml | 25 + qa/tasks/userdata_teardown.yaml | 11 + qa/tasks/util/__init__.py | 26 + qa/tasks/util/rados.py | 87 + qa/tasks/util/rgw.py | 94 + qa/tasks/util/test/__init__.py | 0 qa/tasks/util/test/test_rados.py | 40 + qa/tasks/util/workunit.py | 78 + qa/tasks/vstart_runner.py | 1169 +++ qa/tasks/watch_notify_same_primary.py | 130 + qa/tasks/watch_notify_stress.py | 70 + qa/tasks/workunit.py | 423 + qa/timezone/eastern.yaml | 4 + qa/timezone/pacific.yaml | 4 + qa/timezone/random.yaml | 5 + qa/tox.ini | 15 + qa/valgrind.supp | 627 ++ qa/workunits/Makefile | 4 + qa/workunits/caps/mon_commands.sh | 25 + qa/workunits/ceph-helpers-root.sh | 126 + qa/workunits/ceph-tests/ceph-admin-commands.sh | 10 + qa/workunits/cephtool/test.sh | 2920 ++++++ qa/workunits/cephtool/test_daemon.sh | 43 + qa/workunits/cephtool/test_kvstore_tool.sh | 71 + qa/workunits/cls/test_cls_hello.sh | 5 + qa/workunits/cls/test_cls_journal.sh | 6 + qa/workunits/cls/test_cls_lock.sh | 5 + qa/workunits/cls/test_cls_log.sh | 5 + qa/workunits/cls/test_cls_numops.sh | 5 + qa/workunits/cls/test_cls_rbd.sh | 6 + qa/workunits/cls/test_cls_refcount.sh | 5 + qa/workunits/cls/test_cls_rgw.sh | 8 + qa/workunits/cls/test_cls_sdk.sh | 5 + qa/workunits/direct_io/.gitignore | 3 + qa/workunits/direct_io/Makefile | 11 + qa/workunits/direct_io/big.sh | 6 + qa/workunits/direct_io/direct_io_test.c | 312 + qa/workunits/direct_io/misc.sh | 16 + qa/workunits/direct_io/test_short_dio_read.c | 57 + qa/workunits/direct_io/test_sync_io.c | 250 + qa/workunits/erasure-code/.gitignore | 2 + qa/workunits/erasure-code/bench.html | 34 + qa/workunits/erasure-code/bench.sh | 188 + .../erasure-code/encode-decode-non-regression.sh | 40 + qa/workunits/erasure-code/examples.css | 97 + .../erasure-code/jquery.flot.categories.js | 190 + qa/workunits/erasure-code/jquery.flot.js | 3168 +++++++ qa/workunits/erasure-code/jquery.js | 9472 ++++++++++++++++++++ qa/workunits/erasure-code/plot.js | 82 + qa/workunits/false.sh | 3 + qa/workunits/fs/.gitignore | 1 + qa/workunits/fs/Makefile | 11 + qa/workunits/fs/misc/acl.sh | 50 + qa/workunits/fs/misc/chmod.sh | 60 + qa/workunits/fs/misc/direct_io.py | 50 + qa/workunits/fs/misc/dirfrag.sh | 52 + qa/workunits/fs/misc/filelock_deadlock.py | 72 + qa/workunits/fs/misc/filelock_interrupt.py | 87 + qa/workunits/fs/misc/i_complete_vs_rename.sh | 31 + qa/workunits/fs/misc/layout_vxattrs.sh | 115 + qa/workunits/fs/misc/mkpool_layout_vxattrs.sh | 15 + qa/workunits/fs/misc/multiple_rsync.sh | 25 + qa/workunits/fs/misc/rstats.sh | 80 + qa/workunits/fs/misc/subvolume.sh | 63 + qa/workunits/fs/misc/trivial_sync.sh | 7 + qa/workunits/fs/misc/xattrs.sh | 14 + qa/workunits/fs/multiclient_sync_read_eof.py | 42 + qa/workunits/fs/norstats/kernel_untar_tar.sh 
| 26 + qa/workunits/fs/quota/quota.sh | 128 + qa/workunits/fs/snap-hierarchy.sh | 24 + qa/workunits/fs/snaps/snap-rm-diff.sh | 11 + qa/workunits/fs/snaps/snaptest-0.sh | 27 + qa/workunits/fs/snaps/snaptest-1.sh | 31 + qa/workunits/fs/snaps/snaptest-2.sh | 61 + qa/workunits/fs/snaps/snaptest-authwb.sh | 14 + qa/workunits/fs/snaps/snaptest-capwb.sh | 35 + qa/workunits/fs/snaps/snaptest-dir-rename.sh | 19 + qa/workunits/fs/snaps/snaptest-double-null.sh | 25 + qa/workunits/fs/snaps/snaptest-estale.sh | 15 + qa/workunits/fs/snaps/snaptest-git-ceph.sh | 35 + qa/workunits/fs/snaps/snaptest-hardlink.sh | 27 + qa/workunits/fs/snaps/snaptest-intodir.sh | 24 + .../fs/snaps/snaptest-multiple-capsnaps.sh | 44 + qa/workunits/fs/snaps/snaptest-parents.sh | 41 + qa/workunits/fs/snaps/snaptest-realm-split.sh | 33 + qa/workunits/fs/snaps/snaptest-snap-rename.sh | 35 + qa/workunits/fs/snaps/snaptest-snap-rm-cmp.sh | 26 + qa/workunits/fs/snaps/snaptest-upchildrealms.sh | 30 + qa/workunits/fs/snaps/snaptest-xattrwb.sh | 31 + qa/workunits/fs/snaps/untar_snap_rm.sh | 20 + qa/workunits/fs/test_o_trunc.c | 45 + qa/workunits/fs/test_o_trunc.sh | 7 + qa/workunits/fs/test_python.sh | 6 + qa/workunits/fs/upgrade/volume_client | 110 + qa/workunits/hadoop/repl.sh | 42 + qa/workunits/hadoop/terasort.sh | 76 + qa/workunits/hadoop/wordcount.sh | 35 + qa/workunits/kernel_untar_build.sh | 20 + qa/workunits/libcephfs/test.sh | 8 + qa/workunits/mgr/test_localpool.sh | 21 + qa/workunits/mon/auth_caps.sh | 130 + qa/workunits/mon/caps.py | 362 + qa/workunits/mon/caps.sh | 90 + qa/workunits/mon/config.sh | 110 + qa/workunits/mon/crush_ops.sh | 239 + qa/workunits/mon/osd.sh | 24 + qa/workunits/mon/pg_autoscaler.sh | 79 + qa/workunits/mon/ping.py | 108 + qa/workunits/mon/pool_ops.sh | 97 + qa/workunits/mon/rbd_snaps_ops.sh | 61 + qa/workunits/mon/test_config_key_caps.sh | 201 + qa/workunits/mon/test_mon_config_key.py | 481 + qa/workunits/mon/test_mon_osdmap_prune.sh | 205 + qa/workunits/objectstore/test_fuse.sh | 129 + qa/workunits/osdc/stress_objectcacher.sh | 28 + qa/workunits/post-file.sh | 8 + qa/workunits/rados/clone.sh | 13 + qa/workunits/rados/load-gen-big.sh | 10 + qa/workunits/rados/load-gen-mix-small-long.sh | 10 + qa/workunits/rados/load-gen-mix-small.sh | 10 + qa/workunits/rados/load-gen-mix.sh | 10 + qa/workunits/rados/load-gen-mostlyread.sh | 10 + qa/workunits/rados/stress_watch.sh | 7 + qa/workunits/rados/test.sh | 61 + qa/workunits/rados/test_alloc_hint.sh | 177 + qa/workunits/rados/test_cache_pool.sh | 170 + qa/workunits/rados/test_crash.sh | 39 + qa/workunits/rados/test_dedup_tool.sh | 156 + qa/workunits/rados/test_envlibrados_for_rocksdb.sh | 94 + qa/workunits/rados/test_hang.sh | 8 + qa/workunits/rados/test_health_warnings.sh | 76 + qa/workunits/rados/test_large_omap_detection.py | 134 + qa/workunits/rados/test_librados_build.sh | 74 + qa/workunits/rados/test_pool_access.sh | 108 + qa/workunits/rados/test_pool_quota.sh | 68 + qa/workunits/rados/test_python.sh | 4 + qa/workunits/rados/test_rados_timeouts.sh | 48 + qa/workunits/rados/test_rados_tool.sh | 924 ++ qa/workunits/rbd/cli_generic.sh | 933 ++ qa/workunits/rbd/concurrent.sh | 375 + qa/workunits/rbd/diff.sh | 53 + qa/workunits/rbd/diff_continuous.sh | 60 + qa/workunits/rbd/huge-tickets.sh | 41 + qa/workunits/rbd/image_read.sh | 680 ++ qa/workunits/rbd/import_export.sh | 259 + qa/workunits/rbd/issue-20295.sh | 18 + qa/workunits/rbd/journal.sh | 326 + qa/workunits/rbd/kernel.sh | 100 + qa/workunits/rbd/krbd_data_pool.sh | 206 + 
qa/workunits/rbd/krbd_exclusive_option.sh | 233 + qa/workunits/rbd/krbd_fallocate.sh | 151 + qa/workunits/rbd/krbd_latest_osdmap_on_map.sh | 30 + qa/workunits/rbd/krbd_namespaces.sh | 116 + qa/workunits/rbd/krbd_stable_writes.sh | 18 + qa/workunits/rbd/krbd_udev_enumerate.sh | 66 + qa/workunits/rbd/krbd_udev_netlink_enobufs.sh | 24 + qa/workunits/rbd/krbd_udev_netns.sh | 86 + qa/workunits/rbd/krbd_udev_symlinks.sh | 116 + qa/workunits/rbd/map-snapshot-io.sh | 17 + qa/workunits/rbd/map-unmap.sh | 45 + qa/workunits/rbd/merge_diff.sh | 477 + qa/workunits/rbd/notify_master.sh | 5 + qa/workunits/rbd/notify_slave.sh | 5 + qa/workunits/rbd/permissions.sh | 272 + qa/workunits/rbd/qemu-iotests.sh | 53 + qa/workunits/rbd/qemu_dynamic_features.sh | 46 + qa/workunits/rbd/qemu_rebuild_object_map.sh | 37 + qa/workunits/rbd/rbd-ggate.sh | 242 + qa/workunits/rbd/rbd-nbd.sh | 253 + qa/workunits/rbd/rbd_groups.sh | 209 + qa/workunits/rbd/rbd_mirror.sh | 516 ++ qa/workunits/rbd/rbd_mirror_bootstrap.sh | 49 + qa/workunits/rbd/rbd_mirror_fsx_compare.sh | 38 + qa/workunits/rbd/rbd_mirror_fsx_prepare.sh | 10 + qa/workunits/rbd/rbd_mirror_ha.sh | 210 + qa/workunits/rbd/rbd_mirror_helpers.sh | 1211 +++ qa/workunits/rbd/rbd_mirror_stress.sh | 186 + qa/workunits/rbd/read-flags.sh | 61 + qa/workunits/rbd/simple_big.sh | 12 + qa/workunits/rbd/test_admin_socket.sh | 151 + qa/workunits/rbd/test_librbd.sh | 9 + qa/workunits/rbd/test_librbd_python.sh | 12 + qa/workunits/rbd/test_lock_fence.sh | 48 + qa/workunits/rbd/test_rbd_mirror.sh | 9 + qa/workunits/rbd/test_rbd_tasks.sh | 276 + qa/workunits/rbd/test_rbdmap_RBDMAPFILE.sh | 34 + qa/workunits/rbd/verify_pool.sh | 27 + qa/workunits/rename/all.sh | 37 + qa/workunits/rename/dir_pri_nul.sh | 28 + qa/workunits/rename/dir_pri_pri.sh | 11 + qa/workunits/rename/plan.txt | 111 + qa/workunits/rename/prepare.sh | 21 + qa/workunits/rename/pri_nul.sh | 11 + qa/workunits/rename/pri_pri.sh | 12 + qa/workunits/rename/pri_rem.sh | 31 + qa/workunits/rename/rem_nul.sh | 29 + qa/workunits/rename/rem_pri.sh | 29 + qa/workunits/rename/rem_rem.sh | 61 + qa/workunits/rest/test-restful.sh | 16 + qa/workunits/rest/test_mgr_rest_api.py | 97 + qa/workunits/restart/test-backtraces.py | 256 + qa/workunits/rgw/run-s3tests.sh | 83 + qa/workunits/rgw/s3_bucket_quota.pl | 393 + qa/workunits/rgw/s3_multipart_upload.pl | 151 + qa/workunits/rgw/s3_user_quota.pl | 191 + qa/workunits/rgw/s3_utilities.pm | 220 + qa/workunits/rgw/test_rgw_orphan_list.sh | 511 ++ qa/workunits/suites/blogbench.sh | 15 + qa/workunits/suites/bonnie.sh | 11 + qa/workunits/suites/cephfs_journal_tool_smoke.sh | 92 + qa/workunits/suites/dbench-short.sh | 5 + qa/workunits/suites/dbench.sh | 6 + qa/workunits/suites/ffsb.patch | 12 + qa/workunits/suites/ffsb.sh | 23 + qa/workunits/suites/fio.sh | 42 + qa/workunits/suites/fsstress.sh | 17 + qa/workunits/suites/fsx.sh | 16 + qa/workunits/suites/fsync-tester.sh | 12 + qa/workunits/suites/iogen.sh | 17 + qa/workunits/suites/iozone-sync.sh | 22 + qa/workunits/suites/iozone.sh | 7 + qa/workunits/suites/pjd.sh | 17 + qa/workunits/suites/random_write.32.ffsb | 48 + qa/workunits/suites/wac.sh | 12 + qa/workunits/true.sh | 3 + 2918 files changed, 144168 insertions(+) create mode 100644 qa/.gitignore create mode 100644 qa/.teuthology_branch create mode 100644 qa/Makefile create mode 100644 qa/README create mode 100644 qa/archs/aarch64.yaml create mode 100644 qa/archs/armv7.yaml create mode 100644 qa/archs/i686.yaml create mode 100644 qa/archs/x86_64.yaml create mode 100644 qa/btrfs/.gitignore 
create mode 100644 qa/btrfs/Makefile create mode 100644 qa/btrfs/clone_range.c create mode 100644 qa/btrfs/create_async_snap.c create mode 100644 qa/btrfs/test_async_snap.c create mode 100644 qa/btrfs/test_rmdir_async_snap.c create mode 120000 qa/cephfs/.qa create mode 100644 qa/cephfs/begin.yaml create mode 120000 qa/cephfs/clusters/.qa create mode 100644 qa/cephfs/clusters/1-mds-1-client-coloc.yaml create mode 100644 qa/cephfs/clusters/1-mds-1-client-micro.yaml create mode 100644 qa/cephfs/clusters/1-mds-1-client.yaml create mode 100644 qa/cephfs/clusters/1-mds-2-client-coloc.yaml create mode 100644 qa/cephfs/clusters/1-mds-2-client-micro.yaml create mode 100644 qa/cephfs/clusters/1-mds-2-client.yaml create mode 100644 qa/cephfs/clusters/1-mds-3-client.yaml create mode 100644 qa/cephfs/clusters/1-mds-4-client-coloc.yaml create mode 100644 qa/cephfs/clusters/1-mds-4-client.yaml create mode 100644 qa/cephfs/clusters/1a3s-mds-1c-client.yaml create mode 100644 qa/cephfs/clusters/1a3s-mds-2c-client.yaml create mode 100644 qa/cephfs/clusters/3-mds.yaml create mode 100644 qa/cephfs/clusters/9-mds.yaml create mode 100644 qa/cephfs/clusters/fixed-2-ucephfs.yaml create mode 100644 qa/cephfs/conf/+ create mode 120000 qa/cephfs/conf/.qa create mode 100644 qa/cephfs/conf/client.yaml create mode 100644 qa/cephfs/conf/mds.yaml create mode 100644 qa/cephfs/conf/mon.yaml create mode 100644 qa/cephfs/conf/osd.yaml create mode 120000 qa/cephfs/mount/.qa create mode 100644 qa/cephfs/mount/fuse.yaml create mode 100644 qa/cephfs/mount/kclient/% create mode 120000 qa/cephfs/mount/kclient/.qa create mode 100644 qa/cephfs/mount/kclient/mount.yaml create mode 100644 qa/cephfs/mount/kclient/overrides/% create mode 120000 qa/cephfs/mount/kclient/overrides/.qa create mode 120000 qa/cephfs/mount/kclient/overrides/distro/.qa create mode 100644 qa/cephfs/mount/kclient/overrides/distro/rhel/% create mode 120000 qa/cephfs/mount/kclient/overrides/distro/rhel/.qa create mode 100644 qa/cephfs/mount/kclient/overrides/distro/rhel/k-distro.yaml create mode 120000 qa/cephfs/mount/kclient/overrides/distro/rhel/rhel_latest.yaml create mode 100644 qa/cephfs/mount/kclient/overrides/ms-die-on-skipped.yaml create mode 120000 qa/cephfs/objectstore-ec/.qa create mode 120000 qa/cephfs/objectstore-ec/bluestore-bitmap.yaml create mode 100644 qa/cephfs/objectstore-ec/bluestore-comp-ec-root.yaml create mode 100644 qa/cephfs/objectstore-ec/bluestore-comp.yaml create mode 100644 qa/cephfs/objectstore-ec/bluestore-ec-root.yaml create mode 100644 qa/cephfs/objectstore-ec/filestore-xfs.yaml create mode 120000 qa/cephfs/overrides/.qa create mode 100644 qa/cephfs/overrides/frag_enable.yaml create mode 120000 qa/cephfs/overrides/fuse/.qa create mode 100644 qa/cephfs/overrides/fuse/default-perm/% create mode 120000 qa/cephfs/overrides/fuse/default-perm/.qa create mode 100644 qa/cephfs/overrides/fuse/default-perm/no.yaml create mode 100644 qa/cephfs/overrides/fuse/default-perm/yes.yaml create mode 100644 qa/cephfs/overrides/log-config.yaml create mode 100644 qa/cephfs/overrides/osd-asserts.yaml create mode 100644 qa/cephfs/overrides/session_timeout.yaml create mode 100644 qa/cephfs/overrides/whitelist_health.yaml create mode 100644 qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml create mode 120000 qa/cephfs/tasks/.qa create mode 100644 qa/cephfs/tasks/cfuse_workunit_suites_blogbench.yaml create mode 100644 qa/cephfs/tasks/cfuse_workunit_suites_dbench.yaml create mode 100644 qa/cephfs/tasks/cfuse_workunit_suites_ffsb.yaml create mode 100644 
qa/cephfs/tasks/cfuse_workunit_suites_fsstress.yaml create mode 100644 qa/cephfs/tasks/cfuse_workunit_trivial_sync.yaml create mode 100644 qa/cephfs/tasks/libcephfs_interface_tests.yaml create mode 100755 qa/client/30_subdir_mount.sh create mode 100644 qa/client/common.sh create mode 100644 qa/client/gen-1774.sh create mode 100644 qa/clusters/2-node-mgr.yaml create mode 100644 qa/clusters/extra-client.yaml create mode 100644 qa/clusters/fixed-1.yaml create mode 100644 qa/clusters/fixed-2.yaml create mode 100644 qa/clusters/fixed-3-cephfs.yaml create mode 100644 qa/clusters/fixed-3.yaml create mode 100644 qa/clusters/fixed-4.yaml create mode 100644 qa/config/rados.yaml create mode 100644 qa/crontab/teuthology-cronjobs create mode 100644 qa/debug/buildpackages.yaml create mode 100644 qa/debug/mds_client.yaml create mode 100644 qa/debug/mgr.yaml create mode 100644 qa/debug/openstack-15G.yaml create mode 100644 qa/debug/openstack-30G.yaml create mode 120000 qa/distros/a-supported-distro.yaml create mode 100644 qa/distros/all/centos.yaml create mode 100644 qa/distros/all/centos_6.3.yaml create mode 100644 qa/distros/all/centos_6.4.yaml create mode 100644 qa/distros/all/centos_6.5.yaml create mode 100644 qa/distros/all/centos_7.0.yaml create mode 100644 qa/distros/all/centos_7.1.yaml create mode 100644 qa/distros/all/centos_7.2.yaml create mode 100644 qa/distros/all/centos_7.3.yaml create mode 100644 qa/distros/all/centos_7.4.yaml create mode 100644 qa/distros/all/centos_7.5.yaml create mode 100644 qa/distros/all/centos_7.6.yaml create mode 100644 qa/distros/all/centos_7.8.yaml create mode 100644 qa/distros/all/debian_6.0.yaml create mode 100644 qa/distros/all/debian_7.0.yaml create mode 100644 qa/distros/all/debian_8.0.yaml create mode 100644 qa/distros/all/fedora_17.yaml create mode 100644 qa/distros/all/fedora_18.yaml create mode 100644 qa/distros/all/fedora_19.yaml create mode 100644 qa/distros/all/opensuse_12.2.yaml create mode 100644 qa/distros/all/opensuse_13.2.yaml create mode 100644 qa/distros/all/opensuse_15.0.yaml create mode 100644 qa/distros/all/opensuse_42.1.yaml create mode 100644 qa/distros/all/opensuse_42.2.yaml create mode 100644 qa/distros/all/opensuse_42.3.yaml create mode 100644 qa/distros/all/rhel_6.3.yaml create mode 100644 qa/distros/all/rhel_6.4.yaml create mode 100644 qa/distros/all/rhel_6.5.yaml create mode 100644 qa/distros/all/rhel_7.0.yaml create mode 100644 qa/distros/all/rhel_7.5.yaml create mode 100644 qa/distros/all/rhel_7.6.yaml create mode 100644 qa/distros/all/rhel_7.8.yaml create mode 100644 qa/distros/all/rhel_7.9.yaml create mode 120000 qa/distros/all/rhel_7.yaml create mode 100644 qa/distros/all/sle_12.2.yaml create mode 100644 qa/distros/all/ubuntu_12.04.yaml create mode 100644 qa/distros/all/ubuntu_12.10.yaml create mode 100644 qa/distros/all/ubuntu_14.04.yaml create mode 100644 qa/distros/all/ubuntu_14.04_aarch64.yaml create mode 100644 qa/distros/all/ubuntu_14.04_i686.yaml create mode 100644 qa/distros/all/ubuntu_16.04.yaml create mode 100644 qa/distros/all/ubuntu_18.04.yaml create mode 120000 qa/distros/supported-all-distro/centos_latest.yaml create mode 120000 qa/distros/supported-all-distro/rhel_7.yaml create mode 120000 qa/distros/supported-all-distro/ubuntu_16.04.yaml create mode 120000 qa/distros/supported-all-distro/ubuntu_latest.yaml create mode 120000 qa/distros/supported-random-distro$/centos_latest.yaml create mode 120000 qa/distros/supported-random-distro$/rhel_7.yaml create mode 120000 qa/distros/supported-random-distro$/ubuntu_16.04.yaml 
create mode 120000 qa/distros/supported-random-distro$/ubuntu_latest.yaml create mode 120000 qa/distros/supported/centos_latest.yaml create mode 120000 qa/distros/supported/rhel_latest.yaml create mode 120000 qa/distros/supported/ubuntu_latest.yaml create mode 100644 qa/erasure-code/ec-feature-plugins-v2.yaml create mode 100644 qa/erasure-code/ec-feature-plugins-v3.yaml create mode 100644 qa/erasure-code/ec-rados-default.yaml create mode 100644 qa/erasure-code/ec-rados-parallel.yaml create mode 100644 qa/erasure-code/ec-rados-plugin=clay-k=4-m=2.yaml create mode 100644 qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml create mode 100644 qa/erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml create mode 100644 qa/erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml create mode 100644 qa/erasure-code/ec-rados-plugin=jerasure-k=4-m=2.yaml create mode 100644 qa/erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml create mode 100644 qa/erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml create mode 100644 qa/erasure-code/ec-rados-sequential.yaml create mode 100755 qa/find-used-ports.sh create mode 100644 qa/libceph/Makefile create mode 100644 qa/libceph/trivial_libceph.c create mode 100755 qa/loopall.sh create mode 100755 qa/machine_types/schedule_rados_ovh.sh create mode 100755 qa/machine_types/schedule_subset.sh create mode 100644 qa/machine_types/vps.yaml create mode 100755 qa/mds/test_anchortable.sh create mode 100755 qa/mds/test_mdstable_failures.sh create mode 100755 qa/mon/bootstrap/host.sh create mode 100755 qa/mon/bootstrap/initial_members.sh create mode 100755 qa/mon/bootstrap/initial_members_asok.sh create mode 100755 qa/mon/bootstrap/simple.sh create mode 100755 qa/mon/bootstrap/simple_expand.sh create mode 100755 qa/mon/bootstrap/simple_expand_monmap.sh create mode 100755 qa/mon/bootstrap/simple_single_expand.sh create mode 100755 qa/mon/bootstrap/simple_single_expand2.sh create mode 100755 qa/mon/bootstrap/single_host.sh create mode 100755 qa/mon/bootstrap/single_host_multi.sh create mode 100644 qa/msgr/async-v1only.yaml create mode 100644 qa/msgr/async-v2only.yaml create mode 100644 qa/msgr/async.yaml create mode 100644 qa/msgr/random.yaml create mode 100644 qa/msgr/simple.yaml create mode 100755 qa/nightlies/cron_wrapper create mode 100644 qa/objectstore/bluestore-bitmap.yaml create mode 100644 qa/objectstore/bluestore-comp-lz4.yaml create mode 100644 qa/objectstore/bluestore-comp-snappy.yaml create mode 100644 qa/objectstore/bluestore-comp-zlib.yaml create mode 100644 qa/objectstore/bluestore-comp-zstd.yaml create mode 100644 qa/objectstore/bluestore-hybrid.yaml create mode 100644 qa/objectstore/bluestore-stupid.yaml create mode 100644 qa/objectstore/filestore-xfs.yaml create mode 120000 qa/objectstore_cephfs/bluestore-bitmap.yaml create mode 120000 qa/objectstore_cephfs/filestore-xfs.yaml create mode 100644 qa/overrides/2-size-1-min-size.yaml create mode 100644 qa/overrides/2-size-2-min-size.yaml create mode 100644 qa/overrides/3-size-2-min-size.yaml create mode 100644 qa/overrides/more-active-recovery.yaml create mode 100644 qa/overrides/no_client_pidfile.yaml create mode 100644 qa/overrides/short_pg_log.yaml create mode 100644 qa/overrides/whitelist_wrongly_marked_down.yaml create mode 100644 qa/packages/packages.yaml create mode 100755 qa/qa_scripts/cephscrub.sh create mode 100644 qa/qa_scripts/openstack/README create mode 100755 qa/qa_scripts/openstack/ceph_install.sh create mode 100644 qa/qa_scripts/openstack/ceph_install_w_ansible/README create mode 100755 
qa/qa_scripts/openstack/ceph_install_w_ansible/ceph_install.sh create mode 100644 qa/qa_scripts/openstack/ceph_install_w_ansible/config create mode 120000 qa/qa_scripts/openstack/ceph_install_w_ansible/copy_func.sh create mode 100755 qa/qa_scripts/openstack/ceph_install_w_ansible/execs/cdn_setup.sh create mode 100755 qa/qa_scripts/openstack/ceph_install_w_ansible/execs/ceph_ansible.sh create mode 100755 qa/qa_scripts/openstack/ceph_install_w_ansible/execs/edit_ansible_hosts.sh create mode 100755 qa/qa_scripts/openstack/ceph_install_w_ansible/execs/edit_groupvars_osds.sh create mode 100755 qa/qa_scripts/openstack/ceph_install_w_ansible/multi_action.sh create mode 100755 qa/qa_scripts/openstack/ceph_install_w_ansible/repolocs.sh create mode 100755 qa/qa_scripts/openstack/ceph_install_w_ansible/staller.sh create mode 100755 qa/qa_scripts/openstack/ceph_install_w_ansible/talknice.sh create mode 100755 qa/qa_scripts/openstack/connectceph.sh create mode 100755 qa/qa_scripts/openstack/copy_func.sh create mode 100755 qa/qa_scripts/openstack/execs/ceph-pool-create.sh create mode 100755 qa/qa_scripts/openstack/execs/ceph_cluster.sh create mode 100755 qa/qa_scripts/openstack/execs/libvirt-secret.sh create mode 100755 qa/qa_scripts/openstack/execs/openstack-preinstall.sh create mode 100755 qa/qa_scripts/openstack/execs/run_openstack.sh create mode 100755 qa/qa_scripts/openstack/execs/start_openstack.sh create mode 100644 qa/qa_scripts/openstack/files/cinder.template.conf create mode 100644 qa/qa_scripts/openstack/files/glance-api.template.conf create mode 100644 qa/qa_scripts/openstack/files/kilo.template.conf create mode 100644 qa/qa_scripts/openstack/files/nova.template.conf create mode 100755 qa/qa_scripts/openstack/fix_conf_file.sh create mode 100755 qa/qa_scripts/openstack/image_create.sh create mode 100755 qa/qa_scripts/openstack/openstack.sh create mode 100755 qa/qa_scripts/openstack/packstack.sh create mode 100644 qa/rbd/common.sh create mode 100644 qa/rbd/krbd_blkroset.t create mode 100644 qa/rbd/krbd_deep_flatten.t create mode 100644 qa/rbd/krbd_discard.t create mode 100644 qa/rbd/krbd_discard_4M.t create mode 100644 qa/rbd/krbd_discard_512b.t create mode 100644 qa/rbd/krbd_discard_granularity.t create mode 100644 qa/rbd/krbd_get_features.t create mode 100644 qa/rbd/krbd_huge_image.t create mode 100644 qa/rbd/krbd_msgr_segments.t create mode 100644 qa/rbd/krbd_parent_overlap.t create mode 100644 qa/rbd/krbd_whole_object_zeroout.t create mode 100644 qa/rbd/krbd_zeroout.t create mode 100755 qa/rbd/rbd.sh create mode 100644 qa/releases/infernalis.yaml create mode 100644 qa/releases/jewel.yaml create mode 100644 qa/releases/kraken.yaml create mode 100644 qa/releases/luminous-with-mgr.yaml create mode 100644 qa/releases/luminous.yaml create mode 100644 qa/releases/mimic.yaml create mode 100644 qa/releases/nautilus.yaml create mode 100644 qa/rgw_frontend/beast.yaml create mode 100644 qa/rgw_frontend/civetweb.yaml create mode 100644 qa/rgw_pool_type/ec-profile.yaml create mode 100644 qa/rgw_pool_type/ec.yaml create mode 100644 qa/rgw_pool_type/replicated.yaml create mode 100755 qa/run-standalone.sh create mode 100644 qa/run_xfstests-obsolete.sh create mode 100755 qa/run_xfstests.sh create mode 100644 qa/run_xfstests_qemu.sh create mode 100755 qa/runallonce.sh create mode 100755 qa/runoncfuse.sh create mode 100755 qa/runonkclient.sh create mode 100755 qa/setup-chroot.sh create mode 100644 qa/standalone/README create mode 100755 qa/standalone/ceph-helpers.sh create mode 100755 
qa/standalone/crush/crush-choose-args.sh create mode 100755 qa/standalone/crush/crush-classes.sh create mode 100755 qa/standalone/erasure-code/test-erasure-code-plugins.sh create mode 100755 qa/standalone/erasure-code/test-erasure-code.sh create mode 100755 qa/standalone/erasure-code/test-erasure-eio.sh create mode 100755 qa/standalone/mgr/balancer.sh create mode 100755 qa/standalone/misc/network-ping.sh create mode 100755 qa/standalone/misc/ok-to-stop.sh create mode 100755 qa/standalone/misc/rados-striper.sh create mode 100755 qa/standalone/misc/test-ceph-helpers.sh create mode 100755 qa/standalone/mon/misc.sh create mode 100755 qa/standalone/mon/mkfs.sh create mode 100755 qa/standalone/mon/mon-bind.sh create mode 100755 qa/standalone/mon/mon-created-time.sh create mode 100755 qa/standalone/mon/mon-handle-forward.sh create mode 100755 qa/standalone/mon/mon-last-epoch-clean.sh create mode 100755 qa/standalone/mon/mon-osdmap-prune.sh create mode 100755 qa/standalone/mon/mon-ping.sh create mode 100755 qa/standalone/mon/mon-scrub.sh create mode 100755 qa/standalone/mon/mon-seesaw.sh create mode 100755 qa/standalone/mon/msgr-v2-transition.sh create mode 100755 qa/standalone/mon/osd-crush.sh create mode 100755 qa/standalone/mon/osd-erasure-code-profile.sh create mode 100755 qa/standalone/mon/osd-pool-create.sh create mode 100755 qa/standalone/mon/osd-pool-df.sh create mode 100755 qa/standalone/mon/test_pool_quota.sh create mode 100755 qa/standalone/osd/bad-inc-map.sh create mode 100755 qa/standalone/osd/divergent-priors.sh create mode 100755 qa/standalone/osd/ec-error-rollforward.sh create mode 100755 qa/standalone/osd/osd-backfill-prio.sh create mode 100755 qa/standalone/osd/osd-backfill-recovery-log.sh create mode 100755 qa/standalone/osd/osd-backfill-space.sh create mode 100755 qa/standalone/osd/osd-backfill-stats.sh create mode 100755 qa/standalone/osd/osd-bench.sh create mode 100755 qa/standalone/osd/osd-bluefs-volume-ops.sh create mode 100755 qa/standalone/osd/osd-config.sh create mode 100755 qa/standalone/osd/osd-copy-from.sh create mode 100755 qa/standalone/osd/osd-dup.sh create mode 100755 qa/standalone/osd/osd-fast-mark-down.sh create mode 100755 qa/standalone/osd/osd-force-create-pg.sh create mode 100755 qa/standalone/osd/osd-markdown.sh create mode 100755 qa/standalone/osd/osd-reactivate.sh create mode 100755 qa/standalone/osd/osd-recovery-prio.sh create mode 100755 qa/standalone/osd/osd-recovery-space.sh create mode 100755 qa/standalone/osd/osd-recovery-stats.sh create mode 100755 qa/standalone/osd/osd-rep-recov-eio.sh create mode 100755 qa/standalone/osd/osd-reuse-id.sh create mode 100755 qa/standalone/osd/pg-split-merge.sh create mode 100755 qa/standalone/osd/repro_long_log.sh create mode 100755 qa/standalone/scrub/osd-recovery-scrub.sh create mode 100755 qa/standalone/scrub/osd-scrub-dump.sh create mode 100755 qa/standalone/scrub/osd-scrub-repair.sh create mode 100755 qa/standalone/scrub/osd-scrub-snaps.sh create mode 100755 qa/standalone/scrub/osd-scrub-test.sh create mode 100755 qa/standalone/scrub/osd-unexpected-clone.sh create mode 100755 qa/standalone/special/ceph_objectstore_tool.py create mode 100755 qa/standalone/special/test-failure.sh create mode 120000 qa/suites/.qa create mode 120000 qa/suites/big/.qa create mode 100644 qa/suites/big/rados-thrash/% create mode 120000 qa/suites/big/rados-thrash/.qa create mode 120000 qa/suites/big/rados-thrash/ceph/.qa create mode 100644 qa/suites/big/rados-thrash/ceph/ceph.yaml create mode 120000 
qa/suites/big/rados-thrash/clusters/.qa create mode 100644 qa/suites/big/rados-thrash/clusters/big.yaml create mode 100644 qa/suites/big/rados-thrash/clusters/medium.yaml create mode 100644 qa/suites/big/rados-thrash/clusters/small.yaml create mode 120000 qa/suites/big/rados-thrash/objectstore create mode 100644 qa/suites/big/rados-thrash/openstack.yaml create mode 120000 qa/suites/big/rados-thrash/thrashers/.qa create mode 100644 qa/suites/big/rados-thrash/thrashers/default.yaml create mode 120000 qa/suites/big/rados-thrash/workloads/.qa create mode 100644 qa/suites/big/rados-thrash/workloads/snaps-few-objects.yaml create mode 120000 qa/suites/buildpackages/.qa create mode 100644 qa/suites/buildpackages/any/% create mode 120000 qa/suites/buildpackages/any/.qa create mode 120000 qa/suites/buildpackages/any/distros create mode 120000 qa/suites/buildpackages/any/tasks/.qa create mode 100644 qa/suites/buildpackages/any/tasks/release.yaml create mode 100644 qa/suites/buildpackages/tests/% create mode 120000 qa/suites/buildpackages/tests/.qa create mode 120000 qa/suites/buildpackages/tests/distros create mode 120000 qa/suites/buildpackages/tests/tasks/.qa create mode 100644 qa/suites/buildpackages/tests/tasks/release.yaml create mode 120000 qa/suites/ceph-ansible/.qa create mode 120000 qa/suites/ceph-ansible/smoke/.qa create mode 100644 qa/suites/ceph-ansible/smoke/basic/% create mode 120000 qa/suites/ceph-ansible/smoke/basic/.qa create mode 120000 qa/suites/ceph-ansible/smoke/basic/0-clusters/.qa create mode 100644 qa/suites/ceph-ansible/smoke/basic/0-clusters/3-node.yaml create mode 100644 qa/suites/ceph-ansible/smoke/basic/0-clusters/4-node.yaml create mode 120000 qa/suites/ceph-ansible/smoke/basic/1-distros/.qa create mode 120000 qa/suites/ceph-ansible/smoke/basic/1-distros/centos_latest.yaml create mode 120000 qa/suites/ceph-ansible/smoke/basic/1-distros/ubuntu_latest.yaml create mode 120000 qa/suites/ceph-ansible/smoke/basic/2-ceph/.qa create mode 100644 qa/suites/ceph-ansible/smoke/basic/2-ceph/ceph_ansible.yaml create mode 120000 qa/suites/ceph-ansible/smoke/basic/3-config/.qa create mode 100644 qa/suites/ceph-ansible/smoke/basic/3-config/bluestore_with_dmcrypt.yaml create mode 100644 qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_off.yaml create mode 100644 qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_on.yaml create mode 120000 qa/suites/ceph-ansible/smoke/basic/4-tasks/.qa create mode 100644 qa/suites/ceph-ansible/smoke/basic/4-tasks/ceph-admin-commands.yaml create mode 100644 qa/suites/ceph-ansible/smoke/basic/4-tasks/rbd_import_export.yaml create mode 100644 qa/suites/ceph-ansible/smoke/basic/4-tasks/rest.yaml create mode 100644 qa/suites/ceph-deploy/% create mode 120000 qa/suites/ceph-deploy/.qa create mode 120000 qa/suites/ceph-deploy/cluster/.qa create mode 100644 qa/suites/ceph-deploy/cluster/4node.yaml create mode 120000 qa/suites/ceph-deploy/config/.qa create mode 100644 qa/suites/ceph-deploy/config/ceph_volume_bluestore.yaml create mode 100644 qa/suites/ceph-deploy/config/ceph_volume_bluestore_dmcrypt.yaml create mode 100644 qa/suites/ceph-deploy/config/ceph_volume_dmcrypt_off.yaml create mode 100644 qa/suites/ceph-deploy/config/ceph_volume_filestore.yaml create mode 120000 qa/suites/ceph-deploy/distros/.qa create mode 120000 qa/suites/ceph-deploy/distros/centos_latest.yaml create mode 120000 qa/suites/ceph-deploy/distros/ubuntu_latest.yaml create mode 120000 qa/suites/ceph-deploy/python_versions/.qa create mode 100644 
qa/suites/ceph-deploy/python_versions/python_2.yaml create mode 100644 qa/suites/ceph-deploy/python_versions/python_3.yaml create mode 120000 qa/suites/ceph-deploy/tasks/.qa create mode 100644 qa/suites/ceph-deploy/tasks/ceph-admin-commands.yaml create mode 100644 qa/suites/ceph-deploy/tasks/rbd_import_export.yaml create mode 100644 qa/suites/cephmetrics/% create mode 120000 qa/suites/cephmetrics/.qa create mode 120000 qa/suites/cephmetrics/0-clusters/.qa create mode 100644 qa/suites/cephmetrics/0-clusters/3-node.yaml create mode 120000 qa/suites/cephmetrics/1-distros/.qa create mode 120000 qa/suites/cephmetrics/1-distros/centos_latest.yaml create mode 120000 qa/suites/cephmetrics/1-distros/ubuntu_latest.yaml create mode 120000 qa/suites/cephmetrics/2-ceph/.qa create mode 100644 qa/suites/cephmetrics/2-ceph/ceph_ansible.yaml create mode 120000 qa/suites/cephmetrics/3-ceph-config/.qa create mode 100644 qa/suites/cephmetrics/3-ceph-config/bluestore_with_dmcrypt.yaml create mode 100644 qa/suites/cephmetrics/3-ceph-config/bluestore_without_dmcrypt.yaml create mode 100644 qa/suites/cephmetrics/3-ceph-config/dmcrypt_off.yaml create mode 100644 qa/suites/cephmetrics/3-ceph-config/dmcrypt_on.yaml create mode 120000 qa/suites/cephmetrics/4-epel/.qa create mode 100644 qa/suites/cephmetrics/4-epel/no_epel.yaml create mode 100644 qa/suites/cephmetrics/4-epel/use_epel.yaml create mode 120000 qa/suites/cephmetrics/5-containers/.qa create mode 100644 qa/suites/cephmetrics/5-containers/containerized.yaml create mode 100644 qa/suites/cephmetrics/5-containers/no_containers.yaml create mode 120000 qa/suites/cephmetrics/6-tasks/.qa create mode 100644 qa/suites/cephmetrics/6-tasks/cephmetrics.yaml create mode 100644 qa/suites/dummy/% create mode 120000 qa/suites/dummy/.qa create mode 120000 qa/suites/dummy/all/.qa create mode 100644 qa/suites/dummy/all/nop.yaml create mode 120000 qa/suites/experimental/.qa create mode 100644 qa/suites/experimental/multimds/% create mode 120000 qa/suites/experimental/multimds/.qa create mode 120000 qa/suites/experimental/multimds/clusters/.qa create mode 100644 qa/suites/experimental/multimds/clusters/7-multimds.yaml create mode 120000 qa/suites/experimental/multimds/tasks/.qa create mode 100644 qa/suites/experimental/multimds/tasks/fsstress_thrash_subtrees.yaml create mode 120000 qa/suites/fs/.qa create mode 100644 qa/suites/fs/32bits/% create mode 120000 qa/suites/fs/32bits/.qa create mode 120000 qa/suites/fs/32bits/begin.yaml create mode 120000 qa/suites/fs/32bits/clusters/.qa create mode 120000 qa/suites/fs/32bits/clusters/fixed-2-ucephfs.yaml create mode 120000 qa/suites/fs/32bits/conf create mode 120000 qa/suites/fs/32bits/mount/.qa create mode 120000 qa/suites/fs/32bits/mount/fuse.yaml create mode 120000 qa/suites/fs/32bits/objectstore-ec create mode 100644 qa/suites/fs/32bits/overrides/+ create mode 120000 qa/suites/fs/32bits/overrides/.qa create mode 100644 qa/suites/fs/32bits/overrides/faked-ino.yaml create mode 120000 qa/suites/fs/32bits/overrides/frag_enable.yaml create mode 120000 qa/suites/fs/32bits/overrides/whitelist_health.yaml create mode 120000 qa/suites/fs/32bits/overrides/whitelist_wrongly_marked_down.yaml create mode 120000 qa/suites/fs/32bits/supported-random-distros$ create mode 120000 qa/suites/fs/32bits/tasks/.qa create mode 120000 qa/suites/fs/32bits/tasks/cfuse_workunit_suites_fsstress.yaml create mode 100644 qa/suites/fs/32bits/tasks/cfuse_workunit_suites_pjd.yaml create mode 100644 qa/suites/fs/basic_functional/% create mode 120000 
qa/suites/fs/basic_functional/.qa create mode 120000 qa/suites/fs/basic_functional/begin.yaml create mode 120000 qa/suites/fs/basic_functional/clusters/.qa create mode 120000 qa/suites/fs/basic_functional/clusters/1-mds-4-client-coloc.yaml create mode 120000 qa/suites/fs/basic_functional/conf create mode 120000 qa/suites/fs/basic_functional/mount/.qa create mode 120000 qa/suites/fs/basic_functional/mount/fuse.yaml create mode 120000 qa/suites/fs/basic_functional/objectstore/.qa create mode 120000 qa/suites/fs/basic_functional/objectstore/bluestore-bitmap.yaml create mode 120000 qa/suites/fs/basic_functional/objectstore/bluestore-ec-root.yaml create mode 100644 qa/suites/fs/basic_functional/overrides/+ create mode 120000 qa/suites/fs/basic_functional/overrides/.qa create mode 120000 qa/suites/fs/basic_functional/overrides/frag_enable.yaml create mode 120000 qa/suites/fs/basic_functional/overrides/no_client_pidfile.yaml create mode 120000 qa/suites/fs/basic_functional/overrides/whitelist_health.yaml create mode 120000 qa/suites/fs/basic_functional/overrides/whitelist_wrongly_marked_down.yaml create mode 120000 qa/suites/fs/basic_functional/supported-random-distros$ create mode 120000 qa/suites/fs/basic_functional/tasks/.qa create mode 100644 qa/suites/fs/basic_functional/tasks/admin.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/alternate-pool.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/asok_dump_tree.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/auto-repair.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/backtrace.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/cap-flush.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/cephfs-shell.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/cephfs_scrub_tests.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/cfuse_workunit_quota.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/client-limits.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/client-readahad.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/client-recovery.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/damage.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/data-scan.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/forward-scrub.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/fragment.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/journal-repair.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/libcephfs_python.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/mds-flush.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/mds-full.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/mds_creation_retry.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/openfiletable.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/pool-perm.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/quota.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/sessionmap/+ create mode 120000 qa/suites/fs/basic_functional/tasks/sessionmap/.qa create mode 100644 qa/suites/fs/basic_functional/tasks/sessionmap/sessionmap.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/strays.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/test_journal_migration.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/volume-client/% create mode 120000 qa/suites/fs/basic_functional/tasks/volume-client/.qa create mode 120000 
qa/suites/fs/basic_functional/tasks/volume-client/task/.qa create mode 100644 qa/suites/fs/basic_functional/tasks/volume-client/task/test/+ create mode 120000 qa/suites/fs/basic_functional/tasks/volume-client/task/test/.qa create mode 100644 qa/suites/fs/basic_functional/tasks/volume-client/task/test/test.yaml create mode 100644 qa/suites/fs/basic_functional/tasks/volumes.yaml create mode 100644 qa/suites/fs/basic_workload/% create mode 120000 qa/suites/fs/basic_workload/.qa create mode 120000 qa/suites/fs/basic_workload/begin.yaml create mode 120000 qa/suites/fs/basic_workload/clusters/.qa create mode 120000 qa/suites/fs/basic_workload/clusters/fixed-2-ucephfs.yaml create mode 120000 qa/suites/fs/basic_workload/conf create mode 120000 qa/suites/fs/basic_workload/inline/.qa create mode 100644 qa/suites/fs/basic_workload/inline/no.yaml create mode 100644 qa/suites/fs/basic_workload/inline/yes.yaml create mode 120000 qa/suites/fs/basic_workload/mount/.qa create mode 120000 qa/suites/fs/basic_workload/mount/fuse.yaml create mode 120000 qa/suites/fs/basic_workload/objectstore-ec create mode 120000 qa/suites/fs/basic_workload/omap_limit/.qa create mode 100644 qa/suites/fs/basic_workload/omap_limit/10.yaml create mode 100644 qa/suites/fs/basic_workload/omap_limit/10000.yaml create mode 100644 qa/suites/fs/basic_workload/overrides/+ create mode 120000 qa/suites/fs/basic_workload/overrides/.qa create mode 120000 qa/suites/fs/basic_workload/overrides/frag_enable.yaml create mode 120000 qa/suites/fs/basic_workload/overrides/session_timeout.yaml create mode 120000 qa/suites/fs/basic_workload/overrides/whitelist_health.yaml create mode 120000 qa/suites/fs/basic_workload/overrides/whitelist_wrongly_marked_down.yaml create mode 120000 qa/suites/fs/basic_workload/supported-random-distros$ create mode 120000 qa/suites/fs/basic_workload/tasks/.qa create mode 100644 qa/suites/fs/basic_workload/tasks/cfuse_workunit_kernel_untar_build.yaml create mode 100644 qa/suites/fs/basic_workload/tasks/cfuse_workunit_misc.yaml create mode 100644 qa/suites/fs/basic_workload/tasks/cfuse_workunit_misc_test_o_trunc.yaml create mode 100644 qa/suites/fs/basic_workload/tasks/cfuse_workunit_norstats.yaml create mode 120000 qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_blogbench.yaml create mode 120000 qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_dbench.yaml create mode 120000 qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_ffsb.yaml create mode 120000 qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsstress.yaml create mode 100644 qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsx.yaml create mode 100644 qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsync.yaml create mode 100644 qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_iogen.yaml create mode 100644 qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_iozone.yaml create mode 100644 qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_pjd.yaml create mode 100644 qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_truncate_delay.yaml create mode 120000 qa/suites/fs/basic_workload/tasks/cfuse_workunit_trivial_sync.yaml create mode 120000 qa/suites/fs/basic_workload/tasks/libcephfs_interface_tests.yaml create mode 120000 qa/suites/fs/bugs/.qa create mode 100644 qa/suites/fs/bugs/client_trim_caps/% create mode 120000 qa/suites/fs/bugs/client_trim_caps/.qa create mode 120000 qa/suites/fs/bugs/client_trim_caps/begin.yaml create mode 120000 qa/suites/fs/bugs/client_trim_caps/clusters/.qa create mode 100644 
qa/suites/fs/bugs/client_trim_caps/clusters/small-cluster.yaml create mode 120000 qa/suites/fs/bugs/client_trim_caps/conf create mode 120000 qa/suites/fs/bugs/client_trim_caps/objectstore/.qa create mode 120000 qa/suites/fs/bugs/client_trim_caps/objectstore/bluestore-bitmap.yaml create mode 100644 qa/suites/fs/bugs/client_trim_caps/overrides/+ create mode 120000 qa/suites/fs/bugs/client_trim_caps/overrides/.qa create mode 120000 qa/suites/fs/bugs/client_trim_caps/overrides/frag_enable.yaml create mode 120000 qa/suites/fs/bugs/client_trim_caps/overrides/no_client_pidfile.yaml create mode 120000 qa/suites/fs/bugs/client_trim_caps/overrides/whitelist_health.yaml create mode 120000 qa/suites/fs/bugs/client_trim_caps/overrides/whitelist_wrongly_marked_down.yaml create mode 120000 qa/suites/fs/bugs/client_trim_caps/tasks/.qa create mode 100644 qa/suites/fs/bugs/client_trim_caps/tasks/trim-i22073.yaml create mode 100644 qa/suites/fs/multiclient/% create mode 120000 qa/suites/fs/multiclient/.qa create mode 120000 qa/suites/fs/multiclient/begin.yaml create mode 120000 qa/suites/fs/multiclient/clusters/.qa create mode 120000 qa/suites/fs/multiclient/clusters/1-mds-2-client.yaml create mode 120000 qa/suites/fs/multiclient/clusters/1-mds-3-client.yaml create mode 120000 qa/suites/fs/multiclient/conf create mode 120000 qa/suites/fs/multiclient/distros/.qa create mode 120000 qa/suites/fs/multiclient/distros/ubuntu_latest.yaml create mode 120000 qa/suites/fs/multiclient/mount/.qa create mode 120000 qa/suites/fs/multiclient/mount/fuse.yaml create mode 100644 qa/suites/fs/multiclient/mount/kclient.yaml.disabled create mode 120000 qa/suites/fs/multiclient/objectstore-ec create mode 100644 qa/suites/fs/multiclient/overrides/+ create mode 120000 qa/suites/fs/multiclient/overrides/.qa create mode 120000 qa/suites/fs/multiclient/overrides/frag_enable.yaml create mode 120000 qa/suites/fs/multiclient/overrides/whitelist_health.yaml create mode 120000 qa/suites/fs/multiclient/overrides/whitelist_wrongly_marked_down.yaml create mode 120000 qa/suites/fs/multiclient/tasks/.qa create mode 100644 qa/suites/fs/multiclient/tasks/cephfs_misc_tests.yaml create mode 100644 qa/suites/fs/multiclient/tasks/fsx-mpi.yaml.disabled create mode 100644 qa/suites/fs/multiclient/tasks/ior-shared-file.yaml create mode 100644 qa/suites/fs/multiclient/tasks/mdtest.yaml create mode 100644 qa/suites/fs/multifs/% create mode 120000 qa/suites/fs/multifs/.qa create mode 120000 qa/suites/fs/multifs/begin.yaml create mode 120000 qa/suites/fs/multifs/clusters/.qa create mode 120000 qa/suites/fs/multifs/clusters/1a3s-mds-2c-client.yaml create mode 120000 qa/suites/fs/multifs/conf create mode 120000 qa/suites/fs/multifs/mount/.qa create mode 120000 qa/suites/fs/multifs/mount/fuse.yaml create mode 120000 qa/suites/fs/multifs/objectstore-ec create mode 100644 qa/suites/fs/multifs/overrides/+ create mode 120000 qa/suites/fs/multifs/overrides/.qa create mode 120000 qa/suites/fs/multifs/overrides/frag_enable.yaml create mode 100644 qa/suites/fs/multifs/overrides/mon-debug.yaml create mode 120000 qa/suites/fs/multifs/overrides/whitelist_health.yaml create mode 120000 qa/suites/fs/multifs/overrides/whitelist_wrongly_marked_down.yaml create mode 120000 qa/suites/fs/multifs/supported-random-distros$ create mode 120000 qa/suites/fs/multifs/tasks/.qa create mode 100644 qa/suites/fs/multifs/tasks/failover.yaml create mode 100644 qa/suites/fs/permission/% create mode 120000 qa/suites/fs/permission/.qa create mode 120000 qa/suites/fs/permission/begin.yaml 
create mode 120000 qa/suites/fs/permission/clusters/.qa create mode 120000 qa/suites/fs/permission/clusters/fixed-2-ucephfs.yaml create mode 120000 qa/suites/fs/permission/conf create mode 120000 qa/suites/fs/permission/mount/.qa create mode 120000 qa/suites/fs/permission/mount/fuse.yaml create mode 120000 qa/suites/fs/permission/objectstore-ec create mode 100644 qa/suites/fs/permission/overrides/+ create mode 120000 qa/suites/fs/permission/overrides/.qa create mode 120000 qa/suites/fs/permission/overrides/frag_enable.yaml create mode 120000 qa/suites/fs/permission/overrides/whitelist_health.yaml create mode 120000 qa/suites/fs/permission/overrides/whitelist_wrongly_marked_down.yaml create mode 120000 qa/suites/fs/permission/supported-random-distros$ create mode 120000 qa/suites/fs/permission/tasks/.qa create mode 100644 qa/suites/fs/permission/tasks/cfuse_workunit_misc.yaml create mode 100644 qa/suites/fs/permission/tasks/cfuse_workunit_suites_pjd.yaml create mode 100644 qa/suites/fs/snaps/% create mode 120000 qa/suites/fs/snaps/.qa create mode 120000 qa/suites/fs/snaps/begin.yaml create mode 120000 qa/suites/fs/snaps/clusters/.qa create mode 120000 qa/suites/fs/snaps/clusters/fixed-2-ucephfs.yaml create mode 120000 qa/suites/fs/snaps/conf create mode 120000 qa/suites/fs/snaps/mount/.qa create mode 120000 qa/suites/fs/snaps/mount/fuse.yaml create mode 120000 qa/suites/fs/snaps/objectstore-ec create mode 100644 qa/suites/fs/snaps/overrides/+ create mode 120000 qa/suites/fs/snaps/overrides/.qa create mode 120000 qa/suites/fs/snaps/overrides/frag_enable.yaml create mode 120000 qa/suites/fs/snaps/overrides/whitelist_health.yaml create mode 120000 qa/suites/fs/snaps/overrides/whitelist_wrongly_marked_down.yaml create mode 120000 qa/suites/fs/snaps/supported-random-distros$ create mode 120000 qa/suites/fs/snaps/tasks/.qa create mode 100644 qa/suites/fs/snaps/tasks/snaptests.yaml create mode 100644 qa/suites/fs/thrash/% create mode 120000 qa/suites/fs/thrash/.qa create mode 120000 qa/suites/fs/thrash/begin.yaml create mode 120000 qa/suites/fs/thrash/ceph-thrash/.qa create mode 100644 qa/suites/fs/thrash/ceph-thrash/default.yaml create mode 120000 qa/suites/fs/thrash/clusters/.qa create mode 120000 qa/suites/fs/thrash/clusters/1-mds-1-client-coloc.yaml create mode 120000 qa/suites/fs/thrash/conf create mode 120000 qa/suites/fs/thrash/mount/.qa create mode 120000 qa/suites/fs/thrash/mount/fuse.yaml create mode 120000 qa/suites/fs/thrash/msgr-failures/.qa create mode 100644 qa/suites/fs/thrash/msgr-failures/none.yaml create mode 100644 qa/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml create mode 120000 qa/suites/fs/thrash/objectstore-ec create mode 100644 qa/suites/fs/thrash/overrides/+ create mode 120000 qa/suites/fs/thrash/overrides/.qa create mode 120000 qa/suites/fs/thrash/overrides/frag_enable.yaml create mode 120000 qa/suites/fs/thrash/overrides/session_timeout.yaml create mode 120000 qa/suites/fs/thrash/overrides/whitelist_health.yaml create mode 120000 qa/suites/fs/thrash/overrides/whitelist_wrongly_marked_down.yaml create mode 120000 qa/suites/fs/thrash/supported-random-distros$ create mode 120000 qa/suites/fs/thrash/tasks/.qa create mode 100644 qa/suites/fs/thrash/tasks/cfuse_workunit_snaptests.yaml create mode 120000 qa/suites/fs/thrash/tasks/cfuse_workunit_suites_fsstress.yaml create mode 100644 qa/suites/fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml create mode 120000 qa/suites/fs/thrash/tasks/cfuse_workunit_trivial_sync.yaml create mode 100644 qa/suites/fs/traceless/% create mode 
120000 qa/suites/fs/traceless/.qa create mode 120000 qa/suites/fs/traceless/begin.yaml create mode 120000 qa/suites/fs/traceless/clusters/.qa create mode 120000 qa/suites/fs/traceless/clusters/fixed-2-ucephfs.yaml create mode 120000 qa/suites/fs/traceless/conf create mode 120000 qa/suites/fs/traceless/mount/.qa create mode 120000 qa/suites/fs/traceless/mount/fuse.yaml create mode 120000 qa/suites/fs/traceless/objectstore-ec create mode 100644 qa/suites/fs/traceless/overrides/+ create mode 120000 qa/suites/fs/traceless/overrides/.qa create mode 120000 qa/suites/fs/traceless/overrides/frag_enable.yaml create mode 120000 qa/suites/fs/traceless/overrides/whitelist_health.yaml create mode 120000 qa/suites/fs/traceless/overrides/whitelist_wrongly_marked_down.yaml create mode 120000 qa/suites/fs/traceless/supported-random-distros$ create mode 120000 qa/suites/fs/traceless/tasks/.qa create mode 120000 qa/suites/fs/traceless/tasks/cfuse_workunit_suites_blogbench.yaml create mode 120000 qa/suites/fs/traceless/tasks/cfuse_workunit_suites_dbench.yaml create mode 120000 qa/suites/fs/traceless/tasks/cfuse_workunit_suites_ffsb.yaml create mode 120000 qa/suites/fs/traceless/tasks/cfuse_workunit_suites_fsstress.yaml create mode 120000 qa/suites/fs/traceless/traceless/.qa create mode 100644 qa/suites/fs/traceless/traceless/50pc.yaml create mode 120000 qa/suites/fs/upgrade/.qa create mode 120000 qa/suites/fs/upgrade/featureful_client/.qa create mode 100644 qa/suites/fs/upgrade/featureful_client/old_client/% create mode 120000 qa/suites/fs/upgrade/featureful_client/old_client/.qa create mode 120000 qa/suites/fs/upgrade/featureful_client/old_client/bluestore-bitmap.yaml create mode 120000 qa/suites/fs/upgrade/featureful_client/old_client/clusters/.qa create mode 120000 qa/suites/fs/upgrade/featureful_client/old_client/clusters/1-mds-2-client-micro.yaml create mode 120000 qa/suites/fs/upgrade/featureful_client/old_client/conf create mode 100644 qa/suites/fs/upgrade/featureful_client/old_client/overrides/% create mode 120000 qa/suites/fs/upgrade/featureful_client/old_client/overrides/.qa create mode 120000 qa/suites/fs/upgrade/featureful_client/old_client/overrides/frag_enable.yaml create mode 100644 qa/suites/fs/upgrade/featureful_client/old_client/overrides/multimds/no.yaml create mode 100644 qa/suites/fs/upgrade/featureful_client/old_client/overrides/multimds/yes.yaml create mode 120000 qa/suites/fs/upgrade/featureful_client/old_client/overrides/whitelist_health.yaml create mode 120000 qa/suites/fs/upgrade/featureful_client/old_client/overrides/whitelist_wrongly_marked_down.yaml create mode 100644 qa/suites/fs/upgrade/featureful_client/old_client/tasks/% create mode 120000 qa/suites/fs/upgrade/featureful_client/old_client/tasks/.qa create mode 100644 qa/suites/fs/upgrade/featureful_client/old_client/tasks/0-luminous.yaml create mode 100644 qa/suites/fs/upgrade/featureful_client/old_client/tasks/1-client.yaml create mode 100644 qa/suites/fs/upgrade/featureful_client/old_client/tasks/2-upgrade.yaml create mode 100644 qa/suites/fs/upgrade/featureful_client/old_client/tasks/3-compat_client/mimic.yaml create mode 100644 qa/suites/fs/upgrade/featureful_client/old_client/tasks/3-compat_client/no.yaml create mode 100644 qa/suites/fs/upgrade/featureful_client/upgraded_client/% create mode 120000 qa/suites/fs/upgrade/featureful_client/upgraded_client/.qa create mode 120000 qa/suites/fs/upgrade/featureful_client/upgraded_client/bluestore-bitmap.yaml create mode 120000 
qa/suites/fs/upgrade/featureful_client/upgraded_client/clusters/.qa create mode 120000 qa/suites/fs/upgrade/featureful_client/upgraded_client/clusters/1-mds-2-client-micro.yaml create mode 120000 qa/suites/fs/upgrade/featureful_client/upgraded_client/conf create mode 100644 qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/% create mode 120000 qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/.qa create mode 120000 qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/frag_enable.yaml create mode 100644 qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/multimds/no.yaml create mode 100644 qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/multimds/yes.yaml create mode 120000 qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/whitelist_health.yaml create mode 120000 qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/whitelist_wrongly_marked_down.yaml create mode 100644 qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/% create mode 120000 qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/.qa create mode 100644 qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/0-luminous.yaml create mode 100644 qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/1-client.yaml create mode 100644 qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/2-upgrade.yaml create mode 100644 qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/3-client-upgrade.yaml create mode 100644 qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/4-compat_client.yaml create mode 100644 qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/5-client-sanity.yaml create mode 100644 qa/suites/fs/upgrade/snaps/% create mode 120000 qa/suites/fs/upgrade/snaps/.qa create mode 120000 qa/suites/fs/upgrade/snaps/clusters/.qa create mode 120000 qa/suites/fs/upgrade/snaps/clusters/3-mds.yaml create mode 120000 qa/suites/fs/upgrade/snaps/conf create mode 120000 qa/suites/fs/upgrade/snaps/objectstore-ec create mode 100644 qa/suites/fs/upgrade/snaps/overrides/% create mode 120000 qa/suites/fs/upgrade/snaps/overrides/.qa create mode 120000 qa/suites/fs/upgrade/snaps/overrides/frag_enable.yaml create mode 120000 qa/suites/fs/upgrade/snaps/overrides/multimds/.qa create mode 100644 qa/suites/fs/upgrade/snaps/overrides/multimds/no.yaml create mode 100644 qa/suites/fs/upgrade/snaps/overrides/multimds/yes.yaml create mode 120000 qa/suites/fs/upgrade/snaps/overrides/whitelist_health.yaml create mode 100644 qa/suites/fs/upgrade/snaps/overrides/whitelist_rstat.yaml create mode 120000 qa/suites/fs/upgrade/snaps/overrides/whitelist_wrongly_marked_down.yaml create mode 100644 qa/suites/fs/upgrade/snaps/tasks/% create mode 120000 qa/suites/fs/upgrade/snaps/tasks/.qa create mode 100644 qa/suites/fs/upgrade/snaps/tasks/0-luminous.yaml create mode 100644 qa/suites/fs/upgrade/snaps/tasks/1-client.yaml create mode 100644 qa/suites/fs/upgrade/snaps/tasks/2-upgrade.yaml create mode 100644 qa/suites/fs/upgrade/snaps/tasks/3-sanity.yaml create mode 120000 qa/suites/fs/upgrade/snaps/tasks/4-client-upgrade/.qa create mode 100644 qa/suites/fs/upgrade/snaps/tasks/4-client-upgrade/no.yaml create mode 100644 qa/suites/fs/upgrade/snaps/tasks/4-client-upgrade/yes.yaml create mode 100644 qa/suites/fs/upgrade/snaps/tasks/5-client-sanity.yaml create mode 100644 qa/suites/fs/upgrade/snaps/tasks/6-snap-upgrade.yaml create mode 120000 qa/suites/fs/upgrade/snaps/tasks/7-client-sanity.yaml create mode 120000 
qa/suites/fs/upgrade/volumes/.qa create mode 100644 qa/suites/fs/upgrade/volumes/import-legacy/% create mode 120000 qa/suites/fs/upgrade/volumes/import-legacy/.qa create mode 120000 qa/suites/fs/upgrade/volumes/import-legacy/bluestore-bitmap.yaml create mode 120000 qa/suites/fs/upgrade/volumes/import-legacy/clusters/.qa create mode 100644 qa/suites/fs/upgrade/volumes/import-legacy/clusters/1-mds-2-client-micro.yaml create mode 120000 qa/suites/fs/upgrade/volumes/import-legacy/conf create mode 100644 qa/suites/fs/upgrade/volumes/import-legacy/overrides/+ create mode 120000 qa/suites/fs/upgrade/volumes/import-legacy/overrides/.qa create mode 120000 qa/suites/fs/upgrade/volumes/import-legacy/overrides/frag_enable.yaml create mode 100644 qa/suites/fs/upgrade/volumes/import-legacy/overrides/pg-warn.yaml create mode 120000 qa/suites/fs/upgrade/volumes/import-legacy/overrides/whitelist_health.yaml create mode 120000 qa/suites/fs/upgrade/volumes/import-legacy/overrides/whitelist_wrongly_marked_down.yaml create mode 100644 qa/suites/fs/upgrade/volumes/import-legacy/tasks/% create mode 120000 qa/suites/fs/upgrade/volumes/import-legacy/tasks/.qa create mode 100644 qa/suites/fs/upgrade/volumes/import-legacy/tasks/0-mimic.yaml create mode 100644 qa/suites/fs/upgrade/volumes/import-legacy/tasks/1-client.yaml create mode 100644 qa/suites/fs/upgrade/volumes/import-legacy/tasks/2-upgrade.yaml create mode 100644 qa/suites/fs/upgrade/volumes/import-legacy/tasks/3-verify.yaml create mode 120000 qa/suites/fs/upgrade/volumes/import-legacy/ubuntu_18.04.yaml create mode 100644 qa/suites/fs/verify/% create mode 120000 qa/suites/fs/verify/.qa create mode 120000 qa/suites/fs/verify/begin.yaml create mode 120000 qa/suites/fs/verify/centos_latest.yaml create mode 120000 qa/suites/fs/verify/clusters/.qa create mode 120000 qa/suites/fs/verify/clusters/fixed-2-ucephfs.yaml create mode 120000 qa/suites/fs/verify/conf create mode 120000 qa/suites/fs/verify/mount/.qa create mode 120000 qa/suites/fs/verify/mount/fuse.yaml create mode 120000 qa/suites/fs/verify/objectstore-ec create mode 100644 qa/suites/fs/verify/overrides/+ create mode 120000 qa/suites/fs/verify/overrides/.qa create mode 120000 qa/suites/fs/verify/overrides/frag_enable.yaml create mode 100644 qa/suites/fs/verify/overrides/mon-debug.yaml create mode 120000 qa/suites/fs/verify/overrides/session_timeout.yaml create mode 120000 qa/suites/fs/verify/overrides/whitelist_health.yaml create mode 120000 qa/suites/fs/verify/overrides/whitelist_wrongly_marked_down.yaml create mode 120000 qa/suites/fs/verify/tasks/.qa create mode 120000 qa/suites/fs/verify/tasks/cfuse_workunit_suites_dbench.yaml create mode 120000 qa/suites/fs/verify/tasks/cfuse_workunit_suites_fsstress.yaml create mode 120000 qa/suites/fs/verify/validater/.qa create mode 100644 qa/suites/fs/verify/validater/lockdep.yaml create mode 100644 qa/suites/fs/verify/validater/valgrind.yaml create mode 120000 qa/suites/hadoop/.qa create mode 100644 qa/suites/hadoop/basic/% create mode 120000 qa/suites/hadoop/basic/.qa create mode 120000 qa/suites/hadoop/basic/clusters/.qa create mode 100644 qa/suites/hadoop/basic/clusters/fixed-3.yaml create mode 120000 qa/suites/hadoop/basic/distros/.qa create mode 120000 qa/suites/hadoop/basic/distros/ubuntu_latest.yaml create mode 120000 qa/suites/hadoop/basic/filestore-xfs.yaml create mode 120000 qa/suites/hadoop/basic/tasks/.qa create mode 100644 qa/suites/hadoop/basic/tasks/repl.yaml create mode 100644 qa/suites/hadoop/basic/tasks/terasort.yaml create mode 100644 
qa/suites/hadoop/basic/tasks/wordcount.yaml create mode 120000 qa/suites/kcephfs/.qa create mode 100644 qa/suites/kcephfs/cephfs/% create mode 120000 qa/suites/kcephfs/cephfs/.qa create mode 120000 qa/suites/kcephfs/cephfs/begin.yaml create mode 120000 qa/suites/kcephfs/cephfs/clusters/.qa create mode 120000 qa/suites/kcephfs/cephfs/clusters/1-mds-1-client.yaml create mode 120000 qa/suites/kcephfs/cephfs/conf create mode 120000 qa/suites/kcephfs/cephfs/inline/.qa create mode 100644 qa/suites/kcephfs/cephfs/inline/no.yaml create mode 100644 qa/suites/kcephfs/cephfs/inline/yes.yaml create mode 120000 qa/suites/kcephfs/cephfs/kclient create mode 120000 qa/suites/kcephfs/cephfs/objectstore-ec create mode 100644 qa/suites/kcephfs/cephfs/overrides/+ create mode 120000 qa/suites/kcephfs/cephfs/overrides/.qa create mode 120000 qa/suites/kcephfs/cephfs/overrides/frag_enable.yaml create mode 120000 qa/suites/kcephfs/cephfs/overrides/log-config.yaml create mode 120000 qa/suites/kcephfs/cephfs/overrides/osd-asserts.yaml create mode 120000 qa/suites/kcephfs/cephfs/overrides/whitelist_health.yaml create mode 120000 qa/suites/kcephfs/cephfs/overrides/whitelist_wrongly_marked_down.yaml create mode 120000 qa/suites/kcephfs/cephfs/tasks/.qa create mode 100644 qa/suites/kcephfs/cephfs/tasks/kclient_workunit_direct_io.yaml create mode 100644 qa/suites/kcephfs/cephfs/tasks/kclient_workunit_kernel_untar_build.yaml create mode 100644 qa/suites/kcephfs/cephfs/tasks/kclient_workunit_misc.yaml create mode 100644 qa/suites/kcephfs/cephfs/tasks/kclient_workunit_o_trunc.yaml create mode 100644 qa/suites/kcephfs/cephfs/tasks/kclient_workunit_snaps.yaml create mode 100644 qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_dbench.yaml create mode 100644 qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml create mode 100644 qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsstress.yaml create mode 100644 qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsx.yaml create mode 100644 qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsync.yaml create mode 100644 qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_iozone.yaml create mode 100644 qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_pjd.yaml create mode 100644 qa/suites/kcephfs/cephfs/tasks/kclient_workunit_trivial_sync.yaml create mode 100644 qa/suites/kcephfs/mixed-clients/% create mode 120000 qa/suites/kcephfs/mixed-clients/.qa create mode 120000 qa/suites/kcephfs/mixed-clients/begin.yaml create mode 120000 qa/suites/kcephfs/mixed-clients/clusters/.qa create mode 120000 qa/suites/kcephfs/mixed-clients/clusters/1-mds-2-client.yaml create mode 120000 qa/suites/kcephfs/mixed-clients/conf create mode 120000 qa/suites/kcephfs/mixed-clients/kclient-overrides create mode 120000 qa/suites/kcephfs/mixed-clients/objectstore-ec create mode 100644 qa/suites/kcephfs/mixed-clients/overrides/+ create mode 120000 qa/suites/kcephfs/mixed-clients/overrides/.qa create mode 120000 qa/suites/kcephfs/mixed-clients/overrides/frag_enable.yaml create mode 120000 qa/suites/kcephfs/mixed-clients/overrides/log-config.yaml create mode 120000 qa/suites/kcephfs/mixed-clients/overrides/osd-asserts.yaml create mode 120000 qa/suites/kcephfs/mixed-clients/overrides/whitelist_health.yaml create mode 120000 qa/suites/kcephfs/mixed-clients/overrides/whitelist_wrongly_marked_down.yaml create mode 120000 qa/suites/kcephfs/mixed-clients/tasks/.qa create mode 100644 qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_dbench_iozone.yaml create mode 100644 
qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml create mode 100644 qa/suites/kcephfs/recovery/% create mode 120000 qa/suites/kcephfs/recovery/.qa create mode 120000 qa/suites/kcephfs/recovery/begin.yaml create mode 120000 qa/suites/kcephfs/recovery/clusters/.qa create mode 120000 qa/suites/kcephfs/recovery/clusters/1-mds-4-client.yaml create mode 120000 qa/suites/kcephfs/recovery/conf create mode 120000 qa/suites/kcephfs/recovery/kclient create mode 120000 qa/suites/kcephfs/recovery/objectstore-ec create mode 100644 qa/suites/kcephfs/recovery/overrides/+ create mode 120000 qa/suites/kcephfs/recovery/overrides/.qa create mode 120000 qa/suites/kcephfs/recovery/overrides/frag_enable.yaml create mode 120000 qa/suites/kcephfs/recovery/overrides/log-config.yaml create mode 120000 qa/suites/kcephfs/recovery/overrides/osd-asserts.yaml create mode 120000 qa/suites/kcephfs/recovery/overrides/whitelist_health.yaml create mode 120000 qa/suites/kcephfs/recovery/overrides/whitelist_wrongly_marked_down.yaml create mode 120000 qa/suites/kcephfs/recovery/tasks/.qa create mode 100644 qa/suites/kcephfs/recovery/tasks/auto-repair.yaml create mode 100644 qa/suites/kcephfs/recovery/tasks/backtrace.yaml create mode 100644 qa/suites/kcephfs/recovery/tasks/client-limits.yaml create mode 100644 qa/suites/kcephfs/recovery/tasks/client-recovery.yaml create mode 100644 qa/suites/kcephfs/recovery/tasks/damage.yaml create mode 100644 qa/suites/kcephfs/recovery/tasks/data-scan.yaml create mode 100644 qa/suites/kcephfs/recovery/tasks/failover.yaml create mode 100644 qa/suites/kcephfs/recovery/tasks/forward-scrub.yaml create mode 100644 qa/suites/kcephfs/recovery/tasks/journal-repair.yaml create mode 100644 qa/suites/kcephfs/recovery/tasks/mds-flush.yaml create mode 100644 qa/suites/kcephfs/recovery/tasks/mds-full.yaml create mode 100644 qa/suites/kcephfs/recovery/tasks/pool-perm.yaml create mode 100644 qa/suites/kcephfs/recovery/tasks/sessionmap.yaml create mode 100644 qa/suites/kcephfs/recovery/tasks/strays.yaml create mode 100644 qa/suites/kcephfs/recovery/tasks/volume-client.yaml create mode 100644 qa/suites/kcephfs/thrash/% create mode 120000 qa/suites/kcephfs/thrash/.qa create mode 120000 qa/suites/kcephfs/thrash/begin.yaml create mode 120000 qa/suites/kcephfs/thrash/clusters/.qa create mode 120000 qa/suites/kcephfs/thrash/clusters/1-mds-1-client.yaml create mode 120000 qa/suites/kcephfs/thrash/conf create mode 120000 qa/suites/kcephfs/thrash/kclient create mode 120000 qa/suites/kcephfs/thrash/objectstore-ec create mode 100644 qa/suites/kcephfs/thrash/overrides/+ create mode 120000 qa/suites/kcephfs/thrash/overrides/.qa create mode 120000 qa/suites/kcephfs/thrash/overrides/frag_enable.yaml create mode 120000 qa/suites/kcephfs/thrash/overrides/log-config.yaml create mode 120000 qa/suites/kcephfs/thrash/overrides/osd-asserts.yaml create mode 120000 qa/suites/kcephfs/thrash/overrides/thrash-health-whitelist.yaml create mode 120000 qa/suites/kcephfs/thrash/overrides/whitelist_health.yaml create mode 120000 qa/suites/kcephfs/thrash/overrides/whitelist_wrongly_marked_down.yaml create mode 120000 qa/suites/kcephfs/thrash/thrashers/.qa create mode 100644 qa/suites/kcephfs/thrash/thrashers/default.yaml create mode 100644 qa/suites/kcephfs/thrash/thrashers/mds.yaml create mode 100644 qa/suites/kcephfs/thrash/thrashers/mon.yaml create mode 120000 qa/suites/kcephfs/thrash/workloads/.qa create mode 100644 qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml create mode 100644 
qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_iozone.yaml create mode 120000 qa/suites/krbd/.qa create mode 100644 qa/suites/krbd/basic/% create mode 120000 qa/suites/krbd/basic/.qa create mode 120000 qa/suites/krbd/basic/bluestore-bitmap.yaml create mode 120000 qa/suites/krbd/basic/ceph/.qa create mode 100644 qa/suites/krbd/basic/ceph/ceph.yaml create mode 120000 qa/suites/krbd/basic/clusters/.qa create mode 120000 qa/suites/krbd/basic/clusters/fixed-1.yaml create mode 100644 qa/suites/krbd/basic/conf.yaml create mode 120000 qa/suites/krbd/basic/ms_mode/.qa create mode 100644 qa/suites/krbd/basic/ms_mode/crc.yaml create mode 100644 qa/suites/krbd/basic/ms_mode/legacy.yaml create mode 100644 qa/suites/krbd/basic/ms_mode/secure.yaml create mode 120000 qa/suites/krbd/basic/tasks/.qa create mode 100644 qa/suites/krbd/basic/tasks/krbd_deep_flatten.yaml create mode 100644 qa/suites/krbd/basic/tasks/krbd_discard.yaml create mode 100644 qa/suites/krbd/basic/tasks/krbd_huge_image.yaml create mode 100644 qa/suites/krbd/basic/tasks/krbd_msgr_segments.yaml create mode 100644 qa/suites/krbd/basic/tasks/krbd_parent_overlap.yaml create mode 100644 qa/suites/krbd/basic/tasks/krbd_read_only.yaml create mode 100644 qa/suites/krbd/basic/tasks/krbd_whole_object_zeroout.yaml create mode 100644 qa/suites/krbd/fsx/% create mode 120000 qa/suites/krbd/fsx/.qa create mode 120000 qa/suites/krbd/fsx/ceph/.qa create mode 100644 qa/suites/krbd/fsx/ceph/ceph.yaml create mode 120000 qa/suites/krbd/fsx/clusters/.qa create mode 100644 qa/suites/krbd/fsx/clusters/3-node.yaml create mode 100644 qa/suites/krbd/fsx/conf.yaml create mode 120000 qa/suites/krbd/fsx/ms_mode$/.qa create mode 100644 qa/suites/krbd/fsx/ms_mode$/crc.yaml create mode 100644 qa/suites/krbd/fsx/ms_mode$/legacy.yaml create mode 100644 qa/suites/krbd/fsx/ms_mode$/prefer-crc.yaml create mode 100644 qa/suites/krbd/fsx/ms_mode$/secure.yaml create mode 120000 qa/suites/krbd/fsx/objectstore/.qa create mode 120000 qa/suites/krbd/fsx/objectstore/bluestore-bitmap.yaml create mode 120000 qa/suites/krbd/fsx/objectstore/filestore-xfs.yaml create mode 120000 qa/suites/krbd/fsx/striping/.qa create mode 100644 qa/suites/krbd/fsx/striping/default/% create mode 120000 qa/suites/krbd/fsx/striping/default/.qa create mode 120000 qa/suites/krbd/fsx/striping/default/msgr-failures/.qa create mode 100644 qa/suites/krbd/fsx/striping/default/msgr-failures/few.yaml create mode 100644 qa/suites/krbd/fsx/striping/default/msgr-failures/many.yaml create mode 100644 qa/suites/krbd/fsx/striping/default/randomized-striping-off.yaml create mode 100644 qa/suites/krbd/fsx/striping/fancy/% create mode 120000 qa/suites/krbd/fsx/striping/fancy/.qa create mode 120000 qa/suites/krbd/fsx/striping/fancy/msgr-failures/.qa create mode 100644 qa/suites/krbd/fsx/striping/fancy/msgr-failures/few.yaml create mode 100644 qa/suites/krbd/fsx/striping/fancy/randomized-striping-on.yaml create mode 120000 qa/suites/krbd/fsx/tasks/.qa create mode 100644 qa/suites/krbd/fsx/tasks/fsx-1-client.yaml create mode 100644 qa/suites/krbd/fsx/tasks/fsx-3-client.yaml create mode 100644 qa/suites/krbd/rbd-nomount/% create mode 120000 qa/suites/krbd/rbd-nomount/.qa create mode 120000 qa/suites/krbd/rbd-nomount/bluestore-bitmap.yaml create mode 120000 qa/suites/krbd/rbd-nomount/clusters/.qa create mode 120000 qa/suites/krbd/rbd-nomount/clusters/fixed-3.yaml create mode 100644 qa/suites/krbd/rbd-nomount/conf.yaml create mode 120000 qa/suites/krbd/rbd-nomount/install/.qa create mode 100644 
qa/suites/krbd/rbd-nomount/install/ceph.yaml create mode 120000 qa/suites/krbd/rbd-nomount/ms_mode/.qa create mode 100644 qa/suites/krbd/rbd-nomount/ms_mode/crc.yaml create mode 100644 qa/suites/krbd/rbd-nomount/ms_mode/legacy.yaml create mode 100644 qa/suites/krbd/rbd-nomount/ms_mode/secure.yaml create mode 120000 qa/suites/krbd/rbd-nomount/msgr-failures/.qa create mode 100644 qa/suites/krbd/rbd-nomount/msgr-failures/few.yaml create mode 100644 qa/suites/krbd/rbd-nomount/msgr-failures/many.yaml create mode 120000 qa/suites/krbd/rbd-nomount/tasks/.qa create mode 100644 qa/suites/krbd/rbd-nomount/tasks/krbd_data_pool.yaml create mode 100644 qa/suites/krbd/rbd-nomount/tasks/krbd_exclusive_option.yaml create mode 100644 qa/suites/krbd/rbd-nomount/tasks/krbd_fallocate.yaml create mode 100644 qa/suites/krbd/rbd-nomount/tasks/krbd_latest_osdmap_on_map.yaml create mode 100644 qa/suites/krbd/rbd-nomount/tasks/krbd_namespaces.yaml create mode 100644 qa/suites/krbd/rbd-nomount/tasks/krbd_udev_enumerate.yaml create mode 100644 qa/suites/krbd/rbd-nomount/tasks/krbd_udev_netlink_enobufs.yaml create mode 100644 qa/suites/krbd/rbd-nomount/tasks/krbd_udev_netns.yaml create mode 100644 qa/suites/krbd/rbd-nomount/tasks/krbd_udev_symlinks.yaml create mode 100644 qa/suites/krbd/rbd-nomount/tasks/rbd_concurrent.yaml create mode 100644 qa/suites/krbd/rbd-nomount/tasks/rbd_huge_tickets.yaml create mode 100644 qa/suites/krbd/rbd-nomount/tasks/rbd_image_read.yaml create mode 100644 qa/suites/krbd/rbd-nomount/tasks/rbd_kernel.yaml create mode 100644 qa/suites/krbd/rbd-nomount/tasks/rbd_map_snapshot_io.yaml create mode 100644 qa/suites/krbd/rbd-nomount/tasks/rbd_map_unmap.yaml create mode 100644 qa/suites/krbd/rbd-nomount/tasks/rbd_simple_big.yaml create mode 100644 qa/suites/krbd/rbd/% create mode 120000 qa/suites/krbd/rbd/.qa create mode 120000 qa/suites/krbd/rbd/bluestore-bitmap.yaml create mode 120000 qa/suites/krbd/rbd/clusters/.qa create mode 120000 qa/suites/krbd/rbd/clusters/fixed-3.yaml create mode 100644 qa/suites/krbd/rbd/conf.yaml create mode 120000 qa/suites/krbd/rbd/ms_mode/.qa create mode 100644 qa/suites/krbd/rbd/ms_mode/crc.yaml create mode 100644 qa/suites/krbd/rbd/ms_mode/legacy.yaml create mode 100644 qa/suites/krbd/rbd/ms_mode/secure.yaml create mode 120000 qa/suites/krbd/rbd/msgr-failures/.qa create mode 100644 qa/suites/krbd/rbd/msgr-failures/few.yaml create mode 100644 qa/suites/krbd/rbd/msgr-failures/many.yaml create mode 120000 qa/suites/krbd/rbd/tasks/.qa create mode 100644 qa/suites/krbd/rbd/tasks/rbd_fio.yaml create mode 100644 qa/suites/krbd/rbd/tasks/rbd_workunit_kernel_untar_build.yaml create mode 100644 qa/suites/krbd/rbd/tasks/rbd_workunit_suites_dbench.yaml create mode 100644 qa/suites/krbd/rbd/tasks/rbd_workunit_suites_ffsb.yaml create mode 100644 qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress.yaml create mode 100644 qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_ext4.yaml create mode 100644 qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsx.yaml create mode 100644 qa/suites/krbd/rbd/tasks/rbd_workunit_suites_iozone.yaml create mode 100644 qa/suites/krbd/rbd/tasks/rbd_workunit_trivial_sync.yaml create mode 100644 qa/suites/krbd/singleton/% create mode 120000 qa/suites/krbd/singleton/.qa create mode 120000 qa/suites/krbd/singleton/bluestore-bitmap.yaml create mode 100644 qa/suites/krbd/singleton/conf.yaml create mode 120000 qa/suites/krbd/singleton/ms_mode$/.qa create mode 100644 qa/suites/krbd/singleton/ms_mode$/crc.yaml create mode 100644 
qa/suites/krbd/singleton/ms_mode$/legacy.yaml create mode 100644 qa/suites/krbd/singleton/ms_mode$/prefer-crc.yaml create mode 100644 qa/suites/krbd/singleton/ms_mode$/secure.yaml create mode 120000 qa/suites/krbd/singleton/msgr-failures/.qa create mode 100644 qa/suites/krbd/singleton/msgr-failures/few.yaml create mode 100644 qa/suites/krbd/singleton/msgr-failures/many.yaml create mode 120000 qa/suites/krbd/singleton/tasks/.qa create mode 100644 qa/suites/krbd/singleton/tasks/rbd_xfstests.yaml create mode 100644 qa/suites/krbd/thrash/% create mode 120000 qa/suites/krbd/thrash/.qa create mode 120000 qa/suites/krbd/thrash/bluestore-bitmap.yaml create mode 120000 qa/suites/krbd/thrash/ceph/.qa create mode 100644 qa/suites/krbd/thrash/ceph/ceph.yaml create mode 120000 qa/suites/krbd/thrash/clusters/.qa create mode 120000 qa/suites/krbd/thrash/clusters/fixed-3.yaml create mode 100644 qa/suites/krbd/thrash/conf.yaml create mode 120000 qa/suites/krbd/thrash/ms_mode$/.qa create mode 100644 qa/suites/krbd/thrash/ms_mode$/crc.yaml create mode 100644 qa/suites/krbd/thrash/ms_mode$/legacy.yaml create mode 100644 qa/suites/krbd/thrash/ms_mode$/prefer-crc.yaml create mode 100644 qa/suites/krbd/thrash/ms_mode$/secure.yaml create mode 120000 qa/suites/krbd/thrash/thrashers/.qa create mode 100644 qa/suites/krbd/thrash/thrashers/backoff.yaml create mode 100644 qa/suites/krbd/thrash/thrashers/mon-thrasher.yaml create mode 100644 qa/suites/krbd/thrash/thrashers/pggrow.yaml create mode 100644 qa/suites/krbd/thrash/thrashers/upmap.yaml create mode 120000 qa/suites/krbd/thrash/thrashosds-health.yaml create mode 120000 qa/suites/krbd/thrash/workloads/.qa create mode 100644 qa/suites/krbd/thrash/workloads/rbd_fio.yaml create mode 100644 qa/suites/krbd/thrash/workloads/rbd_workunit_suites_ffsb.yaml create mode 100644 qa/suites/krbd/unmap/% create mode 120000 qa/suites/krbd/unmap/.qa create mode 120000 qa/suites/krbd/unmap/ceph/.qa create mode 100644 qa/suites/krbd/unmap/ceph/ceph.yaml create mode 120000 qa/suites/krbd/unmap/clusters/.qa create mode 100644 qa/suites/krbd/unmap/clusters/separate-client.yaml create mode 100644 qa/suites/krbd/unmap/conf.yaml create mode 120000 qa/suites/krbd/unmap/filestore-xfs.yaml create mode 120000 qa/suites/krbd/unmap/kernels/.qa create mode 100644 qa/suites/krbd/unmap/kernels/pre-single-major.yaml create mode 100644 qa/suites/krbd/unmap/kernels/single-major-off.yaml create mode 100644 qa/suites/krbd/unmap/kernels/single-major-on.yaml create mode 120000 qa/suites/krbd/unmap/tasks/.qa create mode 100644 qa/suites/krbd/unmap/tasks/unmap.yaml create mode 120000 qa/suites/krbd/wac/.qa create mode 100644 qa/suites/krbd/wac/sysfs/% create mode 120000 qa/suites/krbd/wac/sysfs/.qa create mode 120000 qa/suites/krbd/wac/sysfs/bluestore-bitmap.yaml create mode 120000 qa/suites/krbd/wac/sysfs/ceph/.qa create mode 100644 qa/suites/krbd/wac/sysfs/ceph/ceph.yaml create mode 120000 qa/suites/krbd/wac/sysfs/clusters/.qa create mode 120000 qa/suites/krbd/wac/sysfs/clusters/fixed-1.yaml create mode 100644 qa/suites/krbd/wac/sysfs/conf.yaml create mode 120000 qa/suites/krbd/wac/sysfs/tasks/.qa create mode 100644 qa/suites/krbd/wac/sysfs/tasks/stable_writes.yaml create mode 100644 qa/suites/krbd/wac/wac/% create mode 120000 qa/suites/krbd/wac/wac/.qa create mode 120000 qa/suites/krbd/wac/wac/bluestore-bitmap.yaml create mode 120000 qa/suites/krbd/wac/wac/ceph/.qa create mode 100644 qa/suites/krbd/wac/wac/ceph/ceph.yaml create mode 120000 qa/suites/krbd/wac/wac/clusters/.qa create mode 120000 
qa/suites/krbd/wac/wac/clusters/fixed-3.yaml create mode 100644 qa/suites/krbd/wac/wac/conf.yaml create mode 120000 qa/suites/krbd/wac/wac/tasks/.qa create mode 100644 qa/suites/krbd/wac/wac/tasks/wac.yaml create mode 120000 qa/suites/krbd/wac/wac/verify/.qa create mode 100644 qa/suites/krbd/wac/wac/verify/many-resets.yaml create mode 100644 qa/suites/krbd/wac/wac/verify/no-resets.yaml create mode 120000 qa/suites/marginal/.qa create mode 100644 qa/suites/marginal/basic/% create mode 120000 qa/suites/marginal/basic/.qa create mode 120000 qa/suites/marginal/basic/clusters/.qa create mode 100644 qa/suites/marginal/basic/clusters/fixed-3.yaml create mode 120000 qa/suites/marginal/basic/tasks/.qa create mode 100644 qa/suites/marginal/basic/tasks/kclient_workunit_suites_blogbench.yaml create mode 100644 qa/suites/marginal/basic/tasks/kclient_workunit_suites_fsx.yaml create mode 100644 qa/suites/marginal/fs-misc/% create mode 120000 qa/suites/marginal/fs-misc/.qa create mode 120000 qa/suites/marginal/fs-misc/clusters/.qa create mode 100644 qa/suites/marginal/fs-misc/clusters/two_clients.yaml create mode 120000 qa/suites/marginal/fs-misc/tasks/.qa create mode 100644 qa/suites/marginal/fs-misc/tasks/locktest.yaml create mode 100644 qa/suites/marginal/mds_restart/% create mode 120000 qa/suites/marginal/mds_restart/.qa create mode 120000 qa/suites/marginal/mds_restart/clusters/.qa create mode 100644 qa/suites/marginal/mds_restart/clusters/one_mds.yaml create mode 120000 qa/suites/marginal/mds_restart/tasks/.qa create mode 100644 qa/suites/marginal/mds_restart/tasks/restart-workunit-backtraces.yaml create mode 100644 qa/suites/marginal/multimds/% create mode 120000 qa/suites/marginal/multimds/.qa create mode 120000 qa/suites/marginal/multimds/clusters/.qa create mode 100644 qa/suites/marginal/multimds/clusters/3-node-3-mds.yaml create mode 100644 qa/suites/marginal/multimds/clusters/3-node-9-mds.yaml create mode 120000 qa/suites/marginal/multimds/mounts/.qa create mode 100644 qa/suites/marginal/multimds/mounts/ceph-fuse.yaml create mode 100644 qa/suites/marginal/multimds/mounts/kclient.yaml create mode 120000 qa/suites/marginal/multimds/tasks/.qa create mode 100644 qa/suites/marginal/multimds/tasks/workunit_misc.yaml create mode 100644 qa/suites/marginal/multimds/tasks/workunit_suites_blogbench.yaml create mode 100644 qa/suites/marginal/multimds/tasks/workunit_suites_dbench.yaml create mode 100644 qa/suites/marginal/multimds/tasks/workunit_suites_fsstress.yaml create mode 100644 qa/suites/marginal/multimds/tasks/workunit_suites_fsync.yaml create mode 100644 qa/suites/marginal/multimds/tasks/workunit_suites_pjd.yaml create mode 100644 qa/suites/marginal/multimds/tasks/workunit_suites_truncate_delay.yaml create mode 120000 qa/suites/marginal/multimds/thrash/.qa create mode 100644 qa/suites/marginal/multimds/thrash/exports.yaml create mode 100644 qa/suites/marginal/multimds/thrash/normal.yaml create mode 120000 qa/suites/mixed-clients/.qa create mode 120000 qa/suites/mixed-clients/basic/.qa create mode 120000 qa/suites/mixed-clients/basic/clusters/.qa create mode 100644 qa/suites/mixed-clients/basic/clusters/fixed-3.yaml create mode 120000 qa/suites/mixed-clients/basic/objectstore create mode 120000 qa/suites/mixed-clients/basic/tasks/.qa create mode 100644 qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_dbench_iozone.yaml create mode 100644 qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml create mode 120000 qa/suites/multimds/.qa create mode 100644 
qa/suites/multimds/basic/% create mode 120000 qa/suites/multimds/basic/.qa create mode 120000 qa/suites/multimds/basic/0-supported-random-distro$ create mode 120000 qa/suites/multimds/basic/begin.yaml create mode 120000 qa/suites/multimds/basic/clusters/.qa create mode 120000 qa/suites/multimds/basic/clusters/3-mds.yaml create mode 120000 qa/suites/multimds/basic/clusters/9-mds.yaml create mode 120000 qa/suites/multimds/basic/conf create mode 120000 qa/suites/multimds/basic/inline create mode 120000 qa/suites/multimds/basic/mount create mode 120000 qa/suites/multimds/basic/objectstore-ec create mode 100644 qa/suites/multimds/basic/overrides/% create mode 120000 qa/suites/multimds/basic/overrides/.qa create mode 120000 qa/suites/multimds/basic/overrides/basic create mode 120000 qa/suites/multimds/basic/overrides/fuse-default-perm-no.yaml create mode 120000 qa/suites/multimds/basic/q_check_counter/.qa create mode 100644 qa/suites/multimds/basic/q_check_counter/check_counter.yaml create mode 120000 qa/suites/multimds/basic/tasks/.qa create mode 100644 qa/suites/multimds/basic/tasks/cephfs_test_exports.yaml create mode 100644 qa/suites/multimds/basic/tasks/cephfs_test_snapshots.yaml create mode 100644 qa/suites/multimds/basic/tasks/cfuse_workunit_kernel_untar_build.yaml create mode 100644 qa/suites/multimds/basic/tasks/cfuse_workunit_misc.yaml create mode 100644 qa/suites/multimds/basic/tasks/cfuse_workunit_norstats.yaml create mode 120000 qa/suites/multimds/basic/tasks/cfuse_workunit_suites_blogbench.yaml create mode 120000 qa/suites/multimds/basic/tasks/cfuse_workunit_suites_dbench.yaml create mode 120000 qa/suites/multimds/basic/tasks/cfuse_workunit_suites_ffsb.yaml create mode 120000 qa/suites/multimds/basic/tasks/cfuse_workunit_suites_fsstress.yaml create mode 100644 qa/suites/multimds/basic/tasks/cfuse_workunit_suites_fsx.yaml create mode 100644 qa/suites/multimds/basic/tasks/cfuse_workunit_suites_pjd.yaml create mode 100644 qa/suites/multimds/thrash/% create mode 120000 qa/suites/multimds/thrash/.qa create mode 120000 qa/suites/multimds/thrash/0-supported-random-distro$ create mode 120000 qa/suites/multimds/thrash/begin.yaml create mode 120000 qa/suites/multimds/thrash/ceph-thrash create mode 120000 qa/suites/multimds/thrash/clusters/.qa create mode 100644 qa/suites/multimds/thrash/clusters/3-mds-2-standby.yaml create mode 100644 qa/suites/multimds/thrash/clusters/9-mds-3-standby.yaml create mode 120000 qa/suites/multimds/thrash/conf create mode 120000 qa/suites/multimds/thrash/mount create mode 120000 qa/suites/multimds/thrash/msgr-failures create mode 120000 qa/suites/multimds/thrash/objectstore-ec create mode 100644 qa/suites/multimds/thrash/overrides/% create mode 120000 qa/suites/multimds/thrash/overrides/.qa create mode 120000 qa/suites/multimds/thrash/overrides/fuse-default-perm-no.yaml create mode 120000 qa/suites/multimds/thrash/overrides/thrash create mode 100644 qa/suites/multimds/thrash/overrides/thrash_debug.yaml create mode 120000 qa/suites/multimds/thrash/tasks/.qa create mode 120000 qa/suites/multimds/thrash/tasks/cfuse_workunit_suites_fsstress.yaml create mode 120000 qa/suites/multimds/thrash/tasks/cfuse_workunit_suites_pjd.yaml create mode 100644 qa/suites/multimds/verify/% create mode 120000 qa/suites/multimds/verify/.qa create mode 120000 qa/suites/multimds/verify/begin.yaml create mode 120000 qa/suites/multimds/verify/centos_latest.yaml create mode 120000 qa/suites/multimds/verify/clusters/.qa create mode 120000 qa/suites/multimds/verify/clusters/3-mds.yaml create 
mode 120000 qa/suites/multimds/verify/clusters/9-mds.yaml create mode 120000 qa/suites/multimds/verify/conf create mode 120000 qa/suites/multimds/verify/mount create mode 120000 qa/suites/multimds/verify/objectstore-ec create mode 100644 qa/suites/multimds/verify/overrides/% create mode 120000 qa/suites/multimds/verify/overrides/.qa create mode 120000 qa/suites/multimds/verify/overrides/fuse-default-perm-no.yaml create mode 120000 qa/suites/multimds/verify/overrides/verify create mode 120000 qa/suites/multimds/verify/tasks create mode 120000 qa/suites/multimds/verify/validater create mode 100644 qa/suites/perf-basic/% create mode 120000 qa/suites/perf-basic/.qa create mode 100644 qa/suites/perf-basic/ceph.yaml create mode 120000 qa/suites/perf-basic/objectstore/.qa create mode 100644 qa/suites/perf-basic/objectstore/bluestore.yaml create mode 100644 qa/suites/perf-basic/objectstore/filestore-xfs.yaml create mode 120000 qa/suites/perf-basic/settings/.qa create mode 100644 qa/suites/perf-basic/settings/optimized.yaml create mode 120000 qa/suites/perf-basic/supported-all-distro create mode 120000 qa/suites/perf-basic/workloads/.qa create mode 100644 qa/suites/perf-basic/workloads/cosbench_64K_write.yaml create mode 100644 qa/suites/perf-basic/workloads/fio_4K_rand_write.yaml create mode 100644 qa/suites/perf-basic/workloads/radosbench_4K_write.yaml create mode 120000 qa/suites/powercycle/.qa create mode 100644 qa/suites/powercycle/osd/% create mode 120000 qa/suites/powercycle/osd/.qa create mode 120000 qa/suites/powercycle/osd/clusters/.qa create mode 100644 qa/suites/powercycle/osd/clusters/3osd-1per-target.yaml create mode 120000 qa/suites/powercycle/osd/objectstore create mode 120000 qa/suites/powercycle/osd/powercycle/.qa create mode 100644 qa/suites/powercycle/osd/powercycle/default.yaml create mode 120000 qa/suites/powercycle/osd/supported-all-distro create mode 120000 qa/suites/powercycle/osd/tasks/.qa create mode 100644 qa/suites/powercycle/osd/tasks/admin_socket_objecter_requests.yaml create mode 100644 qa/suites/powercycle/osd/tasks/cfuse_workunit_kernel_untar_build.yaml create mode 100644 qa/suites/powercycle/osd/tasks/cfuse_workunit_misc.yaml create mode 100644 qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_ffsb.yaml create mode 100644 qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsstress.yaml create mode 100644 qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsx.yaml create mode 100644 qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsync.yaml create mode 100644 qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_pjd.yaml create mode 100644 qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_truncate_delay.yaml create mode 100644 qa/suites/powercycle/osd/tasks/rados_api_tests.yaml create mode 100644 qa/suites/powercycle/osd/tasks/radosbench.yaml create mode 100644 qa/suites/powercycle/osd/tasks/readwrite.yaml create mode 100644 qa/suites/powercycle/osd/tasks/snaps-few-objects.yaml create mode 100644 qa/suites/powercycle/osd/tasks/snaps-many-objects.yaml create mode 120000 qa/suites/powercycle/osd/thrashosds-health.yaml create mode 100644 qa/suites/powercycle/osd/whitelist_health.yaml create mode 120000 qa/suites/rados/.qa create mode 100644 qa/suites/rados/basic/% create mode 120000 qa/suites/rados/basic/.qa create mode 100644 qa/suites/rados/basic/ceph.yaml create mode 100644 qa/suites/rados/basic/clusters/+ create mode 120000 qa/suites/rados/basic/clusters/.qa create mode 120000 qa/suites/rados/basic/clusters/fixed-2.yaml create mode 100644 
qa/suites/rados/basic/clusters/openstack.yaml create mode 120000 qa/suites/rados/basic/msgr create mode 120000 qa/suites/rados/basic/msgr-failures/.qa create mode 100644 qa/suites/rados/basic/msgr-failures/few.yaml create mode 100644 qa/suites/rados/basic/msgr-failures/many.yaml create mode 120000 qa/suites/rados/basic/objectstore create mode 120000 qa/suites/rados/basic/rados.yaml create mode 120000 qa/suites/rados/basic/supported-random-distro$ create mode 120000 qa/suites/rados/basic/tasks/.qa create mode 100644 qa/suites/rados/basic/tasks/rados_api_tests.yaml create mode 100644 qa/suites/rados/basic/tasks/rados_cls_all.yaml create mode 100644 qa/suites/rados/basic/tasks/rados_python.yaml create mode 100644 qa/suites/rados/basic/tasks/rados_stress_watch.yaml create mode 100644 qa/suites/rados/basic/tasks/rados_striper.yaml create mode 100644 qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml create mode 100644 qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml create mode 100644 qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml create mode 100644 qa/suites/rados/basic/tasks/readwrite.yaml create mode 100644 qa/suites/rados/basic/tasks/repair_test.yaml create mode 100644 qa/suites/rados/basic/tasks/rgw_snaps.yaml create mode 100644 qa/suites/rados/basic/tasks/scrub_test.yaml create mode 100644 qa/suites/rados/dashboard/% create mode 120000 qa/suites/rados/dashboard/.qa create mode 100644 qa/suites/rados/dashboard/clusters/+ create mode 120000 qa/suites/rados/dashboard/clusters/.qa create mode 120000 qa/suites/rados/dashboard/clusters/2-node-mgr.yaml create mode 120000 qa/suites/rados/dashboard/debug/.qa create mode 120000 qa/suites/rados/dashboard/debug/mgr.yaml create mode 120000 qa/suites/rados/dashboard/objectstore create mode 120000 qa/suites/rados/dashboard/supported-random-distro$ create mode 120000 qa/suites/rados/dashboard/tasks/.qa create mode 100644 qa/suites/rados/dashboard/tasks/dashboard.yaml create mode 100644 qa/suites/rados/mgr/% create mode 120000 qa/suites/rados/mgr/.qa create mode 100644 qa/suites/rados/mgr/clusters/+ create mode 120000 qa/suites/rados/mgr/clusters/.qa create mode 120000 qa/suites/rados/mgr/clusters/2-node-mgr.yaml create mode 120000 qa/suites/rados/mgr/debug/.qa create mode 120000 qa/suites/rados/mgr/debug/mgr.yaml create mode 120000 qa/suites/rados/mgr/objectstore create mode 120000 qa/suites/rados/mgr/supported-random-distro$ create mode 120000 qa/suites/rados/mgr/tasks/.qa create mode 100644 qa/suites/rados/mgr/tasks/crash.yaml create mode 100644 qa/suites/rados/mgr/tasks/failover.yaml create mode 100644 qa/suites/rados/mgr/tasks/insights.yaml create mode 100644 qa/suites/rados/mgr/tasks/module_selftest.yaml create mode 100644 qa/suites/rados/mgr/tasks/orchestrator_cli.yaml create mode 100644 qa/suites/rados/mgr/tasks/progress.yaml create mode 100644 qa/suites/rados/mgr/tasks/prometheus.yaml create mode 100644 qa/suites/rados/mgr/tasks/ssh_orchestrator.yaml create mode 100644 qa/suites/rados/mgr/tasks/workunits.yaml create mode 100644 qa/suites/rados/monthrash/% create mode 120000 qa/suites/rados/monthrash/.qa create mode 100644 qa/suites/rados/monthrash/ceph.yaml create mode 120000 qa/suites/rados/monthrash/clusters/.qa create mode 100644 qa/suites/rados/monthrash/clusters/3-mons.yaml create mode 100644 qa/suites/rados/monthrash/clusters/9-mons.yaml create mode 120000 qa/suites/rados/monthrash/msgr create mode 120000 qa/suites/rados/monthrash/msgr-failures/.qa create mode 100644 
qa/suites/rados/monthrash/msgr-failures/few.yaml create mode 100644 qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml create mode 120000 qa/suites/rados/monthrash/objectstore create mode 120000 qa/suites/rados/monthrash/rados.yaml create mode 120000 qa/suites/rados/monthrash/supported-random-distro$ create mode 120000 qa/suites/rados/monthrash/thrashers/.qa create mode 100644 qa/suites/rados/monthrash/thrashers/force-sync-many.yaml create mode 100644 qa/suites/rados/monthrash/thrashers/many.yaml create mode 100644 qa/suites/rados/monthrash/thrashers/one.yaml create mode 100644 qa/suites/rados/monthrash/thrashers/sync-many.yaml create mode 100644 qa/suites/rados/monthrash/thrashers/sync.yaml create mode 120000 qa/suites/rados/monthrash/workloads/.qa create mode 100644 qa/suites/rados/monthrash/workloads/pool-create-delete.yaml create mode 100644 qa/suites/rados/monthrash/workloads/rados_5925.yaml create mode 100644 qa/suites/rados/monthrash/workloads/rados_api_tests.yaml create mode 100644 qa/suites/rados/monthrash/workloads/rados_mon_osdmap_prune.yaml create mode 100644 qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml create mode 100644 qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml create mode 100644 qa/suites/rados/multimon/% create mode 120000 qa/suites/rados/multimon/.qa create mode 120000 qa/suites/rados/multimon/clusters/.qa create mode 100644 qa/suites/rados/multimon/clusters/21.yaml create mode 100644 qa/suites/rados/multimon/clusters/3.yaml create mode 100644 qa/suites/rados/multimon/clusters/6.yaml create mode 100644 qa/suites/rados/multimon/clusters/9.yaml create mode 120000 qa/suites/rados/multimon/msgr create mode 120000 qa/suites/rados/multimon/msgr-failures/.qa create mode 100644 qa/suites/rados/multimon/msgr-failures/few.yaml create mode 100644 qa/suites/rados/multimon/msgr-failures/many.yaml create mode 100644 qa/suites/rados/multimon/no_pools.yaml create mode 120000 qa/suites/rados/multimon/objectstore create mode 120000 qa/suites/rados/multimon/rados.yaml create mode 120000 qa/suites/rados/multimon/supported-random-distro$ create mode 120000 qa/suites/rados/multimon/tasks/.qa create mode 100644 qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml create mode 100644 qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml create mode 100644 qa/suites/rados/multimon/tasks/mon_recovery.yaml create mode 100644 qa/suites/rados/objectstore/% create mode 120000 qa/suites/rados/objectstore/.qa create mode 120000 qa/suites/rados/objectstore/backends/.qa create mode 100644 qa/suites/rados/objectstore/backends/alloc-hint.yaml create mode 100644 qa/suites/rados/objectstore/backends/ceph_objectstore_tool.yaml create mode 100644 qa/suites/rados/objectstore/backends/filejournal.yaml create mode 100644 qa/suites/rados/objectstore/backends/filestore-idempotent-aio-journal.yaml create mode 100644 qa/suites/rados/objectstore/backends/filestore-idempotent.yaml create mode 100644 qa/suites/rados/objectstore/backends/fusestore.yaml create mode 100644 qa/suites/rados/objectstore/backends/keyvaluedb.yaml create mode 100644 qa/suites/rados/objectstore/backends/objectcacher-stress.yaml create mode 100644 qa/suites/rados/objectstore/backends/objectstore.yaml create mode 120000 qa/suites/rados/objectstore/supported-random-distro$ create mode 100644 qa/suites/rados/perf/% create mode 120000 qa/suites/rados/perf/.qa create mode 100644 qa/suites/rados/perf/ceph.yaml create mode 120000 qa/suites/rados/perf/distros/ubuntu_16.04.yaml create mode 120000 
qa/suites/rados/perf/distros/ubuntu_latest.yaml create mode 120000 qa/suites/rados/perf/objectstore create mode 100644 qa/suites/rados/perf/openstack.yaml create mode 120000 qa/suites/rados/perf/settings/.qa create mode 100644 qa/suites/rados/perf/settings/optimized.yaml create mode 120000 qa/suites/rados/perf/workloads/.qa create mode 100644 qa/suites/rados/perf/workloads/cosbench_64K_read_write.yaml create mode 100644 qa/suites/rados/perf/workloads/cosbench_64K_write.yaml create mode 100644 qa/suites/rados/perf/workloads/fio_4K_rand_read.yaml create mode 100644 qa/suites/rados/perf/workloads/fio_4K_rand_rw.yaml create mode 100644 qa/suites/rados/perf/workloads/fio_4M_rand_read.yaml create mode 100644 qa/suites/rados/perf/workloads/fio_4M_rand_rw.yaml create mode 100644 qa/suites/rados/perf/workloads/fio_4M_rand_write.yaml create mode 100644 qa/suites/rados/perf/workloads/radosbench_4K_rand_read.yaml create mode 100644 qa/suites/rados/perf/workloads/radosbench_4K_seq_read.yaml create mode 100644 qa/suites/rados/perf/workloads/radosbench_4M_rand_read.yaml create mode 100644 qa/suites/rados/perf/workloads/radosbench_4M_seq_read.yaml create mode 100644 qa/suites/rados/perf/workloads/radosbench_4M_write.yaml create mode 100644 qa/suites/rados/perf/workloads/sample_fio.yaml create mode 100644 qa/suites/rados/perf/workloads/sample_radosbench.yaml create mode 100644 qa/suites/rados/rest/% create mode 120000 qa/suites/rados/rest/.qa create mode 100644 qa/suites/rados/rest/mgr-restful.yaml create mode 120000 qa/suites/rados/rest/supported-random-distro$ create mode 100644 qa/suites/rados/singleton-bluestore/% create mode 120000 qa/suites/rados/singleton-bluestore/.qa create mode 120000 qa/suites/rados/singleton-bluestore/all/.qa create mode 100644 qa/suites/rados/singleton-bluestore/all/cephtool.yaml create mode 120000 qa/suites/rados/singleton-bluestore/msgr create mode 120000 qa/suites/rados/singleton-bluestore/msgr-failures/.qa create mode 100644 qa/suites/rados/singleton-bluestore/msgr-failures/few.yaml create mode 100644 qa/suites/rados/singleton-bluestore/msgr-failures/many.yaml create mode 120000 qa/suites/rados/singleton-bluestore/objectstore/.qa create mode 120000 qa/suites/rados/singleton-bluestore/objectstore/bluestore-bitmap.yaml create mode 120000 qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-lz4.yaml create mode 120000 qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-snappy.yaml create mode 120000 qa/suites/rados/singleton-bluestore/rados.yaml create mode 120000 qa/suites/rados/singleton-bluestore/supported-random-distro$ create mode 120000 qa/suites/rados/singleton-flat/.qa create mode 100644 qa/suites/rados/singleton-flat/valgrind-leaks.yaml create mode 100644 qa/suites/rados/singleton-nomsgr/% create mode 120000 qa/suites/rados/singleton-nomsgr/.qa create mode 120000 qa/suites/rados/singleton-nomsgr/all/.qa create mode 100644 qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml create mode 100644 qa/suites/rados/singleton-nomsgr/all/balancer.yaml create mode 100644 qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml create mode 100644 qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml create mode 100644 qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml create mode 100644 qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml create mode 100644 qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml create mode 100644 qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml create mode 100644 
qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml create mode 100644 qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml create mode 100644 qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml create mode 100644 qa/suites/rados/singleton-nomsgr/all/msgr.yaml create mode 100644 qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml create mode 100644 qa/suites/rados/singleton-nomsgr/all/pool-access.yaml create mode 100644 qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml create mode 120000 qa/suites/rados/singleton-nomsgr/rados.yaml create mode 120000 qa/suites/rados/singleton-nomsgr/supported-random-distro$ create mode 100644 qa/suites/rados/singleton/% create mode 120000 qa/suites/rados/singleton/.qa create mode 120000 qa/suites/rados/singleton/all/.qa create mode 100644 qa/suites/rados/singleton/all/admin-socket.yaml create mode 100644 qa/suites/rados/singleton/all/deduptool.yaml create mode 100644 qa/suites/rados/singleton/all/divergent_priors.yaml create mode 100644 qa/suites/rados/singleton/all/divergent_priors2.yaml create mode 100644 qa/suites/rados/singleton/all/dump-stuck.yaml create mode 100644 qa/suites/rados/singleton/all/ec-lost-unfound.yaml create mode 100644 qa/suites/rados/singleton/all/erasure-code-nonregression.yaml create mode 100644 qa/suites/rados/singleton/all/lost-unfound-delete.yaml create mode 100644 qa/suites/rados/singleton/all/lost-unfound.yaml create mode 100644 qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml create mode 100644 qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml create mode 100644 qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml create mode 100644 qa/suites/rados/singleton/all/mon-auth-caps.yaml create mode 100644 qa/suites/rados/singleton/all/mon-config-key-caps.yaml create mode 100644 qa/suites/rados/singleton/all/mon-config-keys.yaml create mode 100644 qa/suites/rados/singleton/all/mon-config.yaml create mode 100644 qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled create mode 100644 qa/suites/rados/singleton/all/osd-backfill.yaml create mode 100644 qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml create mode 100644 qa/suites/rados/singleton/all/osd-recovery.yaml create mode 100644 qa/suites/rados/singleton/all/peer.yaml create mode 100644 qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml create mode 100644 qa/suites/rados/singleton/all/pg-autoscaler.yaml create mode 100644 qa/suites/rados/singleton/all/pg-removal-interruption.yaml create mode 100644 qa/suites/rados/singleton/all/radostool.yaml create mode 100644 qa/suites/rados/singleton/all/random-eio.yaml create mode 100644 qa/suites/rados/singleton/all/rebuild-mondb.yaml create mode 100644 qa/suites/rados/singleton/all/recovery-preemption.yaml create mode 100644 qa/suites/rados/singleton/all/resolve_stuck_peering.yaml create mode 100644 qa/suites/rados/singleton/all/test-crash.yaml create mode 100644 qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml create mode 100644 qa/suites/rados/singleton/all/thrash-backfill-full.yaml create mode 100644 qa/suites/rados/singleton/all/thrash-eio.yaml create mode 100644 qa/suites/rados/singleton/all/thrash-rados/+ create mode 120000 qa/suites/rados/singleton/all/thrash-rados/.qa create mode 100644 qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml create mode 120000 qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml create mode 100644 
qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml create mode 100644 qa/suites/rados/singleton/all/watch-notify-same-primary.yaml create mode 120000 qa/suites/rados/singleton/msgr create mode 120000 qa/suites/rados/singleton/msgr-failures/.qa create mode 100644 qa/suites/rados/singleton/msgr-failures/few.yaml create mode 100644 qa/suites/rados/singleton/msgr-failures/many.yaml create mode 120000 qa/suites/rados/singleton/objectstore create mode 120000 qa/suites/rados/singleton/rados.yaml create mode 120000 qa/suites/rados/singleton/supported-random-distro$ create mode 100644 qa/suites/rados/standalone/% create mode 120000 qa/suites/rados/standalone/.qa create mode 120000 qa/suites/rados/standalone/supported-random-distro$ create mode 120000 qa/suites/rados/standalone/workloads/.qa create mode 100644 qa/suites/rados/standalone/workloads/crush.yaml create mode 100644 qa/suites/rados/standalone/workloads/erasure-code.yaml create mode 100644 qa/suites/rados/standalone/workloads/mgr.yaml create mode 100644 qa/suites/rados/standalone/workloads/misc.yaml create mode 100644 qa/suites/rados/standalone/workloads/mon.yaml create mode 100644 qa/suites/rados/standalone/workloads/osd.yaml create mode 100644 qa/suites/rados/standalone/workloads/scrub.yaml create mode 100644 qa/suites/rados/thrash-erasure-code-big/% create mode 120000 qa/suites/rados/thrash-erasure-code-big/.qa create mode 120000 qa/suites/rados/thrash-erasure-code-big/ceph.yaml create mode 100644 qa/suites/rados/thrash-erasure-code-big/cluster/+ create mode 120000 qa/suites/rados/thrash-erasure-code-big/cluster/.qa create mode 100644 qa/suites/rados/thrash-erasure-code-big/cluster/12-osds.yaml create mode 100644 qa/suites/rados/thrash-erasure-code-big/cluster/openstack.yaml create mode 120000 qa/suites/rados/thrash-erasure-code-big/msgr-failures create mode 120000 qa/suites/rados/thrash-erasure-code-big/objectstore create mode 120000 qa/suites/rados/thrash-erasure-code-big/rados.yaml create mode 120000 qa/suites/rados/thrash-erasure-code-big/recovery-overrides create mode 120000 qa/suites/rados/thrash-erasure-code-big/supported-random-distro$ create mode 120000 qa/suites/rados/thrash-erasure-code-big/thrashers/.qa create mode 100644 qa/suites/rados/thrash-erasure-code-big/thrashers/careful.yaml create mode 100644 qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml create mode 100644 qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml create mode 100644 qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml create mode 100644 qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml create mode 100644 qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml create mode 120000 qa/suites/rados/thrash-erasure-code-big/thrashosds-health.yaml create mode 120000 qa/suites/rados/thrash-erasure-code-big/workloads/.qa create mode 120000 qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml create mode 120000 qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml create mode 100644 qa/suites/rados/thrash-erasure-code-isa/% create mode 120000 qa/suites/rados/thrash-erasure-code-isa/.qa create mode 120000 qa/suites/rados/thrash-erasure-code-isa/arch/.qa create mode 100644 qa/suites/rados/thrash-erasure-code-isa/arch/x86_64.yaml create mode 120000 qa/suites/rados/thrash-erasure-code-isa/ceph.yaml create mode 120000 qa/suites/rados/thrash-erasure-code-isa/clusters create mode 120000 
qa/suites/rados/thrash-erasure-code-isa/msgr-failures create mode 120000 qa/suites/rados/thrash-erasure-code-isa/objectstore create mode 120000 qa/suites/rados/thrash-erasure-code-isa/rados.yaml create mode 120000 qa/suites/rados/thrash-erasure-code-isa/recovery-overrides create mode 120000 qa/suites/rados/thrash-erasure-code-isa/supported-random-distro$ create mode 120000 qa/suites/rados/thrash-erasure-code-isa/thrashers create mode 120000 qa/suites/rados/thrash-erasure-code-isa/thrashosds-health.yaml create mode 120000 qa/suites/rados/thrash-erasure-code-isa/workloads/.qa create mode 120000 qa/suites/rados/thrash-erasure-code-isa/workloads/ec-rados-plugin=isa-k=2-m=1.yaml create mode 100644 qa/suites/rados/thrash-erasure-code-overwrites/% create mode 120000 qa/suites/rados/thrash-erasure-code-overwrites/.qa create mode 120000 qa/suites/rados/thrash-erasure-code-overwrites/bluestore-bitmap.yaml create mode 120000 qa/suites/rados/thrash-erasure-code-overwrites/ceph.yaml create mode 120000 qa/suites/rados/thrash-erasure-code-overwrites/clusters create mode 120000 qa/suites/rados/thrash-erasure-code-overwrites/fast create mode 120000 qa/suites/rados/thrash-erasure-code-overwrites/msgr-failures create mode 120000 qa/suites/rados/thrash-erasure-code-overwrites/rados.yaml create mode 120000 qa/suites/rados/thrash-erasure-code-overwrites/recovery-overrides create mode 120000 qa/suites/rados/thrash-erasure-code-overwrites/supported-random-distro$ create mode 120000 qa/suites/rados/thrash-erasure-code-overwrites/thrashers create mode 120000 qa/suites/rados/thrash-erasure-code-overwrites/thrashosds-health.yaml create mode 120000 qa/suites/rados/thrash-erasure-code-overwrites/workloads/.qa create mode 100644 qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-pool-snaps-few-objects-overwrites.yaml create mode 100644 qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-fast-read-overwrites.yaml create mode 100644 qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-overwrites.yaml create mode 100644 qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-snaps-few-objects-overwrites.yaml create mode 100644 qa/suites/rados/thrash-erasure-code-shec/% create mode 120000 qa/suites/rados/thrash-erasure-code-shec/.qa create mode 120000 qa/suites/rados/thrash-erasure-code-shec/ceph.yaml create mode 100644 qa/suites/rados/thrash-erasure-code-shec/clusters/+ create mode 120000 qa/suites/rados/thrash-erasure-code-shec/clusters/.qa create mode 120000 qa/suites/rados/thrash-erasure-code-shec/clusters/fixed-4.yaml create mode 100644 qa/suites/rados/thrash-erasure-code-shec/clusters/openstack.yaml create mode 120000 qa/suites/rados/thrash-erasure-code-shec/msgr-failures create mode 120000 qa/suites/rados/thrash-erasure-code-shec/objectstore create mode 120000 qa/suites/rados/thrash-erasure-code-shec/rados.yaml create mode 120000 qa/suites/rados/thrash-erasure-code-shec/recovery-overrides create mode 120000 qa/suites/rados/thrash-erasure-code-shec/supported-random-distro$ create mode 120000 qa/suites/rados/thrash-erasure-code-shec/thrashers/.qa create mode 100644 qa/suites/rados/thrash-erasure-code-shec/thrashers/careful.yaml create mode 100644 qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml create mode 120000 qa/suites/rados/thrash-erasure-code-shec/thrashosds-health.yaml create mode 120000 qa/suites/rados/thrash-erasure-code-shec/workloads/.qa create mode 120000 
qa/suites/rados/thrash-erasure-code-shec/workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml create mode 100644 qa/suites/rados/thrash-erasure-code/% create mode 120000 qa/suites/rados/thrash-erasure-code/.qa create mode 100644 qa/suites/rados/thrash-erasure-code/ceph.yaml create mode 120000 qa/suites/rados/thrash-erasure-code/clusters create mode 120000 qa/suites/rados/thrash-erasure-code/fast/.qa create mode 100644 qa/suites/rados/thrash-erasure-code/fast/fast.yaml create mode 100644 qa/suites/rados/thrash-erasure-code/fast/normal.yaml create mode 120000 qa/suites/rados/thrash-erasure-code/msgr-failures create mode 120000 qa/suites/rados/thrash-erasure-code/objectstore create mode 120000 qa/suites/rados/thrash-erasure-code/rados.yaml create mode 120000 qa/suites/rados/thrash-erasure-code/recovery-overrides create mode 120000 qa/suites/rados/thrash-erasure-code/supported-random-distro$ create mode 120000 qa/suites/rados/thrash-erasure-code/thrashers/.qa create mode 100644 qa/suites/rados/thrash-erasure-code/thrashers/careful.yaml create mode 100644 qa/suites/rados/thrash-erasure-code/thrashers/default.yaml create mode 100644 qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml create mode 100644 qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml create mode 100644 qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml create mode 120000 qa/suites/rados/thrash-erasure-code/thrashosds-health.yaml create mode 120000 qa/suites/rados/thrash-erasure-code/workloads/.qa create mode 120000 qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=clay-k=4-m=2.yaml create mode 120000 qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml create mode 120000 qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml create mode 100644 qa/suites/rados/thrash-erasure-code/workloads/ec-radosbench.yaml create mode 100644 qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-fast-read.yaml create mode 100644 qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-many-deletes.yaml create mode 100644 qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects.yaml create mode 100644 qa/suites/rados/thrash-old-clients/% create mode 120000 qa/suites/rados/thrash-old-clients/.qa create mode 120000 qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/.qa create mode 120000 qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/2-size-2-min-size.yaml create mode 120000 qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/3-size-2-min-size.yaml create mode 120000 qa/suites/rados/thrash-old-clients/1-install/.qa create mode 100644 qa/suites/rados/thrash-old-clients/1-install/hammer.yaml create mode 100644 qa/suites/rados/thrash-old-clients/1-install/jewel.yaml create mode 100644 qa/suites/rados/thrash-old-clients/1-install/luminous.yaml create mode 120000 qa/suites/rados/thrash-old-clients/backoff/.qa create mode 100644 qa/suites/rados/thrash-old-clients/backoff/normal.yaml create mode 100644 qa/suites/rados/thrash-old-clients/backoff/peering.yaml create mode 100644 qa/suites/rados/thrash-old-clients/backoff/peering_and_degraded.yaml create mode 100644 qa/suites/rados/thrash-old-clients/ceph.yaml create mode 100644 qa/suites/rados/thrash-old-clients/clusters/+ create mode 120000 qa/suites/rados/thrash-old-clients/clusters/.qa create mode 100644 qa/suites/rados/thrash-old-clients/clusters/openstack.yaml create mode 100644 qa/suites/rados/thrash-old-clients/clusters/three-plus-one.yaml create mode 
120000 qa/suites/rados/thrash-old-clients/d-balancer/.qa create mode 100644 qa/suites/rados/thrash-old-clients/d-balancer/crush-compat.yaml create mode 100644 qa/suites/rados/thrash-old-clients/d-balancer/off.yaml create mode 120000 qa/suites/rados/thrash-old-clients/distro$/.qa create mode 120000 qa/suites/rados/thrash-old-clients/distro$/centos_latest.yaml create mode 120000 qa/suites/rados/thrash-old-clients/distro$/ubuntu_16.04.yaml create mode 120000 qa/suites/rados/thrash-old-clients/msgr-failures/.qa create mode 100644 qa/suites/rados/thrash-old-clients/msgr-failures/fastclose.yaml create mode 100644 qa/suites/rados/thrash-old-clients/msgr-failures/few.yaml create mode 100644 qa/suites/rados/thrash-old-clients/msgr-failures/osd-delay.yaml create mode 120000 qa/suites/rados/thrash-old-clients/msgr/.qa create mode 120000 qa/suites/rados/thrash-old-clients/msgr/async-v1only.yaml create mode 120000 qa/suites/rados/thrash-old-clients/msgr/async.yaml create mode 120000 qa/suites/rados/thrash-old-clients/msgr/random.yaml create mode 120000 qa/suites/rados/thrash-old-clients/msgr/simple.yaml create mode 120000 qa/suites/rados/thrash-old-clients/rados.yaml create mode 120000 qa/suites/rados/thrash-old-clients/thrashers/.qa create mode 100644 qa/suites/rados/thrash-old-clients/thrashers/careful.yaml create mode 100644 qa/suites/rados/thrash-old-clients/thrashers/default.yaml create mode 100644 qa/suites/rados/thrash-old-clients/thrashers/mapgap.yaml create mode 100644 qa/suites/rados/thrash-old-clients/thrashers/morepggrow.yaml create mode 100644 qa/suites/rados/thrash-old-clients/thrashers/none.yaml create mode 100644 qa/suites/rados/thrash-old-clients/thrashers/pggrow.yaml create mode 120000 qa/suites/rados/thrash-old-clients/thrashosds-health.yaml create mode 120000 qa/suites/rados/thrash-old-clients/workloads/.qa create mode 100644 qa/suites/rados/thrash-old-clients/workloads/cache-snaps.yaml create mode 100644 qa/suites/rados/thrash-old-clients/workloads/radosbench.yaml create mode 100644 qa/suites/rados/thrash-old-clients/workloads/rbd_cls.yaml create mode 100644 qa/suites/rados/thrash-old-clients/workloads/snaps-few-objects.yaml create mode 100644 qa/suites/rados/thrash-old-clients/workloads/test_rbd_api.yaml create mode 100644 qa/suites/rados/thrash/% create mode 120000 qa/suites/rados/thrash/.qa create mode 120000 qa/suites/rados/thrash/0-size-min-size-overrides/.qa create mode 120000 qa/suites/rados/thrash/0-size-min-size-overrides/2-size-2-min-size.yaml create mode 120000 qa/suites/rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml create mode 120000 qa/suites/rados/thrash/1-pg-log-overrides/.qa create mode 100644 qa/suites/rados/thrash/1-pg-log-overrides/normal_pg_log.yaml create mode 120000 qa/suites/rados/thrash/1-pg-log-overrides/short_pg_log.yaml create mode 100644 qa/suites/rados/thrash/2-recovery-overrides/$ create mode 120000 qa/suites/rados/thrash/2-recovery-overrides/.qa create mode 100644 qa/suites/rados/thrash/2-recovery-overrides/default.yaml create mode 120000 qa/suites/rados/thrash/2-recovery-overrides/more-active-recovery.yaml create mode 120000 qa/suites/rados/thrash/backoff/.qa create mode 100644 qa/suites/rados/thrash/backoff/normal.yaml create mode 100644 qa/suites/rados/thrash/backoff/peering.yaml create mode 100644 qa/suites/rados/thrash/backoff/peering_and_degraded.yaml create mode 100644 qa/suites/rados/thrash/ceph.yaml create mode 100644 qa/suites/rados/thrash/clusters/+ create mode 120000 qa/suites/rados/thrash/clusters/.qa create mode 120000 
qa/suites/rados/thrash/clusters/fixed-2.yaml create mode 100644 qa/suites/rados/thrash/clusters/openstack.yaml create mode 100644 qa/suites/rados/thrash/crc-failures/bad_map_crc_failure.yaml create mode 100644 qa/suites/rados/thrash/crc-failures/default.yaml create mode 120000 qa/suites/rados/thrash/d-balancer/.qa create mode 100644 qa/suites/rados/thrash/d-balancer/crush-compat.yaml create mode 100644 qa/suites/rados/thrash/d-balancer/off.yaml create mode 100644 qa/suites/rados/thrash/d-balancer/upmap.yaml create mode 120000 qa/suites/rados/thrash/msgr create mode 120000 qa/suites/rados/thrash/msgr-failures/.qa create mode 100644 qa/suites/rados/thrash/msgr-failures/fastclose.yaml create mode 100644 qa/suites/rados/thrash/msgr-failures/few.yaml create mode 100644 qa/suites/rados/thrash/msgr-failures/osd-delay.yaml create mode 120000 qa/suites/rados/thrash/objectstore create mode 120000 qa/suites/rados/thrash/rados.yaml create mode 120000 qa/suites/rados/thrash/supported-random-distro$ create mode 120000 qa/suites/rados/thrash/thrashers/.qa create mode 100644 qa/suites/rados/thrash/thrashers/careful.yaml create mode 100644 qa/suites/rados/thrash/thrashers/default.yaml create mode 100644 qa/suites/rados/thrash/thrashers/mapgap.yaml create mode 100644 qa/suites/rados/thrash/thrashers/morepggrow.yaml create mode 100644 qa/suites/rados/thrash/thrashers/none.yaml create mode 100644 qa/suites/rados/thrash/thrashers/pggrow.yaml create mode 120000 qa/suites/rados/thrash/thrashosds-health.yaml create mode 120000 qa/suites/rados/thrash/workloads/.qa create mode 100644 qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml create mode 100644 qa/suites/rados/thrash/workloads/cache-agent-big.yaml create mode 100644 qa/suites/rados/thrash/workloads/cache-agent-small.yaml create mode 100644 qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml create mode 100644 qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml create mode 100644 qa/suites/rados/thrash/workloads/cache-snaps.yaml create mode 100644 qa/suites/rados/thrash/workloads/cache.yaml create mode 100644 qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml create mode 100644 qa/suites/rados/thrash/workloads/rados_api_tests.yaml create mode 100644 qa/suites/rados/thrash/workloads/radosbench-high-concurrency.yaml create mode 100644 qa/suites/rados/thrash/workloads/radosbench.yaml create mode 100644 qa/suites/rados/thrash/workloads/redirect.yaml create mode 100644 qa/suites/rados/thrash/workloads/redirect_promote_tests.yaml create mode 100644 qa/suites/rados/thrash/workloads/redirect_set_object.yaml create mode 100644 qa/suites/rados/thrash/workloads/set-chunks-read.yaml create mode 100644 qa/suites/rados/thrash/workloads/small-objects.yaml create mode 100644 qa/suites/rados/thrash/workloads/snaps-few-objects.yaml create mode 100644 qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml create mode 120000 qa/suites/rados/upgrade/.qa create mode 120000 qa/suites/rados/upgrade/mimic-x-singleton create mode 100644 qa/suites/rados/verify/% create mode 120000 qa/suites/rados/verify/.qa create mode 100644 qa/suites/rados/verify/ceph.yaml create mode 100644 qa/suites/rados/verify/clusters/+ create mode 120000 qa/suites/rados/verify/clusters/.qa create mode 120000 qa/suites/rados/verify/clusters/fixed-2.yaml create mode 100644 qa/suites/rados/verify/clusters/openstack.yaml create mode 120000 qa/suites/rados/verify/d-thrash/.qa create mode 100644 qa/suites/rados/verify/d-thrash/default/+ create mode 120000 
qa/suites/rados/verify/d-thrash/default/.qa create mode 100644 qa/suites/rados/verify/d-thrash/default/default.yaml create mode 120000 qa/suites/rados/verify/d-thrash/default/thrashosds-health.yaml create mode 100644 qa/suites/rados/verify/d-thrash/none.yaml create mode 120000 qa/suites/rados/verify/msgr create mode 120000 qa/suites/rados/verify/msgr-failures/.qa create mode 100644 qa/suites/rados/verify/msgr-failures/few.yaml create mode 120000 qa/suites/rados/verify/objectstore create mode 120000 qa/suites/rados/verify/rados.yaml create mode 120000 qa/suites/rados/verify/tasks/.qa create mode 100644 qa/suites/rados/verify/tasks/mon_recovery.yaml create mode 100644 qa/suites/rados/verify/tasks/rados_api_tests.yaml create mode 100644 qa/suites/rados/verify/tasks/rados_cls_all.yaml create mode 120000 qa/suites/rados/verify/validater/.qa create mode 100644 qa/suites/rados/verify/validater/lockdep.yaml create mode 100644 qa/suites/rados/verify/validater/valgrind.yaml create mode 120000 qa/suites/rbd/.qa create mode 100644 qa/suites/rbd/basic/% create mode 120000 qa/suites/rbd/basic/.qa create mode 120000 qa/suites/rbd/basic/base/.qa create mode 100644 qa/suites/rbd/basic/base/install.yaml create mode 120000 qa/suites/rbd/basic/cachepool/.qa create mode 100644 qa/suites/rbd/basic/cachepool/none.yaml create mode 100644 qa/suites/rbd/basic/cachepool/small.yaml create mode 100644 qa/suites/rbd/basic/clusters/+ create mode 120000 qa/suites/rbd/basic/clusters/.qa create mode 120000 qa/suites/rbd/basic/clusters/fixed-1.yaml create mode 100644 qa/suites/rbd/basic/clusters/openstack.yaml create mode 120000 qa/suites/rbd/basic/msgr-failures/.qa create mode 100644 qa/suites/rbd/basic/msgr-failures/few.yaml create mode 120000 qa/suites/rbd/basic/objectstore create mode 120000 qa/suites/rbd/basic/supported-random-distro$ create mode 120000 qa/suites/rbd/basic/tasks/.qa create mode 100644 qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml create mode 100644 qa/suites/rbd/basic/tasks/rbd_cls_tests.yaml create mode 100644 qa/suites/rbd/basic/tasks/rbd_lock_and_fence.yaml create mode 100644 qa/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml create mode 100644 qa/suites/rbd/cli/% create mode 120000 qa/suites/rbd/cli/.qa create mode 120000 qa/suites/rbd/cli/base/.qa create mode 100644 qa/suites/rbd/cli/base/install.yaml create mode 120000 qa/suites/rbd/cli/clusters create mode 120000 qa/suites/rbd/cli/features/.qa create mode 100644 qa/suites/rbd/cli/features/defaults.yaml create mode 100644 qa/suites/rbd/cli/features/journaling.yaml create mode 100644 qa/suites/rbd/cli/features/layering.yaml create mode 120000 qa/suites/rbd/cli/msgr-failures/.qa create mode 100644 qa/suites/rbd/cli/msgr-failures/few.yaml create mode 120000 qa/suites/rbd/cli/objectstore create mode 120000 qa/suites/rbd/cli/pool/.qa create mode 100644 qa/suites/rbd/cli/pool/ec-data-pool.yaml create mode 100644 qa/suites/rbd/cli/pool/none.yaml create mode 100644 qa/suites/rbd/cli/pool/replicated-data-pool.yaml create mode 100644 qa/suites/rbd/cli/pool/small-cache-pool.yaml create mode 120000 qa/suites/rbd/cli/supported-random-distro$ create mode 120000 qa/suites/rbd/cli/workloads/.qa create mode 100644 qa/suites/rbd/cli/workloads/rbd_cli_generic.yaml create mode 100644 qa/suites/rbd/cli/workloads/rbd_cli_groups.yaml create mode 100644 qa/suites/rbd/cli/workloads/rbd_cli_import_export.yaml create mode 100644 qa/suites/rbd/cli_v1/% create mode 120000 qa/suites/rbd/cli_v1/.qa create mode 120000 qa/suites/rbd/cli_v1/base/.qa create 
mode 100644 qa/suites/rbd/cli_v1/base/install.yaml create mode 120000 qa/suites/rbd/cli_v1/clusters create mode 120000 qa/suites/rbd/cli_v1/features/.qa create mode 100644 qa/suites/rbd/cli_v1/features/format-1.yaml create mode 120000 qa/suites/rbd/cli_v1/msgr-failures/.qa create mode 100644 qa/suites/rbd/cli_v1/msgr-failures/few.yaml create mode 120000 qa/suites/rbd/cli_v1/objectstore create mode 120000 qa/suites/rbd/cli_v1/pool/.qa create mode 100644 qa/suites/rbd/cli_v1/pool/none.yaml create mode 100644 qa/suites/rbd/cli_v1/pool/small-cache-pool.yaml create mode 120000 qa/suites/rbd/cli_v1/supported-random-distro$ create mode 120000 qa/suites/rbd/cli_v1/workloads/.qa create mode 100644 qa/suites/rbd/cli_v1/workloads/rbd_cli_generic.yaml create mode 100644 qa/suites/rbd/cli_v1/workloads/rbd_cli_import_export.yaml create mode 100644 qa/suites/rbd/librbd/% create mode 120000 qa/suites/rbd/librbd/.qa create mode 120000 qa/suites/rbd/librbd/cache/.qa create mode 100644 qa/suites/rbd/librbd/cache/none.yaml create mode 100644 qa/suites/rbd/librbd/cache/writeback.yaml create mode 100644 qa/suites/rbd/librbd/cache/writethrough.yaml create mode 100644 qa/suites/rbd/librbd/clusters/+ create mode 120000 qa/suites/rbd/librbd/clusters/.qa create mode 120000 qa/suites/rbd/librbd/clusters/fixed-3.yaml create mode 100644 qa/suites/rbd/librbd/clusters/openstack.yaml create mode 120000 qa/suites/rbd/librbd/config/.qa create mode 100644 qa/suites/rbd/librbd/config/copy-on-read.yaml create mode 100644 qa/suites/rbd/librbd/config/none.yaml create mode 100644 qa/suites/rbd/librbd/config/permit-partial-discard.yaml create mode 120000 qa/suites/rbd/librbd/msgr-failures/.qa create mode 100644 qa/suites/rbd/librbd/msgr-failures/few.yaml create mode 120000 qa/suites/rbd/librbd/objectstore create mode 120000 qa/suites/rbd/librbd/pool/.qa create mode 100644 qa/suites/rbd/librbd/pool/ec-data-pool.yaml create mode 100644 qa/suites/rbd/librbd/pool/none.yaml create mode 100644 qa/suites/rbd/librbd/pool/replicated-data-pool.yaml create mode 100644 qa/suites/rbd/librbd/pool/small-cache-pool.yaml create mode 120000 qa/suites/rbd/librbd/supported-random-distro$ create mode 120000 qa/suites/rbd/librbd/workloads/.qa create mode 100644 qa/suites/rbd/librbd/workloads/c_api_tests.yaml create mode 100644 qa/suites/rbd/librbd/workloads/c_api_tests_with_defaults.yaml create mode 100644 qa/suites/rbd/librbd/workloads/c_api_tests_with_journaling.yaml create mode 100644 qa/suites/rbd/librbd/workloads/fsx.yaml create mode 100644 qa/suites/rbd/librbd/workloads/python_api_tests.yaml create mode 100644 qa/suites/rbd/librbd/workloads/python_api_tests_with_defaults.yaml create mode 100644 qa/suites/rbd/librbd/workloads/python_api_tests_with_journaling.yaml create mode 100644 qa/suites/rbd/librbd/workloads/rbd_fio.yaml create mode 100644 qa/suites/rbd/maintenance/% create mode 120000 qa/suites/rbd/maintenance/.qa create mode 120000 qa/suites/rbd/maintenance/base/.qa create mode 100644 qa/suites/rbd/maintenance/base/install.yaml create mode 100644 qa/suites/rbd/maintenance/clusters/+ create mode 120000 qa/suites/rbd/maintenance/clusters/.qa create mode 120000 qa/suites/rbd/maintenance/clusters/fixed-3.yaml create mode 120000 qa/suites/rbd/maintenance/clusters/openstack.yaml create mode 120000 qa/suites/rbd/maintenance/objectstore create mode 120000 qa/suites/rbd/maintenance/qemu/.qa create mode 100644 qa/suites/rbd/maintenance/qemu/xfstests.yaml create mode 120000 qa/suites/rbd/maintenance/supported-random-distro$ create mode 120000 
qa/suites/rbd/maintenance/workloads/.qa create mode 100644 qa/suites/rbd/maintenance/workloads/dynamic_features.yaml create mode 100644 qa/suites/rbd/maintenance/workloads/dynamic_features_no_cache.yaml create mode 100644 qa/suites/rbd/maintenance/workloads/rebuild_object_map.yaml create mode 100644 qa/suites/rbd/mirror-thrash/% create mode 120000 qa/suites/rbd/mirror-thrash/.qa create mode 120000 qa/suites/rbd/mirror-thrash/base/.qa create mode 100644 qa/suites/rbd/mirror-thrash/base/install.yaml create mode 100644 qa/suites/rbd/mirror-thrash/cluster/+ create mode 120000 qa/suites/rbd/mirror-thrash/cluster/.qa create mode 100644 qa/suites/rbd/mirror-thrash/cluster/2-node.yaml create mode 100644 qa/suites/rbd/mirror-thrash/cluster/openstack.yaml create mode 120000 qa/suites/rbd/mirror-thrash/msgr-failures create mode 120000 qa/suites/rbd/mirror-thrash/objectstore create mode 120000 qa/suites/rbd/mirror-thrash/policy/.qa create mode 100644 qa/suites/rbd/mirror-thrash/policy/none.yaml create mode 100644 qa/suites/rbd/mirror-thrash/policy/simple.yaml create mode 120000 qa/suites/rbd/mirror-thrash/rbd-mirror/.qa create mode 100644 qa/suites/rbd/mirror-thrash/rbd-mirror/four-per-cluster.yaml create mode 120000 qa/suites/rbd/mirror-thrash/supported-random-distro$ create mode 120000 qa/suites/rbd/mirror-thrash/users/.qa create mode 100644 qa/suites/rbd/mirror-thrash/users/mirror.yaml create mode 120000 qa/suites/rbd/mirror-thrash/workloads/.qa create mode 100644 qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-fsx-workunit.yaml create mode 100644 qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-stress-workunit.yaml create mode 100644 qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-workunit.yaml create mode 100644 qa/suites/rbd/mirror/% create mode 120000 qa/suites/rbd/mirror/.qa create mode 120000 qa/suites/rbd/mirror/base create mode 120000 qa/suites/rbd/mirror/cluster create mode 120000 qa/suites/rbd/mirror/msgr-failures create mode 120000 qa/suites/rbd/mirror/objectstore create mode 120000 qa/suites/rbd/mirror/supported-random-distro$ create mode 120000 qa/suites/rbd/mirror/users create mode 120000 qa/suites/rbd/mirror/workloads/.qa create mode 100644 qa/suites/rbd/mirror/workloads/rbd-mirror-bootstrap-workunit.yaml create mode 100644 qa/suites/rbd/mirror/workloads/rbd-mirror-ha-workunit.yaml create mode 100644 qa/suites/rbd/mirror/workloads/rbd-mirror-workunit-config-key.yaml create mode 100644 qa/suites/rbd/mirror/workloads/rbd-mirror-workunit-policy-none.yaml create mode 100644 qa/suites/rbd/mirror/workloads/rbd-mirror-workunit-policy-simple.yaml create mode 100644 qa/suites/rbd/nbd/% create mode 120000 qa/suites/rbd/nbd/.qa create mode 120000 qa/suites/rbd/nbd/base create mode 100644 qa/suites/rbd/nbd/cluster/+ create mode 120000 qa/suites/rbd/nbd/cluster/.qa create mode 100644 qa/suites/rbd/nbd/cluster/fixed-3.yaml create mode 120000 qa/suites/rbd/nbd/cluster/openstack.yaml create mode 120000 qa/suites/rbd/nbd/msgr-failures create mode 120000 qa/suites/rbd/nbd/objectstore create mode 120000 qa/suites/rbd/nbd/thrashers create mode 120000 qa/suites/rbd/nbd/thrashosds-health.yaml create mode 120000 qa/suites/rbd/nbd/workloads/.qa create mode 100644 qa/suites/rbd/nbd/workloads/rbd_fsx_nbd.yaml create mode 100644 qa/suites/rbd/nbd/workloads/rbd_nbd.yaml create mode 100644 qa/suites/rbd/qemu/% create mode 120000 qa/suites/rbd/qemu/.qa create mode 120000 qa/suites/rbd/qemu/cache/.qa create mode 100644 qa/suites/rbd/qemu/cache/none.yaml create mode 100644 qa/suites/rbd/qemu/cache/writeback.yaml 
create mode 100644 qa/suites/rbd/qemu/cache/writethrough.yaml create mode 100644 qa/suites/rbd/qemu/clusters/+ create mode 120000 qa/suites/rbd/qemu/clusters/.qa create mode 120000 qa/suites/rbd/qemu/clusters/fixed-3.yaml create mode 100644 qa/suites/rbd/qemu/clusters/openstack.yaml create mode 120000 qa/suites/rbd/qemu/features/.qa create mode 100644 qa/suites/rbd/qemu/features/defaults.yaml create mode 100644 qa/suites/rbd/qemu/features/journaling.yaml create mode 120000 qa/suites/rbd/qemu/msgr-failures/.qa create mode 100644 qa/suites/rbd/qemu/msgr-failures/few.yaml create mode 120000 qa/suites/rbd/qemu/objectstore create mode 120000 qa/suites/rbd/qemu/pool/.qa create mode 100644 qa/suites/rbd/qemu/pool/ec-cache-pool.yaml create mode 100644 qa/suites/rbd/qemu/pool/ec-data-pool.yaml create mode 100644 qa/suites/rbd/qemu/pool/none.yaml create mode 100644 qa/suites/rbd/qemu/pool/replicated-data-pool.yaml create mode 100644 qa/suites/rbd/qemu/pool/small-cache-pool.yaml create mode 120000 qa/suites/rbd/qemu/supported-random-distro$ create mode 120000 qa/suites/rbd/qemu/workloads/.qa create mode 100644 qa/suites/rbd/qemu/workloads/qemu_bonnie.yaml create mode 100644 qa/suites/rbd/qemu/workloads/qemu_fsstress.yaml create mode 100644 qa/suites/rbd/qemu/workloads/qemu_iozone.yaml.disabled create mode 100644 qa/suites/rbd/qemu/workloads/qemu_xfstests.yaml create mode 100644 qa/suites/rbd/singleton-bluestore/% create mode 120000 qa/suites/rbd/singleton-bluestore/.qa create mode 120000 qa/suites/rbd/singleton-bluestore/all/.qa create mode 100644 qa/suites/rbd/singleton-bluestore/all/issue-20295.yaml create mode 120000 qa/suites/rbd/singleton-bluestore/objectstore/.qa create mode 120000 qa/suites/rbd/singleton-bluestore/objectstore/bluestore-bitmap.yaml create mode 120000 qa/suites/rbd/singleton-bluestore/objectstore/bluestore-comp-snappy.yaml create mode 100644 qa/suites/rbd/singleton-bluestore/openstack.yaml create mode 120000 qa/suites/rbd/singleton-bluestore/supported-random-distro$ create mode 100644 qa/suites/rbd/singleton/% create mode 120000 qa/suites/rbd/singleton/.qa create mode 120000 qa/suites/rbd/singleton/all/.qa create mode 100644 qa/suites/rbd/singleton/all/admin_socket.yaml create mode 100644 qa/suites/rbd/singleton/all/formatted-output.yaml create mode 100644 qa/suites/rbd/singleton/all/merge_diff.yaml create mode 100644 qa/suites/rbd/singleton/all/permissions.yaml create mode 100644 qa/suites/rbd/singleton/all/qemu-iotests-no-cache.yaml create mode 100644 qa/suites/rbd/singleton/all/qemu-iotests-writeback.yaml create mode 100644 qa/suites/rbd/singleton/all/qemu-iotests-writethrough.yaml create mode 100644 qa/suites/rbd/singleton/all/rbd-vs-unmanaged-snaps.yaml create mode 100644 qa/suites/rbd/singleton/all/rbd_mirror.yaml create mode 100644 qa/suites/rbd/singleton/all/rbd_tasks.yaml create mode 100644 qa/suites/rbd/singleton/all/rbdmap_RBDMAPFILE.yaml create mode 100644 qa/suites/rbd/singleton/all/read-flags-no-cache.yaml create mode 100644 qa/suites/rbd/singleton/all/read-flags-writeback.yaml create mode 100644 qa/suites/rbd/singleton/all/read-flags-writethrough.yaml create mode 100644 qa/suites/rbd/singleton/all/snap-diff.yaml create mode 100644 qa/suites/rbd/singleton/all/verify_pool.yaml create mode 120000 qa/suites/rbd/singleton/objectstore create mode 100644 qa/suites/rbd/singleton/openstack.yaml create mode 120000 qa/suites/rbd/singleton/supported-random-distro$ create mode 100644 qa/suites/rbd/thrash/% create mode 120000 qa/suites/rbd/thrash/.qa create mode 120000 
qa/suites/rbd/thrash/base/.qa create mode 100644 qa/suites/rbd/thrash/base/install.yaml create mode 100644 qa/suites/rbd/thrash/clusters/+ create mode 120000 qa/suites/rbd/thrash/clusters/.qa create mode 120000 qa/suites/rbd/thrash/clusters/fixed-2.yaml create mode 100644 qa/suites/rbd/thrash/clusters/openstack.yaml create mode 120000 qa/suites/rbd/thrash/msgr-failures/.qa create mode 100644 qa/suites/rbd/thrash/msgr-failures/few.yaml create mode 120000 qa/suites/rbd/thrash/objectstore create mode 120000 qa/suites/rbd/thrash/supported-random-distro$ create mode 120000 qa/suites/rbd/thrash/thrashers/.qa create mode 100644 qa/suites/rbd/thrash/thrashers/cache.yaml create mode 100644 qa/suites/rbd/thrash/thrashers/default.yaml create mode 120000 qa/suites/rbd/thrash/thrashosds-health.yaml create mode 120000 qa/suites/rbd/thrash/workloads/.qa create mode 100644 qa/suites/rbd/thrash/workloads/journal.yaml create mode 100644 qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml create mode 100644 qa/suites/rbd/thrash/workloads/rbd_api_tests_copy_on_read.yaml create mode 100644 qa/suites/rbd/thrash/workloads/rbd_api_tests_journaling.yaml create mode 100644 qa/suites/rbd/thrash/workloads/rbd_api_tests_no_locking.yaml create mode 100644 qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writeback.yaml create mode 100644 qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writethrough.yaml create mode 100644 qa/suites/rbd/thrash/workloads/rbd_fsx_copy_on_read.yaml create mode 100644 qa/suites/rbd/thrash/workloads/rbd_fsx_deep_copy.yaml create mode 100644 qa/suites/rbd/thrash/workloads/rbd_fsx_journal.yaml create mode 100644 qa/suites/rbd/thrash/workloads/rbd_fsx_nocache.yaml create mode 100644 qa/suites/rbd/thrash/workloads/rbd_fsx_rate_limit.yaml create mode 100644 qa/suites/rbd/valgrind/% create mode 120000 qa/suites/rbd/valgrind/.qa create mode 120000 qa/suites/rbd/valgrind/base/.qa create mode 100644 qa/suites/rbd/valgrind/base/install.yaml create mode 120000 qa/suites/rbd/valgrind/centos_latest.yaml create mode 120000 qa/suites/rbd/valgrind/clusters create mode 120000 qa/suites/rbd/valgrind/objectstore create mode 120000 qa/suites/rbd/valgrind/validator/.qa create mode 100644 qa/suites/rbd/valgrind/validator/memcheck.yaml create mode 120000 qa/suites/rbd/valgrind/workloads/.qa create mode 100644 qa/suites/rbd/valgrind/workloads/c_api_tests.yaml create mode 100644 qa/suites/rbd/valgrind/workloads/c_api_tests_with_defaults.yaml create mode 100644 qa/suites/rbd/valgrind/workloads/c_api_tests_with_journaling.yaml create mode 100644 qa/suites/rbd/valgrind/workloads/fsx.yaml create mode 100644 qa/suites/rbd/valgrind/workloads/python_api_tests.yaml create mode 100644 qa/suites/rbd/valgrind/workloads/python_api_tests_with_defaults.yaml create mode 100644 qa/suites/rbd/valgrind/workloads/python_api_tests_with_journaling.yaml create mode 100644 qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml create mode 120000 qa/suites/rgw/.qa create mode 100644 qa/suites/rgw/hadoop-s3a/% create mode 120000 qa/suites/rgw/hadoop-s3a/.qa create mode 120000 qa/suites/rgw/hadoop-s3a/clusters/.qa create mode 120000 qa/suites/rgw/hadoop-s3a/clusters/fixed-2.yaml create mode 120000 qa/suites/rgw/hadoop-s3a/hadoop/.qa create mode 100644 qa/suites/rgw/hadoop-s3a/hadoop/default.yaml create mode 100644 qa/suites/rgw/hadoop-s3a/hadoop/v32.yaml create mode 100644 qa/suites/rgw/hadoop-s3a/s3a-hadoop.yaml create mode 120000 qa/suites/rgw/hadoop-s3a/supported-random-distro$ create mode 100644 qa/suites/rgw/multifs/% create mode 120000 
qa/suites/rgw/multifs/.qa create mode 120000 qa/suites/rgw/multifs/clusters/.qa create mode 120000 qa/suites/rgw/multifs/clusters/fixed-2.yaml create mode 120000 qa/suites/rgw/multifs/frontend/.qa create mode 120000 qa/suites/rgw/multifs/frontend/civetweb.yaml create mode 120000 qa/suites/rgw/multifs/objectstore create mode 100644 qa/suites/rgw/multifs/overrides.yaml create mode 120000 qa/suites/rgw/multifs/rgw_pool_type create mode 120000 qa/suites/rgw/multifs/tasks/.qa create mode 100644 qa/suites/rgw/multifs/tasks/rgw_bucket_quota.yaml create mode 100644 qa/suites/rgw/multifs/tasks/rgw_multipart_upload.yaml create mode 100644 qa/suites/rgw/multifs/tasks/rgw_ragweed.yaml create mode 100644 qa/suites/rgw/multifs/tasks/rgw_readwrite.yaml create mode 100644 qa/suites/rgw/multifs/tasks/rgw_roundtrip.yaml create mode 100644 qa/suites/rgw/multifs/tasks/rgw_s3tests.yaml create mode 100644 qa/suites/rgw/multifs/tasks/rgw_swift.yaml create mode 100644 qa/suites/rgw/multifs/tasks/rgw_user_quota.yaml create mode 100644 qa/suites/rgw/multisite/% create mode 120000 qa/suites/rgw/multisite/.qa create mode 100644 qa/suites/rgw/multisite/clusters.yaml create mode 120000 qa/suites/rgw/multisite/frontend create mode 100644 qa/suites/rgw/multisite/omap_limits.yaml create mode 100644 qa/suites/rgw/multisite/overrides.yaml create mode 120000 qa/suites/rgw/multisite/realms/.qa create mode 100644 qa/suites/rgw/multisite/realms/three-zone-plus-pubsub.yaml create mode 100644 qa/suites/rgw/multisite/realms/three-zone.yaml create mode 100644 qa/suites/rgw/multisite/realms/two-zonegroup.yaml create mode 120000 qa/suites/rgw/multisite/tasks/.qa create mode 100644 qa/suites/rgw/multisite/tasks/test_multi.yaml create mode 100644 qa/suites/rgw/multisite/valgrind.yaml create mode 100644 qa/suites/rgw/singleton/% create mode 120000 qa/suites/rgw/singleton/.qa create mode 120000 qa/suites/rgw/singleton/all/.qa create mode 100644 qa/suites/rgw/singleton/all/radosgw-admin.yaml create mode 120000 qa/suites/rgw/singleton/frontend/.qa create mode 120000 qa/suites/rgw/singleton/frontend/civetweb.yaml create mode 120000 qa/suites/rgw/singleton/objectstore create mode 100644 qa/suites/rgw/singleton/overrides.yaml create mode 120000 qa/suites/rgw/singleton/rgw_pool_type create mode 120000 qa/suites/rgw/singleton/supported-random-distro$ create mode 100644 qa/suites/rgw/tempest/% create mode 120000 qa/suites/rgw/tempest/.qa create mode 120000 qa/suites/rgw/tempest/clusters/.qa create mode 120000 qa/suites/rgw/tempest/clusters/fixed-1.yaml create mode 120000 qa/suites/rgw/tempest/frontend create mode 120000 qa/suites/rgw/tempest/tasks/.qa create mode 100644 qa/suites/rgw/tempest/tasks/rgw_tempest.yaml create mode 120000 qa/suites/rgw/tempest/ubuntu_latest.yaml create mode 100644 qa/suites/rgw/thrash/% create mode 120000 qa/suites/rgw/thrash/.qa create mode 100644 qa/suites/rgw/thrash/civetweb.yaml create mode 120000 qa/suites/rgw/thrash/clusters/.qa create mode 120000 qa/suites/rgw/thrash/clusters/fixed-2.yaml create mode 100644 qa/suites/rgw/thrash/install.yaml create mode 120000 qa/suites/rgw/thrash/objectstore create mode 120000 qa/suites/rgw/thrash/thrasher/.qa create mode 100644 qa/suites/rgw/thrash/thrasher/default.yaml create mode 120000 qa/suites/rgw/thrash/thrashosds-health.yaml create mode 120000 qa/suites/rgw/thrash/workload/.qa create mode 100644 qa/suites/rgw/thrash/workload/rgw_bucket_quota.yaml create mode 100644 qa/suites/rgw/thrash/workload/rgw_multipart_upload.yaml create mode 100644 
qa/suites/rgw/thrash/workload/rgw_readwrite.yaml create mode 100644 qa/suites/rgw/thrash/workload/rgw_roundtrip.yaml create mode 100644 qa/suites/rgw/thrash/workload/rgw_s3tests.yaml create mode 100644 qa/suites/rgw/thrash/workload/rgw_swift.yaml create mode 100644 qa/suites/rgw/thrash/workload/rgw_user_quota.yaml create mode 100644 qa/suites/rgw/tools/+ create mode 120000 qa/suites/rgw/tools/.qa create mode 120000 qa/suites/rgw/tools/centos_latest.yaml create mode 100644 qa/suites/rgw/tools/cluster.yaml create mode 100644 qa/suites/rgw/tools/tasks.yaml create mode 100644 qa/suites/rgw/verify/% create mode 120000 qa/suites/rgw/verify/.qa create mode 120000 qa/suites/rgw/verify/clusters/.qa create mode 120000 qa/suites/rgw/verify/clusters/fixed-2.yaml create mode 120000 qa/suites/rgw/verify/frontend create mode 120000 qa/suites/rgw/verify/msgr-failures/.qa create mode 100644 qa/suites/rgw/verify/msgr-failures/few.yaml create mode 120000 qa/suites/rgw/verify/objectstore create mode 100644 qa/suites/rgw/verify/overrides.yaml create mode 120000 qa/suites/rgw/verify/proto/.qa create mode 100644 qa/suites/rgw/verify/proto/http.yaml create mode 100644 qa/suites/rgw/verify/proto/https.yaml create mode 120000 qa/suites/rgw/verify/rgw_pool_type create mode 100644 qa/suites/rgw/verify/striping$/stripe-equals-chunk.yaml create mode 100644 qa/suites/rgw/verify/striping$/stripe-greater-than-chunk.yaml create mode 100644 qa/suites/rgw/verify/tasks/+ create mode 120000 qa/suites/rgw/verify/tasks/.qa create mode 100644 qa/suites/rgw/verify/tasks/0-install.yaml create mode 100644 qa/suites/rgw/verify/tasks/cls.yaml create mode 100644 qa/suites/rgw/verify/tasks/ragweed.yaml create mode 100644 qa/suites/rgw/verify/tasks/s3tests.yaml create mode 100644 qa/suites/rgw/verify/tasks/swift.yaml create mode 120000 qa/suites/rgw/verify/validater/.qa create mode 100644 qa/suites/rgw/verify/validater/lockdep.yaml create mode 100644 qa/suites/rgw/verify/validater/valgrind.yaml create mode 100644 qa/suites/samba/% create mode 120000 qa/suites/samba/.qa create mode 120000 qa/suites/samba/clusters/.qa create mode 100644 qa/suites/samba/clusters/samba-basic.yaml create mode 120000 qa/suites/samba/install/.qa create mode 100644 qa/suites/samba/install/install.yaml create mode 120000 qa/suites/samba/mount/.qa create mode 100644 qa/suites/samba/mount/fuse.yaml create mode 100644 qa/suites/samba/mount/kclient.yaml create mode 100644 qa/suites/samba/mount/native.yaml create mode 100644 qa/suites/samba/mount/noceph.yaml create mode 120000 qa/suites/samba/objectstore create mode 120000 qa/suites/samba/workload/.qa create mode 100644 qa/suites/samba/workload/cifs-dbench.yaml create mode 100644 qa/suites/samba/workload/cifs-fsstress.yaml create mode 100644 qa/suites/samba/workload/cifs-kernel-build.yaml.disabled create mode 100644 qa/suites/samba/workload/smbtorture.yaml create mode 120000 qa/suites/smoke/.qa create mode 100644 qa/suites/smoke/basic/% create mode 120000 qa/suites/smoke/basic/.qa create mode 100644 qa/suites/smoke/basic/clusters/+ create mode 120000 qa/suites/smoke/basic/clusters/.qa create mode 120000 qa/suites/smoke/basic/clusters/fixed-3-cephfs.yaml create mode 100644 qa/suites/smoke/basic/clusters/openstack.yaml create mode 120000 qa/suites/smoke/basic/objectstore/.qa create mode 120000 qa/suites/smoke/basic/objectstore/bluestore-bitmap.yaml create mode 120000 qa/suites/smoke/basic/tasks/.qa create mode 100644 qa/suites/smoke/basic/tasks/cfuse_workunit_suites_blogbench.yaml create mode 100644 
qa/suites/smoke/basic/tasks/cfuse_workunit_suites_fsstress.yaml create mode 100644 qa/suites/smoke/basic/tasks/cfuse_workunit_suites_iozone.yaml create mode 100644 qa/suites/smoke/basic/tasks/cfuse_workunit_suites_pjd.yaml create mode 100644 qa/suites/smoke/basic/tasks/kclient_workunit_direct_io.yaml create mode 100644 qa/suites/smoke/basic/tasks/kclient_workunit_suites_dbench.yaml create mode 100644 qa/suites/smoke/basic/tasks/kclient_workunit_suites_fsstress.yaml create mode 100644 qa/suites/smoke/basic/tasks/kclient_workunit_suites_pjd.yaml create mode 100644 qa/suites/smoke/basic/tasks/libcephfs_interface_tests.yaml create mode 100644 qa/suites/smoke/basic/tasks/mon_thrash.yaml create mode 100644 qa/suites/smoke/basic/tasks/rados_api_tests.yaml create mode 100644 qa/suites/smoke/basic/tasks/rados_bench.yaml create mode 100644 qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml create mode 100644 qa/suites/smoke/basic/tasks/rados_cls_all.yaml create mode 100644 qa/suites/smoke/basic/tasks/rados_ec_snaps.yaml create mode 100644 qa/suites/smoke/basic/tasks/rados_python.yaml create mode 100644 qa/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml create mode 100644 qa/suites/smoke/basic/tasks/rbd_api_tests.yaml create mode 100644 qa/suites/smoke/basic/tasks/rbd_cli_import_export.yaml create mode 100644 qa/suites/smoke/basic/tasks/rbd_fsx.yaml create mode 100644 qa/suites/smoke/basic/tasks/rbd_python_api_tests.yaml create mode 100644 qa/suites/smoke/basic/tasks/rbd_workunit_suites_iozone.yaml create mode 100644 qa/suites/smoke/basic/tasks/rgw_ec_s3tests.yaml create mode 100644 qa/suites/smoke/basic/tasks/rgw_s3tests.yaml create mode 100644 qa/suites/smoke/basic/tasks/rgw_swift.yaml create mode 120000 qa/suites/stress/.qa create mode 100644 qa/suites/stress/bench/% create mode 120000 qa/suites/stress/bench/.qa create mode 120000 qa/suites/stress/bench/clusters/.qa create mode 120000 qa/suites/stress/bench/clusters/fixed-3-cephfs.yaml create mode 120000 qa/suites/stress/bench/tasks/.qa create mode 100644 qa/suites/stress/bench/tasks/cfuse_workunit_snaps.yaml create mode 100644 qa/suites/stress/bench/tasks/kclient_workunit_suites_fsx.yaml create mode 100644 qa/suites/stress/thrash/% create mode 120000 qa/suites/stress/thrash/.qa create mode 120000 qa/suites/stress/thrash/clusters/.qa create mode 100644 qa/suites/stress/thrash/clusters/16-osd.yaml create mode 100644 qa/suites/stress/thrash/clusters/3-osd-1-machine.yaml create mode 100644 qa/suites/stress/thrash/clusters/8-osd.yaml create mode 120000 qa/suites/stress/thrash/thrashers/.qa create mode 100644 qa/suites/stress/thrash/thrashers/default.yaml create mode 100644 qa/suites/stress/thrash/thrashers/fast.yaml create mode 100644 qa/suites/stress/thrash/thrashers/more-down.yaml create mode 120000 qa/suites/stress/thrash/workloads/.qa create mode 100644 qa/suites/stress/thrash/workloads/bonnie_cfuse.yaml create mode 100644 qa/suites/stress/thrash/workloads/iozone_cfuse.yaml create mode 100644 qa/suites/stress/thrash/workloads/radosbench.yaml create mode 100644 qa/suites/stress/thrash/workloads/readwrite.yaml create mode 120000 qa/suites/teuthology/.qa create mode 100644 qa/suites/teuthology/buildpackages/% create mode 120000 qa/suites/teuthology/buildpackages/.qa create mode 120000 qa/suites/teuthology/buildpackages/supported-all-distro create mode 120000 qa/suites/teuthology/buildpackages/tasks/.qa create mode 100644 qa/suites/teuthology/buildpackages/tasks/branch.yaml create mode 100644 
qa/suites/teuthology/buildpackages/tasks/default.yaml create mode 100644 qa/suites/teuthology/buildpackages/tasks/tag.yaml create mode 100644 qa/suites/teuthology/ceph/% create mode 120000 qa/suites/teuthology/ceph/.qa create mode 120000 qa/suites/teuthology/ceph/clusters/.qa create mode 100644 qa/suites/teuthology/ceph/clusters/single.yaml create mode 120000 qa/suites/teuthology/ceph/distros create mode 120000 qa/suites/teuthology/ceph/tasks/.qa create mode 100644 qa/suites/teuthology/ceph/tasks/teuthology.yaml create mode 100644 qa/suites/teuthology/integration.yaml create mode 100644 qa/suites/teuthology/multi-cluster/% create mode 120000 qa/suites/teuthology/multi-cluster/.qa create mode 120000 qa/suites/teuthology/multi-cluster/all/.qa create mode 100644 qa/suites/teuthology/multi-cluster/all/ceph.yaml create mode 100644 qa/suites/teuthology/multi-cluster/all/thrashosds.yaml create mode 100644 qa/suites/teuthology/multi-cluster/all/upgrade.yaml create mode 100644 qa/suites/teuthology/multi-cluster/all/workunit.yaml create mode 100644 qa/suites/teuthology/no-ceph/% create mode 120000 qa/suites/teuthology/no-ceph/.qa create mode 120000 qa/suites/teuthology/no-ceph/clusters/.qa create mode 100644 qa/suites/teuthology/no-ceph/clusters/single.yaml create mode 120000 qa/suites/teuthology/no-ceph/tasks/.qa create mode 100644 qa/suites/teuthology/no-ceph/tasks/teuthology.yaml create mode 100644 qa/suites/teuthology/nop/% create mode 120000 qa/suites/teuthology/nop/.qa create mode 120000 qa/suites/teuthology/nop/all/.qa create mode 100644 qa/suites/teuthology/nop/all/nop.yaml create mode 100644 qa/suites/teuthology/rgw/% create mode 120000 qa/suites/teuthology/rgw/.qa create mode 120000 qa/suites/teuthology/rgw/distros create mode 120000 qa/suites/teuthology/rgw/tasks/.qa create mode 100644 qa/suites/teuthology/rgw/tasks/s3tests-civetweb.yaml create mode 100644 qa/suites/teuthology/rgw/tasks/s3tests-fastcgi.yaml create mode 100644 qa/suites/teuthology/rgw/tasks/s3tests-fcgi.yaml create mode 120000 qa/suites/teuthology/workunits/.qa create mode 100644 qa/suites/teuthology/workunits/yes.yaml create mode 120000 qa/suites/tgt/.qa create mode 100644 qa/suites/tgt/basic/% create mode 120000 qa/suites/tgt/basic/.qa create mode 120000 qa/suites/tgt/basic/clusters/.qa create mode 100644 qa/suites/tgt/basic/clusters/fixed-3.yaml create mode 120000 qa/suites/tgt/basic/msgr-failures/.qa create mode 100644 qa/suites/tgt/basic/msgr-failures/few.yaml create mode 100644 qa/suites/tgt/basic/msgr-failures/many.yaml create mode 120000 qa/suites/tgt/basic/tasks/.qa create mode 100644 qa/suites/tgt/basic/tasks/blogbench.yaml create mode 100644 qa/suites/tgt/basic/tasks/bonnie.yaml create mode 100644 qa/suites/tgt/basic/tasks/dbench-short.yaml create mode 100644 qa/suites/tgt/basic/tasks/dbench.yaml create mode 100644 qa/suites/tgt/basic/tasks/ffsb.yaml create mode 100644 qa/suites/tgt/basic/tasks/fio.yaml create mode 100644 qa/suites/tgt/basic/tasks/fsstress.yaml create mode 100644 qa/suites/tgt/basic/tasks/fsx.yaml create mode 100644 qa/suites/tgt/basic/tasks/fsync-tester.yaml create mode 100644 qa/suites/tgt/basic/tasks/iogen.yaml create mode 100644 qa/suites/tgt/basic/tasks/iozone-sync.yaml create mode 100644 qa/suites/tgt/basic/tasks/iozone.yaml create mode 100644 qa/suites/tgt/basic/tasks/pjd.yaml create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/.qa create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/% create mode 
120000 qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/.qa create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/0-cluster/+ create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/0-cluster/.qa create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/0-cluster/openstack.yaml create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/0-cluster/start.yaml create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/1-install/.qa create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/1-install/nautilus-client-x.yaml create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/2-features/.qa create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/2-features/defaults.yaml create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/2-features/layering.yaml create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/3-workload/.qa create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/3-workload/rbd_notification_tests.yaml create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/supported/.qa create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/supported/ubuntu_18.04.yaml create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/.qa create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/.qa create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/% create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/.qa create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/0-cluster/+ create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/0-cluster/.qa create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/0-cluster/openstack.yaml create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/0-cluster/start.yaml create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/1-install/.qa create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/1-install/nautilus-client-x.yaml create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/2-workload/.qa create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/2-workload/devstack-tempest-gate.yaml create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/supported/.qa create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/supported/centos_7.6.yaml create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/supported/rhel_7.6.yaml create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/supported/ubuntu_16.04.yaml create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/supported/ubuntu_18.04.yaml create mode 100644 
qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/% create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/.qa create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/0-cluster/+ create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/0-cluster/.qa create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/0-cluster/openstack.yaml create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/0-cluster/start.yaml create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/1-install/.qa create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/1-install/nautilus-client-x.yaml create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/2-features/.qa create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/2-features/defaults.yaml create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/2-features/layering.yaml create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/3-workload/.qa create mode 100644 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/3-workload/rbd_notification_tests.yaml create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/supported/.qa create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/supported/centos_7.6.yaml create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/supported/rhel_7.6.yaml create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/supported/ubuntu_16.04.yaml create mode 120000 qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/supported/ubuntu_18.04.yaml create mode 120000 qa/suites/upgrade/.qa create mode 120000 qa/suites/upgrade/luminous-x/.qa create mode 100644 qa/suites/upgrade/luminous-x/parallel/% create mode 120000 qa/suites/upgrade/luminous-x/parallel/.qa create mode 100644 qa/suites/upgrade/luminous-x/parallel/0-cluster/+ create mode 120000 qa/suites/upgrade/luminous-x/parallel/0-cluster/.qa create mode 100644 qa/suites/upgrade/luminous-x/parallel/0-cluster/openstack.yaml create mode 100644 qa/suites/upgrade/luminous-x/parallel/0-cluster/start.yaml create mode 120000 qa/suites/upgrade/luminous-x/parallel/1-ceph-install/.qa create mode 100644 qa/suites/upgrade/luminous-x/parallel/1-ceph-install/luminous.yaml create mode 100644 qa/suites/upgrade/luminous-x/parallel/1.1-pg-log-overrides/normal_pg_log.yaml create mode 100644 qa/suites/upgrade/luminous-x/parallel/1.1-pg-log-overrides/short_pg_log.yaml create mode 100644 qa/suites/upgrade/luminous-x/parallel/2-workload/+ create mode 120000 qa/suites/upgrade/luminous-x/parallel/2-workload/.qa create mode 100644 qa/suites/upgrade/luminous-x/parallel/2-workload/blogbench.yaml create mode 100644 qa/suites/upgrade/luminous-x/parallel/2-workload/ec-rados-default.yaml create mode 100644 qa/suites/upgrade/luminous-x/parallel/2-workload/rados_api.yaml create mode 100644 qa/suites/upgrade/luminous-x/parallel/2-workload/rados_loadgenbig.yaml create mode 100644 qa/suites/upgrade/luminous-x/parallel/2-workload/rgw_ragweed_prepare.yaml create mode 100644 qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_api.yaml create mode 100644 
qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_python.yaml create mode 120000 qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/.qa create mode 100644 qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-all.yaml create mode 100644 qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml create mode 100644 qa/suites/upgrade/luminous-x/parallel/4-msgr2.yaml create mode 120000 qa/suites/upgrade/luminous-x/parallel/4-nautilus.yaml create mode 100644 qa/suites/upgrade/luminous-x/parallel/5-final-workload/+ create mode 120000 qa/suites/upgrade/luminous-x/parallel/5-final-workload/.qa create mode 100644 qa/suites/upgrade/luminous-x/parallel/5-final-workload/blogbench.yaml create mode 100644 qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados-snaps-few-objects.yaml create mode 100644 qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_loadgenmix.yaml create mode 100644 qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_mon_thrash.yaml create mode 100644 qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_cls.yaml create mode 100644 qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_import_export.yaml create mode 100644 qa/suites/upgrade/luminous-x/parallel/5-final-workload/rgw.yaml create mode 100644 qa/suites/upgrade/luminous-x/parallel/5-final-workload/rgw_ragweed_check.yaml create mode 100644 qa/suites/upgrade/luminous-x/parallel/5-final-workload/rgw_swift.yaml create mode 120000 qa/suites/upgrade/luminous-x/parallel/objectstore create mode 120000 qa/suites/upgrade/luminous-x/parallel/supported-all-distro create mode 100644 qa/suites/upgrade/luminous-x/stress-split-erasure-code/% create mode 120000 qa/suites/upgrade/luminous-x/stress-split-erasure-code/.qa create mode 120000 qa/suites/upgrade/luminous-x/stress-split-erasure-code/0-cluster create mode 120000 qa/suites/upgrade/luminous-x/stress-split-erasure-code/1-luminous-install create mode 100644 qa/suites/upgrade/luminous-x/stress-split-erasure-code/1.1-pg-log-overrides/normal_pg_log.yaml create mode 100644 qa/suites/upgrade/luminous-x/stress-split-erasure-code/1.1-pg-log-overrides/short_pg_log.yaml create mode 120000 qa/suites/upgrade/luminous-x/stress-split-erasure-code/2-partial-upgrade create mode 120000 qa/suites/upgrade/luminous-x/stress-split-erasure-code/3-thrash/.qa create mode 100644 qa/suites/upgrade/luminous-x/stress-split-erasure-code/3-thrash/default.yaml create mode 100644 qa/suites/upgrade/luminous-x/stress-split-erasure-code/4-ec-workload.yaml create mode 120000 qa/suites/upgrade/luminous-x/stress-split-erasure-code/5-finish-upgrade.yaml create mode 100644 qa/suites/upgrade/luminous-x/stress-split-erasure-code/7-final-workload.yaml create mode 120000 qa/suites/upgrade/luminous-x/stress-split-erasure-code/objectstore create mode 120000 qa/suites/upgrade/luminous-x/stress-split-erasure-code/supported-all-distro create mode 120000 qa/suites/upgrade/luminous-x/stress-split-erasure-code/thrashosds-health.yaml create mode 100644 qa/suites/upgrade/luminous-x/stress-split/% create mode 120000 qa/suites/upgrade/luminous-x/stress-split/.qa create mode 100644 qa/suites/upgrade/luminous-x/stress-split/0-cluster/+ create mode 120000 qa/suites/upgrade/luminous-x/stress-split/0-cluster/.qa create mode 100644 qa/suites/upgrade/luminous-x/stress-split/0-cluster/openstack.yaml create mode 100644 qa/suites/upgrade/luminous-x/stress-split/0-cluster/start.yaml create mode 120000 qa/suites/upgrade/luminous-x/stress-split/1-ceph-install/.qa create 
mode 100644 qa/suites/upgrade/luminous-x/stress-split/1-ceph-install/luminous.yaml create mode 100644 qa/suites/upgrade/luminous-x/stress-split/1.1-pg-log-overrides/normal_pg_log.yaml create mode 100644 qa/suites/upgrade/luminous-x/stress-split/1.1-pg-log-overrides/short_pg_log.yaml create mode 120000 qa/suites/upgrade/luminous-x/stress-split/2-partial-upgrade/.qa create mode 100644 qa/suites/upgrade/luminous-x/stress-split/2-partial-upgrade/firsthalf.yaml create mode 120000 qa/suites/upgrade/luminous-x/stress-split/3-thrash/.qa create mode 100644 qa/suites/upgrade/luminous-x/stress-split/3-thrash/default.yaml create mode 100644 qa/suites/upgrade/luminous-x/stress-split/4-workload/+ create mode 120000 qa/suites/upgrade/luminous-x/stress-split/4-workload/.qa create mode 100644 qa/suites/upgrade/luminous-x/stress-split/4-workload/radosbench.yaml create mode 100644 qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-cls.yaml create mode 100644 qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-import-export.yaml create mode 100644 qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd_api.yaml create mode 100644 qa/suites/upgrade/luminous-x/stress-split/4-workload/readwrite.yaml create mode 100644 qa/suites/upgrade/luminous-x/stress-split/4-workload/snaps-few-objects.yaml create mode 100644 qa/suites/upgrade/luminous-x/stress-split/5-finish-upgrade.yaml create mode 100644 qa/suites/upgrade/luminous-x/stress-split/6-msgr2.yaml create mode 120000 qa/suites/upgrade/luminous-x/stress-split/6-nautilus.yaml create mode 100644 qa/suites/upgrade/luminous-x/stress-split/7-final-workload/+ create mode 120000 qa/suites/upgrade/luminous-x/stress-split/7-final-workload/.qa create mode 100644 qa/suites/upgrade/luminous-x/stress-split/7-final-workload/rgw-swift.yaml create mode 100644 qa/suites/upgrade/luminous-x/stress-split/7-final-workload/snaps-many-objects.yaml create mode 120000 qa/suites/upgrade/luminous-x/stress-split/objectstore/.qa create mode 120000 qa/suites/upgrade/luminous-x/stress-split/objectstore/bluestore-bitmap.yaml create mode 120000 qa/suites/upgrade/luminous-x/stress-split/objectstore/filestore-xfs.yaml create mode 120000 qa/suites/upgrade/luminous-x/stress-split/supported-all-distro create mode 120000 qa/suites/upgrade/luminous-x/stress-split/thrashosds-health.yaml create mode 100644 qa/suites/upgrade/mimic-x-singleton/% create mode 120000 qa/suites/upgrade/mimic-x-singleton/.qa create mode 100644 qa/suites/upgrade/mimic-x-singleton/0-cluster/+ create mode 120000 qa/suites/upgrade/mimic-x-singleton/0-cluster/.qa create mode 100644 qa/suites/upgrade/mimic-x-singleton/0-cluster/openstack.yaml create mode 100644 qa/suites/upgrade/mimic-x-singleton/0-cluster/start.yaml create mode 120000 qa/suites/upgrade/mimic-x-singleton/1-install/.qa create mode 100644 qa/suites/upgrade/mimic-x-singleton/1-install/mimic.yaml create mode 120000 qa/suites/upgrade/mimic-x-singleton/2-partial-upgrade/.qa create mode 100644 qa/suites/upgrade/mimic-x-singleton/2-partial-upgrade/firsthalf.yaml create mode 120000 qa/suites/upgrade/mimic-x-singleton/3-thrash/.qa create mode 100644 qa/suites/upgrade/mimic-x-singleton/3-thrash/default.yaml create mode 100644 qa/suites/upgrade/mimic-x-singleton/4-workload/+ create mode 120000 qa/suites/upgrade/mimic-x-singleton/4-workload/.qa create mode 100644 qa/suites/upgrade/mimic-x-singleton/4-workload/rbd-cls.yaml create mode 100644 qa/suites/upgrade/mimic-x-singleton/4-workload/rbd-import-export.yaml create mode 100644 
qa/suites/upgrade/mimic-x-singleton/4-workload/readwrite.yaml create mode 100644 qa/suites/upgrade/mimic-x-singleton/4-workload/snaps-few-objects.yaml create mode 100644 qa/suites/upgrade/mimic-x-singleton/5-workload/+ create mode 120000 qa/suites/upgrade/mimic-x-singleton/5-workload/.qa create mode 100644 qa/suites/upgrade/mimic-x-singleton/5-workload/radosbench.yaml create mode 100644 qa/suites/upgrade/mimic-x-singleton/5-workload/rbd_api.yaml create mode 100644 qa/suites/upgrade/mimic-x-singleton/6-finish-upgrade.yaml create mode 120000 qa/suites/upgrade/mimic-x-singleton/7-nautilus.yaml create mode 100644 qa/suites/upgrade/mimic-x-singleton/8-workload/+ create mode 120000 qa/suites/upgrade/mimic-x-singleton/8-workload/.qa create mode 100644 qa/suites/upgrade/mimic-x-singleton/8-workload/rbd-python.yaml create mode 100644 qa/suites/upgrade/mimic-x-singleton/8-workload/rgw-swift.yaml create mode 100644 qa/suites/upgrade/mimic-x-singleton/8-workload/snaps-many-objects.yaml create mode 120000 qa/suites/upgrade/mimic-x-singleton/supported-random-distro$ create mode 120000 qa/suites/upgrade/mimic-x-singleton/thrashosds-health.yaml create mode 120000 qa/suites/upgrade/mimic-x/.qa create mode 100644 qa/suites/upgrade/mimic-x/parallel/% create mode 120000 qa/suites/upgrade/mimic-x/parallel/.qa create mode 100644 qa/suites/upgrade/mimic-x/parallel/0-cluster/+ create mode 120000 qa/suites/upgrade/mimic-x/parallel/0-cluster/.qa create mode 100644 qa/suites/upgrade/mimic-x/parallel/0-cluster/openstack.yaml create mode 100644 qa/suites/upgrade/mimic-x/parallel/0-cluster/start.yaml create mode 120000 qa/suites/upgrade/mimic-x/parallel/1-ceph-install/.qa create mode 100644 qa/suites/upgrade/mimic-x/parallel/1-ceph-install/mimic.yaml create mode 100644 qa/suites/upgrade/mimic-x/parallel/1.1-pg-log-overrides/normal_pg_log.yaml create mode 100644 qa/suites/upgrade/mimic-x/parallel/1.1-pg-log-overrides/short_pg_log.yaml create mode 100644 qa/suites/upgrade/mimic-x/parallel/2-workload/+ create mode 120000 qa/suites/upgrade/mimic-x/parallel/2-workload/.qa create mode 100644 qa/suites/upgrade/mimic-x/parallel/2-workload/blogbench.yaml create mode 100644 qa/suites/upgrade/mimic-x/parallel/2-workload/ec-rados-default.yaml create mode 100644 qa/suites/upgrade/mimic-x/parallel/2-workload/rados_api.yaml create mode 100644 qa/suites/upgrade/mimic-x/parallel/2-workload/rados_loadgenbig.yaml create mode 100644 qa/suites/upgrade/mimic-x/parallel/2-workload/rgw_ragweed_prepare.yaml create mode 100644 qa/suites/upgrade/mimic-x/parallel/2-workload/test_rbd_api.yaml create mode 100644 qa/suites/upgrade/mimic-x/parallel/2-workload/test_rbd_python.yaml create mode 120000 qa/suites/upgrade/mimic-x/parallel/3-upgrade-sequence/.qa create mode 100644 qa/suites/upgrade/mimic-x/parallel/3-upgrade-sequence/upgrade-all.yaml create mode 100644 qa/suites/upgrade/mimic-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml create mode 100644 qa/suites/upgrade/mimic-x/parallel/4-msgr2.yaml create mode 120000 qa/suites/upgrade/mimic-x/parallel/4-nautilus.yaml create mode 100644 qa/suites/upgrade/mimic-x/parallel/5-final-workload/+ create mode 120000 qa/suites/upgrade/mimic-x/parallel/5-final-workload/.qa create mode 100644 qa/suites/upgrade/mimic-x/parallel/5-final-workload/blogbench.yaml create mode 100644 qa/suites/upgrade/mimic-x/parallel/5-final-workload/rados-snaps-few-objects.yaml create mode 100644 qa/suites/upgrade/mimic-x/parallel/5-final-workload/rados_loadgenmix.yaml create mode 100644 
qa/suites/upgrade/mimic-x/parallel/5-final-workload/rados_mon_thrash.yaml create mode 100644 qa/suites/upgrade/mimic-x/parallel/5-final-workload/rbd_cls.yaml create mode 100644 qa/suites/upgrade/mimic-x/parallel/5-final-workload/rbd_import_export.yaml create mode 100644 qa/suites/upgrade/mimic-x/parallel/5-final-workload/rgw.yaml create mode 100644 qa/suites/upgrade/mimic-x/parallel/5-final-workload/rgw_ragweed_check.yaml create mode 100644 qa/suites/upgrade/mimic-x/parallel/5-final-workload/rgw_swift.yaml create mode 120000 qa/suites/upgrade/mimic-x/parallel/objectstore create mode 120000 qa/suites/upgrade/mimic-x/parallel/supported-all-distro create mode 100644 qa/suites/upgrade/mimic-x/stress-split-erasure-code/% create mode 120000 qa/suites/upgrade/mimic-x/stress-split-erasure-code/.qa create mode 120000 qa/suites/upgrade/mimic-x/stress-split-erasure-code/0-cluster create mode 120000 qa/suites/upgrade/mimic-x/stress-split-erasure-code/1-luminous-install create mode 100644 qa/suites/upgrade/mimic-x/stress-split-erasure-code/1.1-pg-log-overrides/normal_pg_log.yaml create mode 100644 qa/suites/upgrade/mimic-x/stress-split-erasure-code/1.1-pg-log-overrides/short_pg_log.yaml create mode 120000 qa/suites/upgrade/mimic-x/stress-split-erasure-code/2-partial-upgrade create mode 120000 qa/suites/upgrade/mimic-x/stress-split-erasure-code/3-thrash/.qa create mode 100644 qa/suites/upgrade/mimic-x/stress-split-erasure-code/3-thrash/default.yaml create mode 100644 qa/suites/upgrade/mimic-x/stress-split-erasure-code/4-ec-workload.yaml create mode 120000 qa/suites/upgrade/mimic-x/stress-split-erasure-code/5-finish-upgrade.yaml create mode 100644 qa/suites/upgrade/mimic-x/stress-split-erasure-code/7-final-workload.yaml create mode 120000 qa/suites/upgrade/mimic-x/stress-split-erasure-code/objectstore create mode 120000 qa/suites/upgrade/mimic-x/stress-split-erasure-code/supported-all-distro create mode 120000 qa/suites/upgrade/mimic-x/stress-split-erasure-code/thrashosds-health.yaml create mode 100644 qa/suites/upgrade/mimic-x/stress-split/% create mode 120000 qa/suites/upgrade/mimic-x/stress-split/.qa create mode 100644 qa/suites/upgrade/mimic-x/stress-split/0-cluster/+ create mode 120000 qa/suites/upgrade/mimic-x/stress-split/0-cluster/.qa create mode 100644 qa/suites/upgrade/mimic-x/stress-split/0-cluster/openstack.yaml create mode 100644 qa/suites/upgrade/mimic-x/stress-split/0-cluster/start.yaml create mode 120000 qa/suites/upgrade/mimic-x/stress-split/1-ceph-install/.qa create mode 100644 qa/suites/upgrade/mimic-x/stress-split/1-ceph-install/mimic.yaml create mode 100644 qa/suites/upgrade/mimic-x/stress-split/1.1-pg-log-overrides/normal_pg_log.yaml create mode 100644 qa/suites/upgrade/mimic-x/stress-split/1.1-pg-log-overrides/short_pg_log.yaml create mode 120000 qa/suites/upgrade/mimic-x/stress-split/2-partial-upgrade/.qa create mode 100644 qa/suites/upgrade/mimic-x/stress-split/2-partial-upgrade/firsthalf.yaml create mode 120000 qa/suites/upgrade/mimic-x/stress-split/3-thrash/.qa create mode 100644 qa/suites/upgrade/mimic-x/stress-split/3-thrash/default.yaml create mode 100644 qa/suites/upgrade/mimic-x/stress-split/4-workload/+ create mode 120000 qa/suites/upgrade/mimic-x/stress-split/4-workload/.qa create mode 100644 qa/suites/upgrade/mimic-x/stress-split/4-workload/radosbench.yaml create mode 100644 qa/suites/upgrade/mimic-x/stress-split/4-workload/rbd-cls.yaml create mode 100644 qa/suites/upgrade/mimic-x/stress-split/4-workload/rbd-import-export.yaml create mode 100644 
qa/suites/upgrade/mimic-x/stress-split/4-workload/rbd_api.yaml create mode 100644 qa/suites/upgrade/mimic-x/stress-split/4-workload/readwrite.yaml create mode 100644 qa/suites/upgrade/mimic-x/stress-split/4-workload/rgw_ragweed_prepare.yaml create mode 100644 qa/suites/upgrade/mimic-x/stress-split/4-workload/snaps-few-objects.yaml create mode 100644 qa/suites/upgrade/mimic-x/stress-split/5-finish-upgrade.yaml create mode 120000 qa/suites/upgrade/mimic-x/stress-split/6-nautilus.yaml create mode 100644 qa/suites/upgrade/mimic-x/stress-split/6.1-msgr2.yaml create mode 100644 qa/suites/upgrade/mimic-x/stress-split/7-final-workload/+ create mode 120000 qa/suites/upgrade/mimic-x/stress-split/7-final-workload/.qa create mode 100644 qa/suites/upgrade/mimic-x/stress-split/7-final-workload/rbd-python.yaml create mode 100644 qa/suites/upgrade/mimic-x/stress-split/7-final-workload/rgw-swift-ragweed_check.yaml create mode 100644 qa/suites/upgrade/mimic-x/stress-split/7-final-workload/snaps-many-objects.yaml create mode 120000 qa/suites/upgrade/mimic-x/stress-split/objectstore/.qa create mode 120000 qa/suites/upgrade/mimic-x/stress-split/objectstore/bluestore-bitmap.yaml create mode 120000 qa/suites/upgrade/mimic-x/stress-split/objectstore/filestore-xfs.yaml create mode 120000 qa/suites/upgrade/mimic-x/stress-split/supported-all-distro create mode 120000 qa/suites/upgrade/mimic-x/stress-split/thrashosds-health.yaml create mode 120000 qa/suites/upgrade/nautilus-p2p/.qa create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-parallel/% create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-parallel/point-to-point-upgrade.yaml create mode 120000 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-parallel/supported-all-distro create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/% create mode 120000 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/.qa create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/0-cluster/+ create mode 120000 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/0-cluster/.qa create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/0-cluster/openstack.yaml create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/0-cluster/start.yaml create mode 120000 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/1-ceph-install/.qa create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/1-ceph-install/nautilus.yaml create mode 120000 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/1.1.short_pg_log.yaml create mode 120000 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/2-partial-upgrade/.qa create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/2-partial-upgrade/firsthalf.yaml create mode 120000 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/3-thrash/.qa create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/3-thrash/default.yaml create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/+ create mode 120000 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/.qa create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/fsx.yaml create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/radosbench.yaml create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/rbd-cls.yaml create mode 100644 
qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/rbd-import-export.yaml create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/rbd_api.yaml create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/readwrite.yaml create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/snaps-few-objects.yaml create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/5-finish-upgrade.yaml create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/+ create mode 120000 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/.qa create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/rbd-python.yaml create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/rgw-swift.yaml create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/snaps-many-objects.yaml create mode 120000 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/objectstore/.qa create mode 120000 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/objectstore/bluestore-bitmap.yaml create mode 100644 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/objectstore/default.yaml create mode 120000 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/objectstore/filestore-xfs.yaml create mode 120000 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/supported-all-distro create mode 120000 qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/thrashosds-health.yaml create mode 100644 qa/tasks/__init__.py create mode 100644 qa/tasks/admin_socket.py create mode 100644 qa/tasks/autotest.py create mode 100644 qa/tasks/aver.py create mode 100644 qa/tasks/blktrace.py create mode 100644 qa/tasks/boto.cfg.template create mode 100644 qa/tasks/cbt.py create mode 100644 qa/tasks/ceph.conf.template create mode 100644 qa/tasks/ceph.py create mode 100644 qa/tasks/ceph_client.py create mode 100644 qa/tasks/ceph_deploy.py create mode 100644 qa/tasks/ceph_fuse.py create mode 100644 qa/tasks/ceph_manager.py create mode 100644 qa/tasks/ceph_objectstore_tool.py create mode 100644 qa/tasks/ceph_test_case.py create mode 100644 qa/tasks/cephfs/__init__.py create mode 100644 qa/tasks/cephfs/cephfs_test_case.py create mode 100644 qa/tasks/cephfs/filesystem.py create mode 100644 qa/tasks/cephfs/fuse_mount.py create mode 100644 qa/tasks/cephfs/kernel_mount.py create mode 100644 qa/tasks/cephfs/mount.py create mode 100644 qa/tasks/cephfs/test_admin.py create mode 100644 qa/tasks/cephfs/test_auto_repair.py create mode 100644 qa/tasks/cephfs/test_backtrace.py create mode 100644 qa/tasks/cephfs/test_cap_flush.py create mode 100644 qa/tasks/cephfs/test_cephfs_shell.py create mode 100644 qa/tasks/cephfs/test_client_limits.py create mode 100644 qa/tasks/cephfs/test_client_recovery.py create mode 100644 qa/tasks/cephfs/test_damage.py create mode 100644 qa/tasks/cephfs/test_data_scan.py create mode 100644 qa/tasks/cephfs/test_dump_tree.py create mode 100644 qa/tasks/cephfs/test_exports.py create mode 100644 qa/tasks/cephfs/test_failover.py create mode 100644 qa/tasks/cephfs/test_flush.py create mode 100644 qa/tasks/cephfs/test_forward_scrub.py create mode 100644 qa/tasks/cephfs/test_fragment.py create mode 100644 qa/tasks/cephfs/test_full.py create mode 100644 qa/tasks/cephfs/test_journal_migration.py create mode 100644 qa/tasks/cephfs/test_journal_repair.py create mode 100644 
qa/tasks/cephfs/test_mantle.py create mode 100644 qa/tasks/cephfs/test_misc.py create mode 100644 qa/tasks/cephfs/test_openfiletable.py create mode 100644 qa/tasks/cephfs/test_pool_perm.py create mode 100644 qa/tasks/cephfs/test_quota.py create mode 100644 qa/tasks/cephfs/test_readahead.py create mode 100644 qa/tasks/cephfs/test_recovery_pool.py create mode 100644 qa/tasks/cephfs/test_scrub.py create mode 100644 qa/tasks/cephfs/test_scrub_checks.py create mode 100644 qa/tasks/cephfs/test_sessionmap.py create mode 100644 qa/tasks/cephfs/test_snapshots.py create mode 100644 qa/tasks/cephfs/test_strays.py create mode 100644 qa/tasks/cephfs/test_volume_client.py create mode 100644 qa/tasks/cephfs/test_volumes.py create mode 100644 qa/tasks/cephfs_test_runner.py create mode 100644 qa/tasks/cephfs_upgrade_snap.py create mode 100644 qa/tasks/check_counter.py create mode 100644 qa/tasks/cifs_mount.py create mode 100644 qa/tasks/cram.py create mode 100644 qa/tasks/create_verify_lfn_objects.py create mode 100644 qa/tasks/devstack.py create mode 100644 qa/tasks/die_on_err.py create mode 100644 qa/tasks/divergent_priors.py create mode 100644 qa/tasks/divergent_priors2.py create mode 100644 qa/tasks/dnsmasq.py create mode 100644 qa/tasks/dump_stuck.py create mode 100644 qa/tasks/ec_lost_unfound.py create mode 100644 qa/tasks/exec_on_cleanup.py create mode 100644 qa/tasks/filestore_idempotent.py create mode 100644 qa/tasks/fs.py create mode 100644 qa/tasks/kclient.py create mode 100644 qa/tasks/keystone.py create mode 100755 qa/tasks/locktest.py create mode 100644 qa/tasks/logrotate.conf create mode 100644 qa/tasks/lost_unfound.py create mode 100644 qa/tasks/manypools.py create mode 100644 qa/tasks/mds_creation_failure.py create mode 100644 qa/tasks/mds_pre_upgrade.py create mode 100644 qa/tasks/mds_thrash.py create mode 100644 qa/tasks/metadata.yaml create mode 100644 qa/tasks/mgr/__init__.py create mode 100644 qa/tasks/mgr/dashboard/__init__.py create mode 100644 qa/tasks/mgr/dashboard/helper.py create mode 100644 qa/tasks/mgr/dashboard/test_auth.py create mode 100644 qa/tasks/mgr/dashboard/test_cephfs.py create mode 100644 qa/tasks/mgr/dashboard/test_cluster_configuration.py create mode 100644 qa/tasks/mgr/dashboard/test_erasure_code_profile.py create mode 100644 qa/tasks/mgr/dashboard/test_ganesha.py create mode 100644 qa/tasks/mgr/dashboard/test_health.py create mode 100644 qa/tasks/mgr/dashboard/test_host.py create mode 100644 qa/tasks/mgr/dashboard/test_logs.py create mode 100644 qa/tasks/mgr/dashboard/test_mgr_module.py create mode 100644 qa/tasks/mgr/dashboard/test_monitor.py create mode 100644 qa/tasks/mgr/dashboard/test_osd.py create mode 100644 qa/tasks/mgr/dashboard/test_perf_counters.py create mode 100644 qa/tasks/mgr/dashboard/test_pool.py create mode 100644 qa/tasks/mgr/dashboard/test_rbd.py create mode 100644 qa/tasks/mgr/dashboard/test_rbd_mirroring.py create mode 100644 qa/tasks/mgr/dashboard/test_requests.py create mode 100644 qa/tasks/mgr/dashboard/test_rgw.py create mode 100644 qa/tasks/mgr/dashboard/test_role.py create mode 100644 qa/tasks/mgr/dashboard/test_settings.py create mode 100644 qa/tasks/mgr/dashboard/test_summary.py create mode 100644 qa/tasks/mgr/dashboard/test_user.py create mode 100644 qa/tasks/mgr/mgr_test_case.py create mode 100644 qa/tasks/mgr/test_crash.py create mode 100644 qa/tasks/mgr/test_dashboard.py create mode 100644 qa/tasks/mgr/test_failover.py create mode 100644 qa/tasks/mgr/test_insights.py create mode 100644 qa/tasks/mgr/test_module_selftest.py create 
mode 100644 qa/tasks/mgr/test_orchestrator_cli.py create mode 100644 qa/tasks/mgr/test_progress.py create mode 100644 qa/tasks/mgr/test_prometheus.py create mode 100644 qa/tasks/mgr/test_ssh_orchestrator.py create mode 100644 qa/tasks/mon_clock_skew_check.py create mode 100644 qa/tasks/mon_recovery.py create mode 100644 qa/tasks/mon_thrash.py create mode 100644 qa/tasks/multibench.py create mode 100644 qa/tasks/netem.py create mode 100644 qa/tasks/object_source_down.py create mode 100644 qa/tasks/omapbench.py create mode 100644 qa/tasks/openssl_keys.py create mode 100644 qa/tasks/osd_backfill.py create mode 100644 qa/tasks/osd_failsafe_enospc.py create mode 100644 qa/tasks/osd_max_pg_per_osd.py create mode 100644 qa/tasks/osd_recovery.py create mode 100644 qa/tasks/peer.py create mode 100644 qa/tasks/peering_speed_test.py create mode 100644 qa/tasks/populate_rbd_pool.py create mode 100644 qa/tasks/qemu.py create mode 100644 qa/tasks/rados.py create mode 100644 qa/tasks/radosbench.py create mode 100644 qa/tasks/radosbenchsweep.py create mode 100644 qa/tasks/radosgw_admin.py create mode 100644 qa/tasks/radosgw_admin_rest.py create mode 100644 qa/tasks/ragweed.py create mode 100644 qa/tasks/rbd.py create mode 100644 qa/tasks/rbd_fio.py create mode 100644 qa/tasks/rbd_fsx.py create mode 100644 qa/tasks/rbd_mirror.py create mode 100644 qa/tasks/rbd_mirror_thrash.py create mode 100644 qa/tasks/rebuild_mondb.py create mode 100644 qa/tasks/reg11184.py create mode 100644 qa/tasks/rep_lost_unfound_delete.py create mode 100644 qa/tasks/repair_test.py create mode 100644 qa/tasks/resolve_stuck_peering.py create mode 100644 qa/tasks/restart.py create mode 100644 qa/tasks/rgw.py create mode 100644 qa/tasks/rgw_logsocket.py create mode 120000 qa/tasks/rgw_multi create mode 100644 qa/tasks/rgw_multisite.py create mode 100644 qa/tasks/rgw_multisite_tests.py create mode 100644 qa/tasks/s3a_hadoop.py create mode 100644 qa/tasks/s3readwrite.py create mode 100644 qa/tasks/s3roundtrip.py create mode 100644 qa/tasks/s3tests.py create mode 100644 qa/tasks/samba.py create mode 100644 qa/tasks/scrub.py create mode 100644 qa/tasks/scrub_test.py create mode 100644 qa/tasks/swift.py create mode 100644 qa/tasks/systemd.py create mode 100644 qa/tasks/tempest.py create mode 100644 qa/tasks/tests/__init__.py create mode 100644 qa/tasks/tests/test_devstack.py create mode 100644 qa/tasks/tests/test_radosgw_admin.py create mode 100644 qa/tasks/teuthology_integration.py create mode 100644 qa/tasks/tgt.py create mode 100644 qa/tasks/thrash_pool_snaps.py create mode 100644 qa/tasks/thrashosds-health.yaml create mode 100644 qa/tasks/thrashosds.py create mode 100644 qa/tasks/tox.py create mode 100644 qa/tasks/userdata_setup.yaml create mode 100644 qa/tasks/userdata_teardown.yaml create mode 100644 qa/tasks/util/__init__.py create mode 100644 qa/tasks/util/rados.py create mode 100644 qa/tasks/util/rgw.py create mode 100644 qa/tasks/util/test/__init__.py create mode 100644 qa/tasks/util/test/test_rados.py create mode 100644 qa/tasks/util/workunit.py create mode 100644 qa/tasks/vstart_runner.py create mode 100644 qa/tasks/watch_notify_same_primary.py create mode 100644 qa/tasks/watch_notify_stress.py create mode 100644 qa/tasks/workunit.py create mode 100644 qa/timezone/eastern.yaml create mode 100644 qa/timezone/pacific.yaml create mode 100644 qa/timezone/random.yaml create mode 100644 qa/tox.ini create mode 100644 qa/valgrind.supp create mode 100644 qa/workunits/Makefile create mode 100755 qa/workunits/caps/mon_commands.sh create 
mode 100755 qa/workunits/ceph-helpers-root.sh create mode 100755 qa/workunits/ceph-tests/ceph-admin-commands.sh create mode 100755 qa/workunits/cephtool/test.sh create mode 100755 qa/workunits/cephtool/test_daemon.sh create mode 100755 qa/workunits/cephtool/test_kvstore_tool.sh create mode 100755 qa/workunits/cls/test_cls_hello.sh create mode 100755 qa/workunits/cls/test_cls_journal.sh create mode 100755 qa/workunits/cls/test_cls_lock.sh create mode 100755 qa/workunits/cls/test_cls_log.sh create mode 100755 qa/workunits/cls/test_cls_numops.sh create mode 100755 qa/workunits/cls/test_cls_rbd.sh create mode 100755 qa/workunits/cls/test_cls_refcount.sh create mode 100755 qa/workunits/cls/test_cls_rgw.sh create mode 100755 qa/workunits/cls/test_cls_sdk.sh create mode 100644 qa/workunits/direct_io/.gitignore create mode 100644 qa/workunits/direct_io/Makefile create mode 100755 qa/workunits/direct_io/big.sh create mode 100644 qa/workunits/direct_io/direct_io_test.c create mode 100755 qa/workunits/direct_io/misc.sh create mode 100644 qa/workunits/direct_io/test_short_dio_read.c create mode 100644 qa/workunits/direct_io/test_sync_io.c create mode 100644 qa/workunits/erasure-code/.gitignore create mode 100644 qa/workunits/erasure-code/bench.html create mode 100755 qa/workunits/erasure-code/bench.sh create mode 100755 qa/workunits/erasure-code/encode-decode-non-regression.sh create mode 100644 qa/workunits/erasure-code/examples.css create mode 100644 qa/workunits/erasure-code/jquery.flot.categories.js create mode 100644 qa/workunits/erasure-code/jquery.flot.js create mode 100644 qa/workunits/erasure-code/jquery.js create mode 100644 qa/workunits/erasure-code/plot.js create mode 100644 qa/workunits/false.sh create mode 100644 qa/workunits/fs/.gitignore create mode 100644 qa/workunits/fs/Makefile create mode 100755 qa/workunits/fs/misc/acl.sh create mode 100755 qa/workunits/fs/misc/chmod.sh create mode 100755 qa/workunits/fs/misc/direct_io.py create mode 100755 qa/workunits/fs/misc/dirfrag.sh create mode 100755 qa/workunits/fs/misc/filelock_deadlock.py create mode 100755 qa/workunits/fs/misc/filelock_interrupt.py create mode 100755 qa/workunits/fs/misc/i_complete_vs_rename.sh create mode 100755 qa/workunits/fs/misc/layout_vxattrs.sh create mode 100755 qa/workunits/fs/misc/mkpool_layout_vxattrs.sh create mode 100755 qa/workunits/fs/misc/multiple_rsync.sh create mode 100755 qa/workunits/fs/misc/rstats.sh create mode 100755 qa/workunits/fs/misc/subvolume.sh create mode 100755 qa/workunits/fs/misc/trivial_sync.sh create mode 100755 qa/workunits/fs/misc/xattrs.sh create mode 100755 qa/workunits/fs/multiclient_sync_read_eof.py create mode 100755 qa/workunits/fs/norstats/kernel_untar_tar.sh create mode 100755 qa/workunits/fs/quota/quota.sh create mode 100755 qa/workunits/fs/snap-hierarchy.sh create mode 100755 qa/workunits/fs/snaps/snap-rm-diff.sh create mode 100755 qa/workunits/fs/snaps/snaptest-0.sh create mode 100755 qa/workunits/fs/snaps/snaptest-1.sh create mode 100755 qa/workunits/fs/snaps/snaptest-2.sh create mode 100755 qa/workunits/fs/snaps/snaptest-authwb.sh create mode 100755 qa/workunits/fs/snaps/snaptest-capwb.sh create mode 100755 qa/workunits/fs/snaps/snaptest-dir-rename.sh create mode 100755 qa/workunits/fs/snaps/snaptest-double-null.sh create mode 100755 qa/workunits/fs/snaps/snaptest-estale.sh create mode 100755 qa/workunits/fs/snaps/snaptest-git-ceph.sh create mode 100755 qa/workunits/fs/snaps/snaptest-hardlink.sh create mode 100755 qa/workunits/fs/snaps/snaptest-intodir.sh create mode 
100755 qa/workunits/fs/snaps/snaptest-multiple-capsnaps.sh create mode 100755 qa/workunits/fs/snaps/snaptest-parents.sh create mode 100755 qa/workunits/fs/snaps/snaptest-realm-split.sh create mode 100755 qa/workunits/fs/snaps/snaptest-snap-rename.sh create mode 100755 qa/workunits/fs/snaps/snaptest-snap-rm-cmp.sh create mode 100755 qa/workunits/fs/snaps/snaptest-upchildrealms.sh create mode 100755 qa/workunits/fs/snaps/snaptest-xattrwb.sh create mode 100755 qa/workunits/fs/snaps/untar_snap_rm.sh create mode 100644 qa/workunits/fs/test_o_trunc.c create mode 100755 qa/workunits/fs/test_o_trunc.sh create mode 100755 qa/workunits/fs/test_python.sh create mode 100755 qa/workunits/fs/upgrade/volume_client create mode 100755 qa/workunits/hadoop/repl.sh create mode 100755 qa/workunits/hadoop/terasort.sh create mode 100755 qa/workunits/hadoop/wordcount.sh create mode 100755 qa/workunits/kernel_untar_build.sh create mode 100755 qa/workunits/libcephfs/test.sh create mode 100755 qa/workunits/mgr/test_localpool.sh create mode 100755 qa/workunits/mon/auth_caps.sh create mode 100644 qa/workunits/mon/caps.py create mode 100755 qa/workunits/mon/caps.sh create mode 100755 qa/workunits/mon/config.sh create mode 100755 qa/workunits/mon/crush_ops.sh create mode 100755 qa/workunits/mon/osd.sh create mode 100755 qa/workunits/mon/pg_autoscaler.sh create mode 100755 qa/workunits/mon/ping.py create mode 100755 qa/workunits/mon/pool_ops.sh create mode 100755 qa/workunits/mon/rbd_snaps_ops.sh create mode 100755 qa/workunits/mon/test_config_key_caps.sh create mode 100755 qa/workunits/mon/test_mon_config_key.py create mode 100755 qa/workunits/mon/test_mon_osdmap_prune.sh create mode 100755 qa/workunits/objectstore/test_fuse.sh create mode 100755 qa/workunits/osdc/stress_objectcacher.sh create mode 100755 qa/workunits/post-file.sh create mode 100755 qa/workunits/rados/clone.sh create mode 100755 qa/workunits/rados/load-gen-big.sh create mode 100755 qa/workunits/rados/load-gen-mix-small-long.sh create mode 100755 qa/workunits/rados/load-gen-mix-small.sh create mode 100755 qa/workunits/rados/load-gen-mix.sh create mode 100755 qa/workunits/rados/load-gen-mostlyread.sh create mode 100755 qa/workunits/rados/stress_watch.sh create mode 100755 qa/workunits/rados/test.sh create mode 100755 qa/workunits/rados/test_alloc_hint.sh create mode 100755 qa/workunits/rados/test_cache_pool.sh create mode 100755 qa/workunits/rados/test_crash.sh create mode 100755 qa/workunits/rados/test_dedup_tool.sh create mode 100755 qa/workunits/rados/test_envlibrados_for_rocksdb.sh create mode 100755 qa/workunits/rados/test_hang.sh create mode 100755 qa/workunits/rados/test_health_warnings.sh create mode 100755 qa/workunits/rados/test_large_omap_detection.py create mode 100755 qa/workunits/rados/test_librados_build.sh create mode 100755 qa/workunits/rados/test_pool_access.sh create mode 100755 qa/workunits/rados/test_pool_quota.sh create mode 100755 qa/workunits/rados/test_python.sh create mode 100755 qa/workunits/rados/test_rados_timeouts.sh create mode 100755 qa/workunits/rados/test_rados_tool.sh create mode 100755 qa/workunits/rbd/cli_generic.sh create mode 100755 qa/workunits/rbd/concurrent.sh create mode 100755 qa/workunits/rbd/diff.sh create mode 100755 qa/workunits/rbd/diff_continuous.sh create mode 100755 qa/workunits/rbd/huge-tickets.sh create mode 100755 qa/workunits/rbd/image_read.sh create mode 100755 qa/workunits/rbd/import_export.sh create mode 100755 qa/workunits/rbd/issue-20295.sh create mode 100755 qa/workunits/rbd/journal.sh create 
mode 100755 qa/workunits/rbd/kernel.sh create mode 100755 qa/workunits/rbd/krbd_data_pool.sh create mode 100755 qa/workunits/rbd/krbd_exclusive_option.sh create mode 100755 qa/workunits/rbd/krbd_fallocate.sh create mode 100755 qa/workunits/rbd/krbd_latest_osdmap_on_map.sh create mode 100755 qa/workunits/rbd/krbd_namespaces.sh create mode 100755 qa/workunits/rbd/krbd_stable_writes.sh create mode 100755 qa/workunits/rbd/krbd_udev_enumerate.sh create mode 100755 qa/workunits/rbd/krbd_udev_netlink_enobufs.sh create mode 100755 qa/workunits/rbd/krbd_udev_netns.sh create mode 100755 qa/workunits/rbd/krbd_udev_symlinks.sh create mode 100755 qa/workunits/rbd/map-snapshot-io.sh create mode 100755 qa/workunits/rbd/map-unmap.sh create mode 100755 qa/workunits/rbd/merge_diff.sh create mode 100755 qa/workunits/rbd/notify_master.sh create mode 100755 qa/workunits/rbd/notify_slave.sh create mode 100755 qa/workunits/rbd/permissions.sh create mode 100755 qa/workunits/rbd/qemu-iotests.sh create mode 100755 qa/workunits/rbd/qemu_dynamic_features.sh create mode 100755 qa/workunits/rbd/qemu_rebuild_object_map.sh create mode 100755 qa/workunits/rbd/rbd-ggate.sh create mode 100755 qa/workunits/rbd/rbd-nbd.sh create mode 100755 qa/workunits/rbd/rbd_groups.sh create mode 100755 qa/workunits/rbd/rbd_mirror.sh create mode 100755 qa/workunits/rbd/rbd_mirror_bootstrap.sh create mode 100755 qa/workunits/rbd/rbd_mirror_fsx_compare.sh create mode 100755 qa/workunits/rbd/rbd_mirror_fsx_prepare.sh create mode 100755 qa/workunits/rbd/rbd_mirror_ha.sh create mode 100755 qa/workunits/rbd/rbd_mirror_helpers.sh create mode 100755 qa/workunits/rbd/rbd_mirror_stress.sh create mode 100755 qa/workunits/rbd/read-flags.sh create mode 100755 qa/workunits/rbd/simple_big.sh create mode 100755 qa/workunits/rbd/test_admin_socket.sh create mode 100755 qa/workunits/rbd/test_librbd.sh create mode 100755 qa/workunits/rbd/test_librbd_python.sh create mode 100755 qa/workunits/rbd/test_lock_fence.sh create mode 100755 qa/workunits/rbd/test_rbd_mirror.sh create mode 100755 qa/workunits/rbd/test_rbd_tasks.sh create mode 100755 qa/workunits/rbd/test_rbdmap_RBDMAPFILE.sh create mode 100755 qa/workunits/rbd/verify_pool.sh create mode 100755 qa/workunits/rename/all.sh create mode 100755 qa/workunits/rename/dir_pri_nul.sh create mode 100755 qa/workunits/rename/dir_pri_pri.sh create mode 100644 qa/workunits/rename/plan.txt create mode 100755 qa/workunits/rename/prepare.sh create mode 100755 qa/workunits/rename/pri_nul.sh create mode 100755 qa/workunits/rename/pri_pri.sh create mode 100755 qa/workunits/rename/pri_rem.sh create mode 100755 qa/workunits/rename/rem_nul.sh create mode 100755 qa/workunits/rename/rem_pri.sh create mode 100755 qa/workunits/rename/rem_rem.sh create mode 100755 qa/workunits/rest/test-restful.sh create mode 100755 qa/workunits/rest/test_mgr_rest_api.py create mode 100755 qa/workunits/restart/test-backtraces.py create mode 100755 qa/workunits/rgw/run-s3tests.sh create mode 100755 qa/workunits/rgw/s3_bucket_quota.pl create mode 100755 qa/workunits/rgw/s3_multipart_upload.pl create mode 100755 qa/workunits/rgw/s3_user_quota.pl create mode 100644 qa/workunits/rgw/s3_utilities.pm create mode 100755 qa/workunits/rgw/test_rgw_orphan_list.sh create mode 100755 qa/workunits/suites/blogbench.sh create mode 100755 qa/workunits/suites/bonnie.sh create mode 100755 qa/workunits/suites/cephfs_journal_tool_smoke.sh create mode 100755 qa/workunits/suites/dbench-short.sh create mode 100755 qa/workunits/suites/dbench.sh create mode 100644 
qa/workunits/suites/ffsb.patch create mode 100755 qa/workunits/suites/ffsb.sh create mode 100755 qa/workunits/suites/fio.sh create mode 100755 qa/workunits/suites/fsstress.sh create mode 100755 qa/workunits/suites/fsx.sh create mode 100755 qa/workunits/suites/fsync-tester.sh create mode 100755 qa/workunits/suites/iogen.sh create mode 100755 qa/workunits/suites/iozone-sync.sh create mode 100755 qa/workunits/suites/iozone.sh create mode 100755 qa/workunits/suites/pjd.sh create mode 100644 qa/workunits/suites/random_write.32.ffsb create mode 100755 qa/workunits/suites/wac.sh create mode 100755 qa/workunits/true.sh (limited to 'qa') diff --git a/qa/.gitignore b/qa/.gitignore new file mode 100644 index 00000000..e80d9d42 --- /dev/null +++ b/qa/.gitignore @@ -0,0 +1,4 @@ +*~ +.*.sw[nmop] +*.pyc +.tox diff --git a/qa/.teuthology_branch b/qa/.teuthology_branch new file mode 100644 index 00000000..1f7391f9 --- /dev/null +++ b/qa/.teuthology_branch @@ -0,0 +1 @@ +master diff --git a/qa/Makefile b/qa/Makefile new file mode 100644 index 00000000..ad655b7e --- /dev/null +++ b/qa/Makefile @@ -0,0 +1,4 @@ +DIRS= workunits btrfs + +all: + for d in $(DIRS) ; do ( cd $$d ; $(MAKE) all ) ; done diff --git a/qa/README b/qa/README new file mode 100644 index 00000000..6b3ec220 --- /dev/null +++ b/qa/README @@ -0,0 +1,64 @@ +ceph-qa-suite +------------- + +clusters/ - some predefined cluster layouts +suites/ - set suite + +The suites directory has a hierarchical collection of tests. This can be +freeform, but generally follows the convention of + + suites///... + +A test is described by a yaml fragment. + +A test can exist as a single .yaml file in the directory tree. For example: + + suites/foo/one.yaml + suites/foo/two.yaml + +is a simple group of two tests. + +A directory with a magic '+' file represents a test that combines all +other items in the directory into a single yaml fragment. For example: + + suites/foo/bar/+ + suites/foo/bar/a.yaml + suites/foo/bar/b.yaml + suites/foo/bar/c.yaml + +is a single test consisting of a + b + c. + +A directory with a magic '%' file represents a test matrix formed from +all other items in the directory. For example, + + suites/baz/% + suites/baz/a.yaml + suites/baz/b/b1.yaml + suites/baz/b/b2.yaml + suites/baz/c.yaml + suites/baz/d/d1.yaml + suites/baz/d/d2.yaml + +is a 4-dimensional test matrix. Two dimensions (a, c) are trivial (1 +item), so this is really 2x2 = 4 tests, which are + + a + b1 + c + d1 + a + b1 + c + d2 + a + b2 + c + d1 + a + b2 + c + d2 + +A directory with a magic '$' file represents a test where one of the other +items is chosen randomly. For example, + +suites/foo/$ +suites/foo/a.yaml +suites/foo/b.yaml +suites/foo/c.yaml + +is a single test. It will be either a.yaml, b.yaml or c.yaml. This can be +used in conjunction with the '%' file in other directories to run a series of +tests without causing an unwanted increase in the total number of jobs run. + +Symlinks are okay. 
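To make the '%' convention above concrete, the following is a minimal sketch in Python of how such a facet directory could be expanded into job combinations. It is only an illustration under simplified assumptions (every facet is either a lone .yaml file or a flat subdirectory of .yaml fragments); the real expansion is performed by teuthology's suite-building code, and the helper name here is hypothetical.

    import itertools
    import os

    def expand_matrix(dirpath):
        # Each entry under a '%' directory is one dimension of the matrix:
        # a lone .yaml file is a trivial (1-item) dimension, a subdirectory
        # contributes one item per .yaml fragment it contains.
        dims = []
        for name in sorted(os.listdir(dirpath)):
            if name == '%':
                continue
            path = os.path.join(dirpath, name)
            if os.path.isdir(path):
                dims.append(sorted(os.path.join(path, f)
                                   for f in os.listdir(path)
                                   if f.endswith('.yaml')))
            elif name.endswith('.yaml'):
                dims.append([path])
        # One job per element of the cartesian product of all dimensions;
        # each job is the list of yaml fragments to concatenate for it.
        return [list(combo) for combo in itertools.product(*dims)]

    # For the suites/baz example above this yields 1 * 2 * 1 * 2 = 4 jobs,
    # e.g. ['.../a.yaml', '.../b/b1.yaml', '.../c.yaml', '.../d/d1.yaml'].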
+ +The teuthology code can be found in https://github.com/ceph/teuthology.git diff --git a/qa/archs/aarch64.yaml b/qa/archs/aarch64.yaml new file mode 100644 index 00000000..6399b995 --- /dev/null +++ b/qa/archs/aarch64.yaml @@ -0,0 +1 @@ +arch: aarch64 diff --git a/qa/archs/armv7.yaml b/qa/archs/armv7.yaml new file mode 100644 index 00000000..c261ebd5 --- /dev/null +++ b/qa/archs/armv7.yaml @@ -0,0 +1 @@ +arch: armv7l diff --git a/qa/archs/i686.yaml b/qa/archs/i686.yaml new file mode 100644 index 00000000..a920e5a9 --- /dev/null +++ b/qa/archs/i686.yaml @@ -0,0 +1 @@ +arch: i686 diff --git a/qa/archs/x86_64.yaml b/qa/archs/x86_64.yaml new file mode 100644 index 00000000..c2409f5d --- /dev/null +++ b/qa/archs/x86_64.yaml @@ -0,0 +1 @@ +arch: x86_64 diff --git a/qa/btrfs/.gitignore b/qa/btrfs/.gitignore new file mode 100644 index 00000000..530c1b5b --- /dev/null +++ b/qa/btrfs/.gitignore @@ -0,0 +1,3 @@ +/clone_range +/test_async_snap +/create_async_snap diff --git a/qa/btrfs/Makefile b/qa/btrfs/Makefile new file mode 100644 index 00000000..be95ecfd --- /dev/null +++ b/qa/btrfs/Makefile @@ -0,0 +1,11 @@ +CFLAGS = -Wall -Wextra -D_GNU_SOURCE + +TARGETS = clone_range test_async_snap create_async_snap + +.c: + $(CC) $(CFLAGS) $@.c -o $@ + +all: $(TARGETS) + +clean: + rm $(TARGETS) diff --git a/qa/btrfs/clone_range.c b/qa/btrfs/clone_range.c new file mode 100644 index 00000000..0a88e160 --- /dev/null +++ b/qa/btrfs/clone_range.c @@ -0,0 +1,35 @@ +#include +#include +#include +#include + +#include +#include "../../src/os/btrfs_ioctl.h" +#include +#include + +int main(int argc, char **argv) +{ + struct btrfs_ioctl_clone_range_args ca; + int dfd; + int r; + + if (argc < 6) { + printf("usage: %s \n", argv[0]); + exit(1); + } + + ca.src_fd = open(argv[1], O_RDONLY); + ca.src_offset = atoi(argv[2]); + ca.src_length = atoi(argv[3]); + dfd = open(argv[4], O_WRONLY|O_CREAT); + ca.dest_offset = atoi(argv[5]); + + r = ioctl(dfd, BTRFS_IOC_CLONE_RANGE, &ca); + printf("clone_range %s %lld %lld~%lld to %s %d %lld = %d %s\n", + argv[1], ca.src_fd, + ca.src_offset, ca.src_length, + argv[4], dfd, + ca.dest_offset, r, strerror(errno)); + return r; +} diff --git a/qa/btrfs/create_async_snap.c b/qa/btrfs/create_async_snap.c new file mode 100644 index 00000000..2ef22af7 --- /dev/null +++ b/qa/btrfs/create_async_snap.c @@ -0,0 +1,34 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include "../../src/os/btrfs_ioctl.h" + +struct btrfs_ioctl_vol_args_v2 va; + +int main(int argc, char **argv) +{ + int fd; + int r; + + if (argc != 3) { + printf("usage: %s \n", argv[0]); + return 1; + } + printf("creating snap ./%s from %s\n", argv[2], argv[1]); + fd = open(".", O_RDONLY); + va.fd = open(argv[1], O_RDONLY); + va.flags = BTRFS_SUBVOL_CREATE_ASYNC; + strcpy(va.name, argv[2]); + r = ioctl(fd, BTRFS_IOC_SNAP_CREATE_V2, (unsigned long long)&va); + printf("result %d\n", r ? 
-errno:0); + return r; +} diff --git a/qa/btrfs/test_async_snap.c b/qa/btrfs/test_async_snap.c new file mode 100644 index 00000000..211be95a --- /dev/null +++ b/qa/btrfs/test_async_snap.c @@ -0,0 +1,83 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include "../../src/os/btrfs_ioctl.h" + +struct btrfs_ioctl_vol_args_v2 va; +struct btrfs_ioctl_vol_args vold; +int max = 4; + +void check_return(int r) +{ + if (r < 0) { + printf("********* failed with %d %s ********\n", errno, strerror(errno)); + exit(1); + } +} + +int main(int argc, char **argv) +{ + int num = 1000; + + if (argc > 1) + num = atoi(argv[1]); + printf("will do %d iterations\n", num); + + int cwd = open(".", O_RDONLY); + printf("cwd = %d\n", cwd); + while (num-- > 0) { + if (rand() % 10 == 0) { + __u64 transid; + int r; + printf("sync starting\n"); + r = ioctl(cwd, BTRFS_IOC_START_SYNC, &transid); + check_return(r); + printf("sync started, transid %lld, waiting\n", transid); + r = ioctl(cwd, BTRFS_IOC_WAIT_SYNC, &transid); + check_return(r); + printf("sync finished\n"); + } + + int i = rand() % max; + struct stat st; + va.fd = cwd; + sprintf(va.name, "test.%d", i); + va.transid = 0; + int r = stat(va.name, &st); + if (r < 0) { + if (rand() % 3 == 0) { + printf("snap create (sync) %s\n", va.name); + va.flags = 0; + r = ioctl(cwd, BTRFS_IOC_SNAP_CREATE_V2, &va); + check_return(r); + } else { + printf("snap create (async) %s\n", va.name); + va.flags = BTRFS_SUBVOL_CREATE_ASYNC; + r = ioctl(cwd, BTRFS_IOC_SNAP_CREATE_V2, &va); + check_return(r); + printf("snap created, transid %lld\n", va.transid); + if (rand() % 2 == 0) { + printf("waiting for async snap create\n"); + r = ioctl(cwd, BTRFS_IOC_WAIT_SYNC, &va.transid); + check_return(r); + } + } + } else { + printf("snap remove %s\n", va.name); + vold.fd = va.fd; + strcpy(vold.name, va.name); + r = ioctl(cwd, BTRFS_IOC_SNAP_DESTROY, &vold); + check_return(r); + } + } + return 0; +} diff --git a/qa/btrfs/test_rmdir_async_snap.c b/qa/btrfs/test_rmdir_async_snap.c new file mode 100644 index 00000000..5dafaaca --- /dev/null +++ b/qa/btrfs/test_rmdir_async_snap.c @@ -0,0 +1,62 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include "../../src/os/btrfs_ioctl.h" + +struct btrfs_ioctl_vol_args_v2 va; +struct btrfs_ioctl_vol_args vold; + +int main(int argc, char **argv) +{ + int num = 1000; + int i, r, fd; + char buf[30]; + + if (argc > 1) + num = atoi(argv[1]); + printf("will do %d iterations\n", num); + + fd = open(".", O_RDONLY); + vold.fd = 0; + strcpy(vold.name, "current"); + r = ioctl(fd, BTRFS_IOC_SUBVOL_CREATE, (unsigned long int)&vold); + printf("create current ioctl got %d\n", r ? 
errno:0); + if (r) + return 1; + + for (i=0; i $mnt/sub/file +client_umount + +mkdir -p $mnt/1 +mkdir -p $mnt/2 +/bin/mount -t ceph $monhost:/sub $mnt/1 +grep sub $mnt/1/file + +/bin/mount -t ceph $monhost:/ $mnt/2 +grep sub $mnt/2/sub/file + +/bin/umount $mnt/1 +grep sub $mnt/2/sub/file + +/bin/umount $mnt/2 diff --git a/qa/client/common.sh b/qa/client/common.sh new file mode 100644 index 00000000..d06368e6 --- /dev/null +++ b/qa/client/common.sh @@ -0,0 +1,58 @@ + +# defaults +[ -z "$bindir" ] && bindir=$PWD # location of init-ceph +[ -z "$conf" ] && conf="$basedir/ceph.conf" +[ -z "$mnt" ] && mnt="/c" +[ -z "$monhost" ] && monhost="cosd0" + +set -e + +mydir=`hostname`_`echo $0 | sed 's/\//_/g'` + +client_mount() +{ + /bin/mount -t ceph $monhost:/ $mnt +} + +client_umount() +{ + /bin/umount $mnt + # look for VFS complaints + if dmesg | tail -n 50 | grep -c "VFS: Busy inodes" ; then + echo "looks like we left inodes pinned" + exit 1 + fi +} + +ceph_start() +{ + $bindir/init-ceph -c $conf start ${1} +} + +ceph_stop() +{ + $bindir/init-ceph -c $conf stop ${1} +} + +ceph_restart() +{ + $bindir/init-ceph -c $conf restart ${1} +} + +ceph_command() +{ + $bindir/ceph -c $conf $* +} + +client_enter_mydir() +{ + pushd . + test -d $mnt/$mydir && rm -r $mnt/$mydir + mkdir $mnt/$mydir + cd $mnt/$mydir +} + +client_leave_mydir() +{ + popd +} diff --git a/qa/client/gen-1774.sh b/qa/client/gen-1774.sh new file mode 100644 index 00000000..3ee5bc90 --- /dev/null +++ b/qa/client/gen-1774.sh @@ -0,0 +1,2068 @@ +#!/usr/bin/env bash +set -e + +mount () { :; } +umount () { :; } + +list="\ +abiword.control +abiword.list +abiword-plugin-latex.control +abiword-plugin-latex.list +abiword-plugin-opendocument.control +abiword-plugin-opendocument.list +abiword-plugin-openxml.control +abiword-plugin-openxml.list +abiword-plugin-pdf.control +abiword-plugin-pdf.list +abiword-plugin-wikipedia.control +abiword-plugin-wikipedia.list +abiword.postinst +aceofpenguins.control +aceofpenguins-launcher.control +aceofpenguins-launcher.list +aceofpenguins.list +aceofpenguins.postinst +alsa-conf-base.control +alsa-conf-base.list +alsa-scenarii-shr.conffiles +alsa-scenarii-shr.control +alsa-scenarii-shr.list +alsa-utils-alsactl.control +alsa-utils-alsactl.list +alsa-utils-alsamixer.control +alsa-utils-alsamixer.list +alsa-utils-amixer.control +alsa-utils-amixer.list +alsa-utils-aplay.control +alsa-utils-aplay.list +angstrom-libc-fixup-hack.control +angstrom-libc-fixup-hack.list +angstrom-libc-fixup-hack.postinst +apmd.control +apmd.list +apmd.postinst +apmd.postrm +apmd.prerm +aspell.control +aspell.list +atd-over-fso.control +atd-over-fso.list +atd-over-fso.postinst +atd-over-fso.postrm +atd-over-fso.prerm +base-files.conffiles +base-files.control +base-files.list +base-passwd.control +base-passwd.list +base-passwd.postinst +bash.control +bash.list +bash.postinst +bluez4.control +bluez4.list +bluez4.postinst +bluez4.postrm +bluez4.prerm +boost-signals.control +boost-signals.list +boost-signals.postinst +busybox.control +busybox.list +busybox-mountall.control +busybox-mountall.list +busybox-mountall.postinst +busybox-mountall.prerm +busybox.postinst +busybox.prerm +busybox-syslog.conffiles +busybox-syslog.control +busybox-syslog.list +busybox-syslog.postinst +busybox-syslog.postrm +busybox-syslog.prerm +ca-certificates.conffiles +ca-certificates.control +ca-certificates.list +ca-certificates.postinst +calc.control +calc.list +connman.control +connman.list +connman-plugin-udhcp.control +connman-plugin-udhcp.list 
+connman-plugin-wifi.control +connman-plugin-wifi.list +connman.postinst +connman.postrm +connman.prerm +connman-scripts.control +connman-scripts.list +cpio.control +cpio.list +cpio.postinst +cpio.prerm +cpp.control +cpp.list +cpp-symlinks.control +cpp-symlinks.list +cron.control +cron.list +cron.postinst +cron.postrm +cron.prerm +curl.control +curl.list +dbus.conffiles +dbus.control +dbus-daemon-proxy.control +dbus-daemon-proxy.list +dbus-hlid.control +dbus-hlid.list +dbus.list +dbus.postinst +dbus.postrm +dbus.prerm +dbus-x11.control +dbus-x11.list +devmem2.control +devmem2.list +distro-feed-configs.conffiles +distro-feed-configs.control +distro-feed-configs.list +dosfstools.control +dosfstools.list +e2fsprogs-badblocks.control +e2fsprogs-badblocks.list +e2fsprogs.control +e2fsprogs-e2fsck.control +e2fsprogs-e2fsck.list +e2fsprogs-e2fsck.postinst +e2fsprogs-e2fsck.prerm +e2fsprogs.list +e2fsprogs-mke2fs.control +e2fsprogs-mke2fs.list +e2fsprogs-mke2fs.postinst +e2fsprogs-mke2fs.prerm +e2fsprogs.postinst +e2fsprogs.prerm +ecore-con.control +ecore-con.list +ecore-con.postinst +ecore.control +ecore-evas.control +ecore-evas.list +ecore-evas.postinst +ecore-fb.control +ecore-fb.list +ecore-fb.postinst +ecore-file.control +ecore-file.list +ecore-file.postinst +ecore-imf.control +ecore-imf-evas.control +ecore-imf-evas.list +ecore-imf-evas.postinst +ecore-imf.list +ecore-imf.postinst +ecore-input.control +ecore-input.list +ecore-input.postinst +ecore-ipc.control +ecore-ipc.list +ecore-ipc.postinst +ecore.list +ecore.postinst +ecore-x.control +ecore-x.list +ecore-x.postinst +edbus.control +edbus.list +edbus.postinst +edje.control +edje.list +edje.postinst +edje-utils.control +edje-utils.list +efreet.control +efreet.list +efreet.postinst +eggdbus.control +eggdbus.list +eggdbus.postinst +eglibc-binary-localedata-en-us.control +eglibc-binary-localedata-en-us.list +eglibc-charmap-utf-8.control +eglibc-charmap-utf-8.list +eglibc-gconv.control +eglibc-gconv-cp1252.control +eglibc-gconv-cp1252.list +eglibc-gconv-ibm850.control +eglibc-gconv-ibm850.list +eglibc-gconv-iso8859-15.control +eglibc-gconv-iso8859-15.list +eglibc-gconv-iso8859-1.control +eglibc-gconv-iso8859-1.list +eglibc-gconv.list +eglibc-localedata-i18n.control +eglibc-localedata-i18n.list +eglibc-localedata-iso14651-t1-common.control +eglibc-localedata-iso14651-t1-common.list +eglibc-localedata-iso14651-t1.control +eglibc-localedata-iso14651-t1.list +eglibc-localedata-translit-circle.control +eglibc-localedata-translit-circle.list +eglibc-localedata-translit-cjk-compat.control +eglibc-localedata-translit-cjk-compat.list +eglibc-localedata-translit-compat.control +eglibc-localedata-translit-compat.list +eglibc-localedata-translit-font.control +eglibc-localedata-translit-font.list +eglibc-localedata-translit-fraction.control +eglibc-localedata-translit-fraction.list +eglibc-localedata-translit-narrow.control +eglibc-localedata-translit-narrow.list +eglibc-localedata-translit-neutral.control +eglibc-localedata-translit-neutral.list +eglibc-localedata-translit-small.control +eglibc-localedata-translit-small.list +eglibc-localedata-translit-wide.control +eglibc-localedata-translit-wide.list +eglibc-utils.control +eglibc-utils.list +eina.control +eina.list +eina.postinst +eject.control +eject.list +elementary-theme-gry.control +elementary-theme-gry.list +emacs-x11.control +emacs-x11.list +embryo.control +embryo.list +embryo.postinst +embryo-tests.control +embryo-tests.list +enchant.control +enchant.list +enchant.postinst +epdfview.control 
+epdfview.list +espeak.control +espeak.list +espeak.postinst +evas.control +evas-engine-buffer.control +evas-engine-buffer.list +evas-engine-fb.control +evas-engine-fb.list +evas-engine-software-16.control +evas-engine-software-16.list +evas-engine-software-16-x11.control +evas-engine-software-16-x11.list +evas-engine-software-generic.control +evas-engine-software-generic.list +evas-engine-software-x11.control +evas-engine-software-x11.list +evas-engine-xrender-x11.control +evas-engine-xrender-x11.list +evas.list +evas-loader-eet.control +evas-loader-eet.list +evas-loader-jpeg.control +evas-loader-jpeg.list +evas-loader-png.control +evas-loader-png.list +evas.postinst +evas-saver-eet.control +evas-saver-eet.list +evas-saver-jpeg.control +evas-saver-jpeg.list +evas-saver-png.control +evas-saver-png.list +evtest.control +evtest.list +e-wm-config-default.control +e-wm-config-default.list +e-wm-config-illume2-shr.control +e-wm-config-illume2-shr.list +e-wm-config-illume-shr.control +e-wm-config-illume-shr.list +e-wm.control +e-wm-icons.control +e-wm-icons.list +e-wm-images.control +e-wm-images.list +e-wm-input-methods.control +e-wm-input-methods.list +e-wm.list +e-wm-menu-shr.control +e-wm-menu-shr.list +e-wm-other.control +e-wm-other.list +e-wm.postinst +e-wm.postrm +e-wm-sysactions-shr.control +e-wm-sysactions-shr.list +e-wm-theme-default.control +e-wm-theme-default.list +e-wm-theme-illume-gry.control +e-wm-theme-illume-gry.list +e-wm-theme-illume-shr.control +e-wm-theme-illume-shr.list +e-wm-utils.control +e-wm-utils.list +fbreader.control +fbreader.list +fbreader.postinst +fbset.control +fbset.list +fbset-modes.conffiles +fbset-modes.control +fbset-modes.list +fbset.postinst +fbset.postrm +ffalarms.control +ffalarms.list +file.control +file.list +file.postinst +findutils.control +findutils.list +findutils.postinst +findutils.prerm +flac.control +flac.list +flite.control +flite.list +fontconfig-utils.control +fontconfig-utils.list +font-update-common.control +font-update-common.list +frameworkd-config-shr.conffiles +frameworkd-config-shr.control +frameworkd-config-shr.list +frameworkd.control +frameworkd.list +frameworkd.postinst +frameworkd.postrm +frameworkd.prerm +fso-abyss-config.conffiles +fso-abyss-config.control +fso-abyss-config.list +fso-abyss.control +fso-abyss.list +fso-apm.control +fso-apm.list +fsodatad.control +fsodatad.list +fsodatad.postinst +fsodeviced.control +fsodeviced.list +fsodeviced.postinst +fsodeviced.postrm +fsodeviced.prerm +fso-gpsd.control +fso-gpsd.list +fso-gpsd.postinst +fso-gpsd.postrm +fso-gpsd.prerm +fsogsmd.control +fsogsmd.list +fsogsmd.postinst +fsonetworkd.control +fsonetworkd.list +fsonetworkd.postinst +fsoraw.control +fsoraw.list +fsotdld.control +fsotdld.list +fsotdld.postinst +fsousaged.control +fsousaged.list +fsousaged.postinst +gcc.control +gcc.list +gconf.control +gconf.list +gconf.postinst +g++.control +gdb.control +gdb.list +gdk-pixbuf-loader-gif.control +gdk-pixbuf-loader-gif.list +gdk-pixbuf-loader-gif.postinst +gdk-pixbuf-loader-jpeg.control +gdk-pixbuf-loader-jpeg.list +gdk-pixbuf-loader-jpeg.postinst +gdk-pixbuf-loader-png.control +gdk-pixbuf-loader-png.list +gdk-pixbuf-loader-png.postinst +gdk-pixbuf-loader-xpm.control +gdk-pixbuf-loader-xpm.list +gdk-pixbuf-loader-xpm.postinst +git.control +git.list +g++.list +gnome-pty-helper.control +gnome-pty-helper.list +gnome-vfs.control +gnome-vfs.list +gnome-vfs-plugin-file.control +gnome-vfs-plugin-file.list +gnome-vfs.postinst +gnome-vfs.prerm +gnupg.control +gnupg.list +gpe-icons.control 
+gpe-icons.list +gpe-icons.postinst +gpe-icons.postrm +gpe-scap.control +gpe-scap.list +gpe-sketchbook.control +gpe-sketchbook.list +gpgv.control +gpgv.list +gridpad.control +gridpad.list +gst-plugin-alsa.control +gst-plugin-alsa.list +gst-plugin-audioconvert.control +gst-plugin-audioconvert.list +gst-plugin-autodetect.control +gst-plugin-autodetect.list +gst-plugin-gconfelements.control +gst-plugin-gconfelements.list +gst-plugin-gconfelements.postinst +gst-plugin-gconfelements.prerm +gst-plugin-mad.control +gst-plugin-mad.list +gstreamer.control +gstreamer.list +gstreamer.postinst +gtk+.control +gtk+.list +gtk+.postinst +hal.control +hal-info.control +hal-info.list +hal.list +hal.postinst +hal.postrm +hdparm.control +hdparm.list +hdparm.postinst +hdparm.prerm +hicolor-icon-theme.control +hicolor-icon-theme.list +hicolor-icon-theme.postinst +hicolor-icon-theme.postrm +htop.control +htop.list +i2c-tools.control +i2c-tools.list +id3lib.control +id3lib.list +id3lib.postinst +iliwi.control +iliwi.list +illume-keyboard-default-alpha.control +illume-keyboard-default-alpha.list +illume-keyboard-default-terminal.control +illume-keyboard-default-terminal.list +illume-keyboard-numeric-alt.control +illume-keyboard-numeric-alt.list +imagemagick.control +imagemagick.list +imagemagick.postinst +initscripts-shr.control +initscripts-shr.list +intone.control +intone.list +iptables.control +iptables.list +iptables.postinst +kernel-2.6.29-rc3.control +kernel-2.6.29-rc3.list +kernel.control +kernel-image-2.6.29-rc3.control +kernel-image-2.6.29-rc3.list +kernel-image-2.6.29-rc3.postinst +kernel.list +kernel-module-ar6000.control +kernel-module-ar6000.list +kernel-module-ar6000.postinst +kernel-module-ar6000.postrm +kernel-module-arc4.control +kernel-module-arc4.list +kernel-module-arc4.postinst +kernel-module-arc4.postrm +kernel-module-asix.control +kernel-module-asix.list +kernel-module-asix.postinst +kernel-module-asix.postrm +kernel-module-bluetooth.control +kernel-module-bluetooth.list +kernel-module-bluetooth.postinst +kernel-module-bluetooth.postrm +kernel-module-bnep.control +kernel-module-bnep.list +kernel-module-bnep.postinst +kernel-module-bnep.postrm +kernel-module-btusb.control +kernel-module-btusb.list +kernel-module-btusb.postinst +kernel-module-btusb.postrm +kernel-module-crc-ccitt.control +kernel-module-crc-ccitt.list +kernel-module-crc-ccitt.postinst +kernel-module-crc-ccitt.postrm +kernel-module-ecb.control +kernel-module-ecb.list +kernel-module-ecb.postinst +kernel-module-ecb.postrm +kernel-module-exportfs.control +kernel-module-exportfs.list +kernel-module-exportfs.postinst +kernel-module-exportfs.postrm +kernel-module-gadgetfs.control +kernel-module-gadgetfs.list +kernel-module-gadgetfs.postinst +kernel-module-gadgetfs.postrm +kernel-module-g-ether.control +kernel-module-g-ether.list +kernel-module-g-ether.postinst +kernel-module-g-ether.postrm +kernel-module-g-file-storage.control +kernel-module-g-file-storage.list +kernel-module-g-file-storage.postinst +kernel-module-g-file-storage.postrm +kernel-module-g-serial.control +kernel-module-g-serial.list +kernel-module-g-serial.postinst +kernel-module-g-serial.postrm +kernel-module-hidp.control +kernel-module-hidp.list +kernel-module-hidp.postinst +kernel-module-hidp.postrm +kernel-module-iptable-filter.control +kernel-module-iptable-filter.list +kernel-module-iptable-filter.postinst +kernel-module-iptable-filter.postrm +kernel-module-iptable-nat.control +kernel-module-iptable-nat.list +kernel-module-iptable-nat.postinst 
+kernel-module-iptable-nat.postrm +kernel-module-ip-tables.control +kernel-module-ip-tables.list +kernel-module-ip-tables.postinst +kernel-module-ip-tables.postrm +kernel-module-ipt-masquerade.control +kernel-module-ipt-masquerade.list +kernel-module-ipt-masquerade.postinst +kernel-module-ipt-masquerade.postrm +kernel-module-l2cap.control +kernel-module-l2cap.list +kernel-module-l2cap.postinst +kernel-module-l2cap.postrm +kernel-module-lockd.control +kernel-module-lockd.list +kernel-module-lockd.postinst +kernel-module-lockd.postrm +kernel-module-michael-mic.control +kernel-module-michael-mic.list +kernel-module-michael-mic.postinst +kernel-module-michael-mic.postrm +kernel-module-nf-conntrack.control +kernel-module-nf-conntrack-ipv4.control +kernel-module-nf-conntrack-ipv4.list +kernel-module-nf-conntrack-ipv4.postinst +kernel-module-nf-conntrack-ipv4.postrm +kernel-module-nf-conntrack.list +kernel-module-nf-conntrack.postinst +kernel-module-nf-conntrack.postrm +kernel-module-nf-defrag-ipv4.control +kernel-module-nf-defrag-ipv4.list +kernel-module-nf-defrag-ipv4.postinst +kernel-module-nf-defrag-ipv4.postrm +kernel-module-nf-nat.control +kernel-module-nf-nat.list +kernel-module-nf-nat.postinst +kernel-module-nf-nat.postrm +kernel-module-nfs-acl.control +kernel-module-nfs-acl.list +kernel-module-nfs-acl.postinst +kernel-module-nfs-acl.postrm +kernel-module-nfsd.control +kernel-module-nfsd.list +kernel-module-nfsd.postinst +kernel-module-nfsd.postrm +kernel-module-nls-utf8.control +kernel-module-nls-utf8.list +kernel-module-nls-utf8.postinst +kernel-module-nls-utf8.postrm +kernel-module-ohci-hcd.control +kernel-module-ohci-hcd.list +kernel-module-ohci-hcd.postinst +kernel-module-ohci-hcd.postrm +kernel-module-pegasus.control +kernel-module-pegasus.list +kernel-module-pegasus.postinst +kernel-module-pegasus.postrm +kernel-module-ppp-async.control +kernel-module-ppp-async.list +kernel-module-ppp-async.postinst +kernel-module-ppp-async.postrm +kernel-module-ppp-deflate.control +kernel-module-ppp-deflate.list +kernel-module-ppp-deflate.postinst +kernel-module-ppp-deflate.postrm +kernel-module-ppp-generic.control +kernel-module-ppp-generic.list +kernel-module-ppp-generic.postinst +kernel-module-ppp-generic.postrm +kernel-module-ppp-mppe.control +kernel-module-ppp-mppe.list +kernel-module-ppp-mppe.postinst +kernel-module-ppp-mppe.postrm +kernel-module-rfcomm.control +kernel-module-rfcomm.list +kernel-module-rfcomm.postinst +kernel-module-rfcomm.postrm +kernel-module-s3cmci.control +kernel-module-s3cmci.list +kernel-module-s3cmci.postinst +kernel-module-s3cmci.postrm +kernel-module-sco.control +kernel-module-sco.list +kernel-module-sco.postinst +kernel-module-sco.postrm +kernel-module-scsi-mod.control +kernel-module-scsi-mod.list +kernel-module-scsi-mod.postinst +kernel-module-scsi-mod.postrm +kernel-module-sd-mod.control +kernel-module-sd-mod.list +kernel-module-sd-mod.postinst +kernel-module-sd-mod.postrm +kernel-module-slhc.control +kernel-module-slhc.list +kernel-module-slhc.postinst +kernel-module-slhc.postrm +kernel-module-snd.control +kernel-module-snd.list +kernel-module-snd-page-alloc.control +kernel-module-snd-page-alloc.list +kernel-module-snd-page-alloc.postinst +kernel-module-snd-page-alloc.postrm +kernel-module-snd-pcm.control +kernel-module-snd-pcm.list +kernel-module-snd-pcm.postinst +kernel-module-snd-pcm.postrm +kernel-module-snd.postinst +kernel-module-snd.postrm +kernel-module-snd-soc-core.control +kernel-module-snd-soc-core.list +kernel-module-snd-soc-core.postinst 
+kernel-module-snd-soc-core.postrm +kernel-module-snd-soc-neo1973-gta02-wm8753.control +kernel-module-snd-soc-neo1973-gta02-wm8753.list +kernel-module-snd-soc-neo1973-gta02-wm8753.postinst +kernel-module-snd-soc-neo1973-gta02-wm8753.postrm +kernel-module-snd-soc-s3c24xx.control +kernel-module-snd-soc-s3c24xx-i2s.control +kernel-module-snd-soc-s3c24xx-i2s.list +kernel-module-snd-soc-s3c24xx-i2s.postinst +kernel-module-snd-soc-s3c24xx-i2s.postrm +kernel-module-snd-soc-s3c24xx.list +kernel-module-snd-soc-s3c24xx.postinst +kernel-module-snd-soc-s3c24xx.postrm +kernel-module-snd-soc-wm8753.control +kernel-module-snd-soc-wm8753.list +kernel-module-snd-soc-wm8753.postinst +kernel-module-snd-soc-wm8753.postrm +kernel-module-snd-timer.control +kernel-module-snd-timer.list +kernel-module-snd-timer.postinst +kernel-module-snd-timer.postrm +kernel-module-sunrpc.control +kernel-module-sunrpc.list +kernel-module-sunrpc.postinst +kernel-module-sunrpc.postrm +kernel-module-tun.control +kernel-module-tun.list +kernel-module-tun.postinst +kernel-module-tun.postrm +kernel-module-uinput.control +kernel-module-uinput.list +kernel-module-uinput.postinst +kernel-module-uinput.postrm +kernel-module-usbserial.control +kernel-module-usbserial.list +kernel-module-usbserial.postinst +kernel-module-usbserial.postrm +kernel-module-usb-storage.control +kernel-module-usb-storage.list +kernel-module-usb-storage.postinst +kernel-module-usb-storage.postrm +kernel-module-x-tables.control +kernel-module-x-tables.list +kernel-module-x-tables.postinst +kernel-module-x-tables.postrm +kernel.postinst +kernel.postrm +lame.control +lame.list +liba52-0.control +liba52-0.list +liba52-0.postinst +libacl1.control +libacl1.list +libacl1.postinst +libapm1.control +libapm1.list +libapm1.postinst +libasound2.control +libasound2.list +libasound2.postinst +libaspell15.control +libaspell15.list +libaspell15.postinst +libatk-1.0-0.control +libatk-1.0-0.list +libatk-1.0-0.postinst +libattr1.control +libattr1.list +libattr1.postinst +libavahi-client3.control +libavahi-client3.list +libavahi-client3.postinst +libavahi-common3.control +libavahi-common3.list +libavahi-common3.postinst +libavahi-glib1.control +libavahi-glib1.list +libavahi-glib1.postinst +libavcodec52.control +libavcodec52.list +libavcodec52.postinst +libavformat52.control +libavformat52.list +libavformat52.postinst +libavutil50.control +libavutil50.list +libavutil50.postinst +libblkid1.control +libblkid1.list +libblkid1.postinst +libbz2-1.control +libbz2-1.list +libbz2-1.postinst +libc6.control +libc6.list +libc6.postinst +libcairo2.control +libcairo2.list +libcairo2.postinst +libcanberra0.control +libcanberra0.list +libcanberra0.postinst +libcanberra-alsa.control +libcanberra-alsa.list +libcom-err2.control +libcom-err2.list +libcom-err2.postinst +libcroco.control +libcroco.list +libcroco.postinst +libcrypto0.9.8.control +libcrypto0.9.8.list +libcrypto0.9.8.postinst +libcups2.control +libcups2.list +libcups2.postinst +libcurl4.control +libcurl4.list +libcurl4.postinst +libdbus-1-3.control +libdbus-1-3.list +libdbus-1-3.postinst +libdbus-glib-1-2.control +libdbus-glib-1-2.list +libdbus-glib-1-2.postinst +libdmx1.control +libdmx1.list +libdmx1.postinst +libdrm.control +libdrm.list +libdrm.postinst +libdvdcss2.control +libdvdcss2.list +libdvdcss2.postinst +libdvdread3.control +libdvdread3.list +libdvdread3.postinst +libeet1.control +libeet1.list +libeet1.postinst +libelementary-ver-pre-svn-05-0.control +libelementary-ver-pre-svn-05-0.list +libelementary-ver-pre-svn-05-0.postinst 
+libelementary-ver-pre-svn-05-themes.control +libelementary-ver-pre-svn-05-themes.list +libelf0.control +libelf0.list +libelf0.postinst +libewebkit0.control +libewebkit0.list +libewebkit0.postinst +libexif12.control +libexif12.list +libexif12.postinst +libexosip2.control +libexosip2.list +libexosip2.postinst +libexpat1.control +libexpat1.list +libexpat1.postinst +libfaac0.control +libfaac0.list +libfaac0.postinst +libfakekey0.control +libfakekey0.list +libfakekey0.postinst +libffi5.control +libffi5.list +libffi5.postinst +libflac8.control +libflac8.list +libflac8.postinst +libfontconfig1.control +libfontconfig1.list +libfontconfig1.postinst +libfontenc1.control +libfontenc1.list +libfontenc1.postinst +libframeworkd-glib0.control +libframeworkd-glib0.list +libframeworkd-glib0.postinst +libfreetype6.control +libfreetype6.list +libfreetype6.postinst +libfribidi0.control +libfribidi0.list +libfribidi0.postinst +libfsobasics0.control +libfsobasics0.list +libfsobasics0.postinst +libfsoframework0.control +libfsoframework0.list +libfsoframework0.postinst +libfso-glib0.control +libfso-glib0.list +libfso-glib0.postinst +libfsoresource0.control +libfsoresource0.list +libfsoresource0.postinst +libfsotransport0.control +libfsotransport0.list +libfsotransport0.postinst +libgcc1.control +libgcc1.list +libgcc1.postinst +libgcrypt11.control +libgcrypt11.list +libgcrypt11.postinst +libgee2.control +libgee2.list +libgee2.postinst +libgio-2.0-0.control +libgio-2.0-0.list +libgio-2.0-0.postinst +libgl1.control +libgl1.list +libgl1.postinst +libglade-2.0-0.control +libglade-2.0-0.list +libglade-2.0-0.postinst +libglib-2.0-0.control +libglib-2.0-0.list +libglib-2.0-0.postinst +libglu1.control +libglu1.list +libglu1.postinst +libgmodule-2.0-0.control +libgmodule-2.0-0.list +libgmodule-2.0-0.postinst +libgmp3.control +libgmp3.list +libgmp3.postinst +libgnt0.control +libgnt0.list +libgnt0.postinst +libgnutls26.control +libgnutls26.list +libgnutls26.postinst +libgnutls-extra26.control +libgnutls-extra26.list +libgnutls-extra26.postinst +libgobject-2.0-0.control +libgobject-2.0-0.list +libgobject-2.0-0.postinst +libgoffice-0.8-8.control +libgoffice-0.8-8.list +libgoffice-0.8-8.postinst +libgoffice-0.8-plugin-plot-barcol.control +libgoffice-0.8-plugin-plot-barcol.list +libgoffice-0.8-plugin-plot-distrib.control +libgoffice-0.8-plugin-plot-distrib.list +libgoffice-0.8-plugin-plot-pie.control +libgoffice-0.8-plugin-plot-pie.list +libgoffice-0.8-plugin-plot-radar.control +libgoffice-0.8-plugin-plot-radar.list +libgoffice-0.8-plugin-plot-surface.control +libgoffice-0.8-plugin-plot-surface.list +libgoffice-0.8-plugin-plot-xy.control +libgoffice-0.8-plugin-plot-xy.list +libgoffice-0.8-plugin-reg-linear.control +libgoffice-0.8-plugin-reg-linear.list +libgoffice-0.8-plugin-reg-logfit.control +libgoffice-0.8-plugin-reg-logfit.list +libgoffice-0.8-plugin-smoothing.control +libgoffice-0.8-plugin-smoothing.list +libgpewidget1.control +libgpewidget1.list +libgpewidget1.postinst +libgpg-error0.control +libgpg-error0.list +libgpg-error0.postinst +libgpgme11.control +libgpgme11.list +libgpgme11.postinst +libgsf.control +libgsf.list +libgsf.postinst +libgsf.prerm +libgsm0710-0.control +libgsm0710-0.list +libgsm0710-0.postinst +libgsm0710mux0.control +libgsm0710mux0.list +libgsm0710mux0.postinst +libgsm1.control +libgsm1.list +libgsm1.postinst +libgstaudio-0.10-0.control +libgstaudio-0.10-0.list +libgstaudio-0.10-0.postinst +libgstfarsight-0.10-0.control +libgstfarsight-0.10-0.list +libgstfarsight-0.10-0.postinst 
+libgstinterfaces-0.10-0.control +libgstinterfaces-0.10-0.list +libgstinterfaces-0.10-0.postinst +libgstnetbuffer-0.10-0.control +libgstnetbuffer-0.10-0.list +libgstnetbuffer-0.10-0.postinst +libgstpbutils-0.10-0.control +libgstpbutils-0.10-0.list +libgstpbutils-0.10-0.postinst +libgstrtp-0.10-0.control +libgstrtp-0.10-0.list +libgstrtp-0.10-0.postinst +libgsttag-0.10-0.control +libgsttag-0.10-0.list +libgsttag-0.10-0.postinst +libgstvideo-0.10-0.control +libgstvideo-0.10-0.list +libgstvideo-0.10-0.postinst +libgthread-2.0-0.control +libgthread-2.0-0.list +libgthread-2.0-0.postinst +libgypsy0.control +libgypsy0.list +libgypsy0.postinst +libical.control +libical.list +libical.postinst +libice6.control +libice6.list +libice6.postinst +libicudata36.control +libicudata36.list +libicudata36.postinst +libicui18n36.control +libicui18n36.list +libicui18n36.postinst +libicuuc36.control +libicuuc36.list +libicuuc36.postinst +libid3tag0.control +libid3tag0.list +libid3tag0.postinst +libidl-2-0.control +libidl-2-0.list +libidl-2-0.postinst +libidn.control +libidn.list +libidn.postinst +libimlib2-1.control +libimlib2-1.list +libimlib2-1.postinst +libjasper1.control +libjasper1.list +libjasper1.postinst +libjpeg62.control +libjpeg62.list +libjpeg62.postinst +liblinebreak1.control +liblinebreak1.list +liblinebreak1.postinst +liblinphone3.control +liblinphone3.list +liblinphone3.postinst +liblockfile.control +liblockfile.list +liblockfile.postinst +libltdl7.control +libltdl7.list +libltdl7.postinst +liblzo1.control +liblzo1.list +liblzo1.postinst +libmad0.control +libmad0.list +libmad0.postinst +libmediastreamer0.control +libmediastreamer0.list +libmediastreamer0.postinst +libmp3lame0.control +libmp3lame0.list +libmp3lame0.postinst +libmpfr1.control +libmpfr1.list +libmpfr1.postinst +libnice.control +libnice.list +libnice.postinst +libnl2.control +libnl2.list +libnl2.postinst +libnl-genl2.control +libnl-genl2.list +libnl-genl2.postinst +libnl-nf2.control +libnl-nf2.list +libnl-nf2.postinst +libnl-route2.control +libnl-route2.list +libnl-route2.postinst +libode0.control +libode0.list +libode0.postinst +libogg0.control +libogg0.list +libogg0.postinst +liboil.control +liboil.list +liboil.postinst +libopkg0.control +libopkg0.list +libopkg0.postinst +libortp8.control +libortp8.list +libortp8.postinst +libosip2-3.control +libosip2-3.list +libosip2-3.postinst +libpam-base-files.control +libpam-base-files.list +libpam.control +libpam.list +libpam-meta.control +libpam-meta.list +libpam.postinst +libpcap.control +libpcap.list +libpcap.postinst +libpciaccess0.control +libpciaccess0.list +libpciaccess0.postinst +libperl5.control +libperl5.list +libperl5.postinst +libphone-ui0.conffiles +libphone-ui0.control +libphone-ui0.list +libphone-ui0.postinst +libphone-ui-shr.control +libphone-ui-shr.list +libphone-utils0.conffiles +libphone-utils0.control +libphone-utils0.list +libphone-utils0.postinst +libpixman-1-0.control +libpixman-1-0.list +libpixman-1-0.postinst +libpng12-0.control +libpng12-0.list +libpng12-0.postinst +libpng.control +libpng.list +libpoppler5.control +libpoppler5.list +libpoppler5.postinst +libpoppler-glib4.control +libpoppler-glib4.list +libpoppler-glib4.postinst +libpopt0.control +libpopt0.list +libpopt0.postinst +libportaudio2.control +libportaudio2.list +libportaudio2.postinst +libpostproc51.control +libpostproc51.list +libpostproc51.postinst +libpthread-stubs0.control +libpthread-stubs0.list +libpthread-stubs0.postinst +libpurple.control +libpurple.list +libpurple-plugin-ssl.control 
+libpurple-plugin-ssl-gnutls.control +libpurple-plugin-ssl-gnutls.list +libpurple-plugin-ssl.list +libpurple.postinst +libpurple.prerm +libpurple-protocol-icq.control +libpurple-protocol-icq.list +libpurple-protocol-irc.control +libpurple-protocol-irc.list +libpurple-protocol-msn.control +libpurple-protocol-msn.list +libpurple-protocol-xmpp.control +libpurple-protocol-xmpp.list +libpyglib-2.0-python0.control +libpyglib-2.0-python0.list +libpyglib-2.0-python0.postinst +libpython2.6-1.0.control +libpython2.6-1.0.list +libpython2.6-1.0.postinst +libreadline5.control +libreadline5.list +libreadline5.postinst +librsvg-2-2.control +librsvg-2-2.list +librsvg-2-2.postinst +librsvg-2-gtk.control +librsvg-2-gtk.list +librsvg-2-gtk.postinst +libschroedinger-1.0-0.control +libschroedinger-1.0-0.list +libschroedinger-1.0-0.postinst +libsdl-1.2-0.control +libsdl-1.2-0.list +libsdl-1.2-0.postinst +libsdl-image-1.2-0.control +libsdl-image-1.2-0.list +libsdl-image-1.2-0.postinst +libsdl-mixer-1.2-0.control +libsdl-mixer-1.2-0.list +libsdl-mixer-1.2-0.postinst +libsdl-ttf-2.0-0.control +libsdl-ttf-2.0-0.list +libsdl-ttf-2.0-0.postinst +libsm6.control +libsm6.list +libsm6.postinst +libsoup-2.2-8.control +libsoup-2.2-8.list +libsoup-2.2-8.postinst +libsoup-2.4-1.control +libsoup-2.4-1.list +libsoup-2.4-1.postinst +libspeex1.control +libspeex1.list +libspeex1.postinst +libspeexdsp1.control +libspeexdsp1.list +libspeexdsp1.postinst +libsqlite0.control +libsqlite0.list +libsqlite0.postinst +libsqlite3-0.control +libsqlite3-0.list +libsqlite3-0.postinst +libss2.control +libss2.list +libss2.postinst +libssl0.9.8.control +libssl0.9.8.list +libssl0.9.8.postinst +libstartup-notification-1-0.control +libstartup-notification-1-0.list +libstartup-notification-1-0.postinst +libstdc++6.control +libstdc++6.list +libstdc++6.postinst +libswscale0.control +libswscale0.list +libswscale0.postinst +libsysfs2.control +libsysfs2.list +libsysfs2.postinst +libtheora0.control +libtheora0.list +libtheora0.postinst +libthread-db1.control +libthread-db1.list +libthread-db1.postinst +libtiff5.control +libtiff5.list +libtiff5.postinst +libts-1.0-0.control +libts-1.0-0.list +libts-1.0-0.postinst +libungif4.control +libungif4.list +libungif4.postinst +libusb-0.1-4.control +libusb-0.1-4.list +libusb-0.1-4.postinst +libuuid1.control +libuuid1.list +libuuid1.postinst +libvorbis0.control +libvorbis0.list +libvorbis0.postinst +libvte9.control +libvte9.list +libvte9.postinst +libwebkit-1.0-2.control +libwebkit-1.0-2.list +libwebkit-1.0-2.postinst +libwrap0.control +libwrap0.list +libwrap0.postinst +libx11-6.control +libx11-6.list +libx11-6.postinst +libx11-locale.control +libx11-locale.list +libxau6.control +libxau6.list +libxau6.postinst +libxaw7-7.control +libxaw7-7.list +libxaw7-7.postinst +libxcalibrate0.control +libxcalibrate0.list +libxcalibrate0.postinst +libxcomposite1.control +libxcomposite1.list +libxcomposite1.postinst +libxcursor1.control +libxcursor1.list +libxcursor1.postinst +libxdamage1.control +libxdamage1.list +libxdamage1.postinst +libxdmcp6.control +libxdmcp6.list +libxdmcp6.postinst +libxext6.control +libxext6.list +libxext6.postinst +libxfixes3.control +libxfixes3.list +libxfixes3.postinst +libxfont1.control +libxfont1.list +libxfont1.postinst +libxfontcache1.control +libxfontcache1.list +libxfontcache1.postinst +libxft2.control +libxft2.list +libxft2.postinst +libxi6.control +libxi6.list +libxi6.postinst +libxinerama1.control +libxinerama1.list +libxinerama1.postinst +libxkbfile1.control +libxkbfile1.list 
+libxkbfile1.postinst +libxml2.control +libxml2.list +libxml2.postinst +libxmu6.control +libxmu6.list +libxmu6.postinst +libxmuu1.control +libxmuu1.list +libxmuu1.postinst +libxp6.control +libxp6.list +libxp6.postinst +libxpm4.control +libxpm4.list +libxpm4.postinst +libxrandr2.control +libxrandr2.list +libxrandr2.postinst +libxrender1.control +libxrender1.list +libxrender1.postinst +libxslt.control +libxslt.list +libxslt.postinst +libxss1.control +libxss1.list +libxss1.postinst +libxt6.control +libxt6.list +libxt6.postinst +libxtst6.control +libxtst6.list +libxtst6.postinst +libxv1.control +libxv1.list +libxv1.postinst +libxxf86dga1.control +libxxf86dga1.list +libxxf86dga1.postinst +libxxf86misc1.control +libxxf86misc1.list +libxxf86misc1.postinst +libxxf86vm1.control +libxxf86vm1.list +libxxf86vm1.postinst +libyaml-0-2.control +libyaml-0-2.list +libyaml-0-2.postinst +libz1.control +libz1.list +libz1.postinst +linphone.control +linphone.list +locale-base-en-us.control +locale-base-en-us.list +logrotate.conffiles +logrotate.control +logrotate.list +logrotate.postinst +logrotate.postrm +lsof.control +lsof.list +ltrace.control +ltrace.list +make.control +make.list +matchbox-keyboard-im.control +matchbox-keyboard-im.list +matchbox-keyboard-im.postinst +matchbox-keyboard-im.postrm +mbuffer.control +mbuffer.list +mdbus2.control +mdbus2.list +mesa-dri.control +mesa-dri.list +mesa-dri.postinst +mime-support.control +mime-support.list +mioctl.control +mioctl.list +mkdump.control +mkdump.list +mobile-broadband-provider-info.control +mobile-broadband-provider-info.list +module-init-tools.control +module-init-tools-depmod.control +module-init-tools-depmod.list +module-init-tools-depmod.postinst +module-init-tools-depmod.prerm +module-init-tools.list +module-init-tools.postinst +module-init-tools.prerm +modutils-initscripts.control +modutils-initscripts.list +modutils-initscripts.postinst +modutils-initscripts.postrm +modutils-initscripts.prerm +mokomaze.control +mokomaze.list +mplayer-common.control +mplayer-common.list +mplayer.conffiles +mplayer.control +mplayer.list +mtd-utils.control +mtd-utils.list +mterm2.control +mterm2.list +nano.control +nano.list +navit.conffiles +navit.control +navit-icons.control +navit-icons.list +navit.list +ncurses.control +ncurses.list +ncurses.postinst +netbase.conffiles +netbase.control +netbase.list +netbase.postinst +netbase.postrm +netbase.prerm +nfs-utils-client.control +nfs-utils-client.list +nmon.control +nmon.list +numptyphysics.control +numptyphysics.list +openssh.control +openssh-keygen.control +openssh-keygen.list +openssh.list +openssh-scp.control +openssh-scp.list +openssh-scp.postinst +openssh-scp.postrm +openssh-sftp-server.control +openssh-sftp-server.list +openssh-ssh.conffiles +openssh-ssh.control +openssh-sshd.conffiles +openssh-sshd.control +openssh-sshd.list +openssh-sshd.postinst +openssh-sshd.postrm +openssh-ssh.list +openssh-ssh.postinst +openssh-ssh.postrm +openssl.control +openssl.list +openvpn.control +openvpn.list +opimd-utils-cli.control +opimd-utils-cli.list +opimd-utils-data.control +opimd-utils-data.list +opimd-utils-notes.control +opimd-utils-notes.list +opkg-collateral.conffiles +opkg-collateral.control +opkg-collateral.list +opkg.control +opkg.list +opkg.postinst +opkg.postrm +orbit2.control +orbit2.list +orbit2.postinst +pam-plugin-access.control +pam-plugin-access.list +pam-plugin-debug.control +pam-plugin-debug.list +pam-plugin-deny.control +pam-plugin-deny.list +pam-plugin-echo.control +pam-plugin-echo.list 
+pam-plugin-env.control +pam-plugin-env.list +pam-plugin-exec.control +pam-plugin-exec.list +pam-plugin-faildelay.control +pam-plugin-faildelay.list +pam-plugin-filter.control +pam-plugin-filter.list +pam-plugin-ftp.control +pam-plugin-ftp.list +pam-plugin-group.control +pam-plugin-group.list +pam-plugin-issue.control +pam-plugin-issue.list +pam-plugin-keyinit.control +pam-plugin-keyinit.list +pam-plugin-lastlog.control +pam-plugin-lastlog.list +pam-plugin-limits.control +pam-plugin-limits.list +pam-plugin-listfile.control +pam-plugin-listfile.list +pam-plugin-localuser.control +pam-plugin-localuser.list +pam-plugin-loginuid.control +pam-plugin-loginuid.list +pam-plugin-mail.control +pam-plugin-mail.list +pam-plugin-mkhomedir.control +pam-plugin-mkhomedir.list +pam-plugin-motd.control +pam-plugin-motd.list +pam-plugin-namespace.control +pam-plugin-namespace.list +pam-plugin-nologin.control +pam-plugin-nologin.list +pam-plugin-permit.control +pam-plugin-permit.list +pam-plugin-pwhistory.control +pam-plugin-pwhistory.list +pam-plugin-rhosts.control +pam-plugin-rhosts.list +pam-plugin-rootok.control +pam-plugin-rootok.list +pam-plugin-securetty.control +pam-plugin-securetty.list +pam-plugin-shells.control +pam-plugin-shells.list +pam-plugin-stress.control +pam-plugin-stress.list +pam-plugin-succeed-if.control +pam-plugin-succeed-if.list +pam-plugin-tally2.control +pam-plugin-tally2.list +pam-plugin-tally.control +pam-plugin-tally.list +pam-plugin-time.control +pam-plugin-time.list +pam-plugin-timestamp.control +pam-plugin-timestamp.list +pam-plugin-umask.control +pam-plugin-umask.list +pam-plugin-unix.control +pam-plugin-unix.list +pam-plugin-warn.control +pam-plugin-warn.list +pam-plugin-wheel.control +pam-plugin-wheel.list +pam-plugin-xauth.control +pam-plugin-xauth.list +pango.control +pango.list +pango-module-basic-fc.control +pango-module-basic-fc.list +pango-module-basic-fc.postinst +pango-module-basic-x.control +pango-module-basic-x.list +pango-module-basic-x.postinst +pango.postinst +perl.control +perl.list +perl-module-carp.control +perl-module-carp.list +perl-module-exporter.control +perl-module-exporter.list +perl-module-file-basename.control +perl-module-file-basename.list +perl-module-file-path.control +perl-module-file-path.list +perl-module-strict.control +perl-module-strict.list +perl-module-warnings.control +perl-module-warnings.list +phonefsod.conffiles +phonefsod.control +phonefsod.list +phonefsod.postinst +phonefsod.postrm +phonefsod.prerm +phoneui-apps-contacts.control +phoneui-apps-contacts.list +phoneui-apps-dialer.control +phoneui-apps-dialer.list +phoneui-apps-messages.control +phoneui-apps-messages.list +phoneui-apps-quick-settings.control +phoneui-apps-quick-settings.list +phoneuid.conffiles +phoneuid.control +phoneuid.list +pidgin.control +pidgin-data.control +pidgin-data.list +pidgin.list +pingus.control +pingus.list +pointercal.control +pointercal.list +policykit.control +policykit.list +policykit.postinst +policykit.postrm +poppler-data.control +poppler-data.list +portmap.control +portmap.list +portmap.postinst +portmap.postrm +portmap.prerm +powertop.control +powertop.list +ppp.conffiles +ppp.control +ppp-dialin.control +ppp-dialin.list +ppp-dialin.postinst +ppp-dialin.postrm +ppp.list +ppp.postinst +procps.conffiles +procps.control +procps.list +procps.postinst +procps.postrm +procps.prerm +pth.control +pth.list +pth.postinst +pxaregs.control +pxaregs.list +pyefl-sudoku.control +pyefl-sudoku.list +pyphonelog.control +pyphonelog.list +python-codecs.control 
+python-codecs.list +python-core.control +python-core.list +python-crypt.control +python-crypt.list +python-ctypes.control +python-ctypes.list +python-datetime.control +python-datetime.list +python-dateutil.control +python-dateutil.list +python-dbus.control +python-dbus.list +python-difflib.control +python-difflib.list +python-ecore.control +python-ecore.list +python-edbus.control +python-edbus.list +python-edje.control +python-edje.list +python-elementary.control +python-elementary.list +python-evas.control +python-evas.list +python-fcntl.control +python-fcntl.list +python-gst.control +python-gst.list +python-io.control +python-io.list +python-lang.control +python-lang.list +python-logging.control +python-logging.list +python-math.control +python-math.list +python-multiprocessing.control +python-multiprocessing.list +python-pexpect.control +python-pexpect.list +python-phoneutils.control +python-phoneutils.list +python-pickle.control +python-pickle.list +python-pprint.control +python-pprint.list +python-pyalsaaudio.control +python-pyalsaaudio.list +python-pycairo.control +python-pycairo.list +python-pygobject.control +python-pygobject.list +python-pygtk.control +python-pygtk.list +python-pyrtc.control +python-pyrtc.list +python-pyserial.control +python-pyserial.list +python-pyyaml.control +python-pyyaml.list +python-readline.control +python-readline.list +python-re.control +python-re.list +python-resource.control +python-resource.list +python-shell.control +python-shell.list +python-sqlite3.control +python-sqlite3.list +python-stringold.control +python-stringold.list +python-subprocess.control +python-subprocess.list +python-syslog.control +python-syslog.list +python-terminal.control +python-terminal.list +python-textutils.control +python-textutils.list +python-threading.control +python-threading.list +python-vobject.control +python-vobject.list +python-xml.control +python-xml.list +python-zlib.control +python-zlib.list +rgb.control +rgb.list +rsync.control +rsync.list +s3c24xx-gpio.control +s3c24xx-gpio.list +s3c64xx-gpio.control +s3c64xx-gpio.list +screen.control +screen.list +sed.control +sed.list +sed.postinst +sed.prerm +serial-forward.control +serial-forward.list +shared-mime-info.control +shared-mime-info.list +shr-settings-addons-illume.control +shr-settings-addons-illume.list +shr-settings-backup-configuration.conffiles +shr-settings-backup-configuration.control +shr-settings-backup-configuration.list +shr-settings.control +shr-settings.list +shr-splash.control +shr-splash.list +shr-splash.postinst +shr-splash.postrm +shr-splash.prerm +shr-splash-theme-simple.control +shr-splash-theme-simple.list +shr-splash-theme-simple.postinst +shr-splash-theme-simple.postrm +shr-theme.control +shr-theme-gry.control +shr-theme-gry.list +shr-theme-gtk-e17lookalike.control +shr-theme-gtk-e17lookalike.list +shr-theme-gtk-e17lookalike.postinst +shr-theme-gtk-e17lookalike.postrm +shr-theme.list +shr-wizard.control +shr-wizard.list +socat.control +socat.list +strace.control +strace.list +synergy.control +synergy.list +sysfsutils.control +sysfsutils.list +sysstat.control +sysstat.list +sysvinit.control +sysvinit-inittab.conffiles +sysvinit-inittab.control +sysvinit-inittab.list +sysvinit.list +sysvinit-pidof.control +sysvinit-pidof.list +sysvinit-pidof.postinst +sysvinit-pidof.prerm +sysvinit.postinst +sysvinit.postrm +sysvinit.prerm +sysvinit-utils.control +sysvinit-utils.list +sysvinit-utils.postinst +sysvinit-utils.prerm +tangogps.control +tangogps.list +task-base-apm.control +task-base-apm.list 
+task-base-bluetooth.control +task-base-bluetooth.list +task-base.control +task-base-ext2.control +task-base-ext2.list +task-base-kernel26.control +task-base-kernel26.list +task-base.list +task-base-ppp.control +task-base-ppp.list +task-base-usbgadget.control +task-base-usbgadget.list +task-base-usbhost.control +task-base-usbhost.list +task-base-vfat.control +task-base-vfat.list +task-base-wifi.control +task-base-wifi.list +task-boot.control +task-boot.list +task-cli-tools.control +task-cli-tools-debug.control +task-cli-tools-debug.list +task-cli-tools.list +task-distro-base.control +task-distro-base.list +task-fonts-truetype-core.control +task-fonts-truetype-core.list +task-fso2-compliance.control +task-fso2-compliance.list +task-machine-base.control +task-machine-base.list +task-shr-apps.control +task-shr-apps.list +task-shr-cli.control +task-shr-cli.list +task-shr-games.control +task-shr-games.list +task-shr-gtk.control +task-shr-gtk.list +task-shr-minimal-apps.control +task-shr-minimal-apps.list +task-shr-minimal-audio.control +task-shr-minimal-audio.list +task-shr-minimal-base.control +task-shr-minimal-base.list +task-shr-minimal-cli.control +task-shr-minimal-cli.list +task-shr-minimal-fso.control +task-shr-minimal-fso.list +task-shr-minimal-gtk.control +task-shr-minimal-gtk.list +task-shr-minimal-x.control +task-shr-minimal-x.list +task-x11-illume.control +task-x11-illume.list +task-x11-server.control +task-x11-server.list +task-x11-utils.control +task-x11-utils.list +tcpdump.control +tcpdump.list +tinylogin.control +tinylogin.list +tinylogin.postinst +tinylogin.prerm +tslib-calibrate.control +tslib-calibrate.list +tslib-conf.control +tslib-conf.list +ttf-dejavu-common.control +ttf-dejavu-common.list +ttf-dejavu-common.postinst +ttf-dejavu-common.postrm +ttf-dejavu-sans.control +ttf-dejavu-sans.list +ttf-dejavu-sans-mono.control +ttf-dejavu-sans-mono.list +ttf-dejavu-sans-mono.postinst +ttf-dejavu-sans-mono.postrm +ttf-dejavu-sans.postinst +ttf-dejavu-sans.postrm +ttf-liberation-mono.control +ttf-liberation-mono.list +ttf-liberation-mono.postinst +ttf-liberation-mono.postrm +tzdata-africa.control +tzdata-africa.list +tzdata-americas.control +tzdata-americas.list +tzdata-asia.control +tzdata-asia.list +tzdata-australia.control +tzdata-australia.list +tzdata.conffiles +tzdata.control +tzdata-europe.control +tzdata-europe.list +tzdata.list +udev.control +udev.list +udev.postinst +udev.postrm +udev.prerm +udev-utils.control +udev-utils.list +update-modules.control +update-modules.list +update-modules.postinst +update-rc.d.control +update-rc.d.list +usb-gadget-mode.control +usb-gadget-mode.list +usb-gadget-mode.postinst +usb-gadget-mode.postrm +usbutils.control +usbutils.list +util-linux-ng-blkid.control +util-linux-ng-blkid.list +util-linux-ng-blkid.postinst +util-linux-ng-blkid.prerm +util-linux-ng-cfdisk.control +util-linux-ng-cfdisk.list +util-linux-ng.control +util-linux-ng-fdisk.control +util-linux-ng-fdisk.list +util-linux-ng-fdisk.postinst +util-linux-ng-fdisk.prerm +util-linux-ng-fsck.control +util-linux-ng-fsck.list +util-linux-ng-fsck.postinst +util-linux-ng-fsck.prerm +util-linux-ng.list +util-linux-ng-losetup.control +util-linux-ng-losetup.list +util-linux-ng-losetup.postinst +util-linux-ng-losetup.prerm +util-linux-ng-mountall.control +util-linux-ng-mountall.list +util-linux-ng-mountall.postinst +util-linux-ng-mountall.prerm +util-linux-ng-mount.control +util-linux-ng-mount.list +util-linux-ng-mount.postinst +util-linux-ng-mount.prerm +util-linux-ng.postinst 
+util-linux-ng.prerm +util-linux-ng-readprofile.control +util-linux-ng-readprofile.list +util-linux-ng-readprofile.postinst +util-linux-ng-readprofile.prerm +util-linux-ng-sfdisk.control +util-linux-ng-sfdisk.list +util-linux-ng-swaponoff.control +util-linux-ng-swaponoff.list +util-linux-ng-swaponoff.postinst +util-linux-ng-swaponoff.prerm +util-linux-ng-umount.control +util-linux-ng-umount.list +util-linux-ng-umount.postinst +util-linux-ng-umount.prerm +vagalume.control +vagalume.list +vala-terminal.control +vala-terminal.list +ventura.control +ventura.list +vnc.control +vnc.list +vpnc.conffiles +vpnc.control +vpnc.list +vte-termcap.control +vte-termcap.list +wireless-tools.control +wireless-tools.list +wmiconfig.control +wmiconfig.list +wpa-supplicant.control +wpa-supplicant.list +wpa-supplicant-passphrase.control +wpa-supplicant-passphrase.list +wv.control +wv.list +wv.postinst +x11vnc.control +x11vnc.list +xauth.control +xauth.list +xcursor-transparent-theme.control +xcursor-transparent-theme.list +xdpyinfo.control +xdpyinfo.list +xf86-input-evdev.control +xf86-input-evdev.list +xf86-input-keyboard.control +xf86-input-keyboard.list +xf86-input-mouse.control +xf86-input-mouse.list +xf86-input-tslib.control +xf86-input-tslib.list +xf86-video-glamo.control +xf86-video-glamo.list +xhost.control +xhost.list +xinit.control +xinit.list +xinput-calibrator.control +xinput-calibrator.list +xinput.control +xinput.list +xkbcomp.control +xkbcomp.list +xkeyboard-config.control +xkeyboard-config.list +xmodmap.control +xmodmap.list +xorg-minimal-fonts.control +xorg-minimal-fonts.list +xrandr.control +xrandr.list +xserver-kdrive-common.control +xserver-kdrive-common.list +xserver-nodm-init.control +xserver-nodm-init.list +xserver-nodm-init.postinst +xserver-nodm-init.postrm +xserver-nodm-init.prerm +xserver-xorg-conf.conffiles +xserver-xorg-conf.control +xserver-xorg-conf.list +xserver-xorg.control +xserver-xorg-extension-dri2.control +xserver-xorg-extension-dri2.list +xserver-xorg-extension-dri.control +xserver-xorg-extension-dri.list +xserver-xorg-extension-glx.control +xserver-xorg-extension-glx.list +xserver-xorg.list +xset.control +xset.list +xtscal.control +xtscal.list" + +mount /mnt/ceph-fuse +: cd /mnt/ceph-fuse + +mkdir test-1774 +cd test-1774 +for f in $list; do + touch $f +done + +cd +umount /mnt/ceph-fuse +mount /mnt/ceph-fuse +cd - + +# this worked before the 1774 fix +diff <(ls) <(echo "$list") + +# but this failed, because we cached the dirlist wrong +# update-modules.postinst used to be the missing file, +# the last one in the first dirent set passed to ceph-fuse +diff <(ls) <(echo "$list") + +cd .. 
+rm -rf test-1774 + +cd +umount /mnt/ceph-fuse diff --git a/qa/clusters/2-node-mgr.yaml b/qa/clusters/2-node-mgr.yaml new file mode 100644 index 00000000..b1c29a86 --- /dev/null +++ b/qa/clusters/2-node-mgr.yaml @@ -0,0 +1,10 @@ +roles: +- [mgr.x, mon.a, mon.c, mds.a, mds.c, osd.0, client.0] +- [mgr.y, mgr.z, mon.b, mds.b, osd.1, osd.2, osd.3, client.1] +log-rotate: + ceph-mds: 10G + ceph-osd: 10G +openstack: + - volumes: # attached to each instance + count: 2 + size: 30 # GB diff --git a/qa/clusters/extra-client.yaml b/qa/clusters/extra-client.yaml new file mode 100644 index 00000000..33fa505b --- /dev/null +++ b/qa/clusters/extra-client.yaml @@ -0,0 +1,14 @@ +roles: +- [mon.a, mon.c, osd.0, osd.1, osd.2] +- [mon.b, mgr.x, mds.a, osd.3, osd.4, osd.5] +- [client.0] +- [client.1] +openstack: +- volumes: # attached to each instance + count: 3 + size: 10 # GB +overrides: + ceph: + conf: + osd: + osd shutdown pgref assert: true \ No newline at end of file diff --git a/qa/clusters/fixed-1.yaml b/qa/clusters/fixed-1.yaml new file mode 100644 index 00000000..d8e5898b --- /dev/null +++ b/qa/clusters/fixed-1.yaml @@ -0,0 +1,14 @@ +overrides: + ceph-deploy: + conf: + global: + osd pool default size: 2 + osd crush chooseleaf type: 0 + osd pool default pg num: 128 + osd pool default pgp num: 128 + ceph: + conf: + osd: + osd shutdown pgref assert: true +roles: +- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] diff --git a/qa/clusters/fixed-2.yaml b/qa/clusters/fixed-2.yaml new file mode 100644 index 00000000..5d5fcca9 --- /dev/null +++ b/qa/clusters/fixed-2.yaml @@ -0,0 +1,12 @@ +roles: +- [mon.a, mon.c, mgr.y, osd.0, osd.1, osd.2, osd.3, client.0] +- [mon.b, mgr.x, osd.4, osd.5, osd.6, osd.7, client.1] +openstack: +- volumes: # attached to each instance + count: 4 + size: 10 # GB +overrides: + ceph: + conf: + osd: + osd shutdown pgref assert: true diff --git a/qa/clusters/fixed-3-cephfs.yaml b/qa/clusters/fixed-3-cephfs.yaml new file mode 100644 index 00000000..9e021b3b --- /dev/null +++ b/qa/clusters/fixed-3-cephfs.yaml @@ -0,0 +1,16 @@ +roles: +- [mon.a, mds.a, mgr.x, osd.0, osd.1] +- [mon.b, mds.b, mon.c, mgr.y, osd.2, osd.3] +- [client.0] +openstack: +- volumes: # attached to each instance + count: 2 + size: 10 # GB +log-rotate: + ceph-mds: 10G + ceph-osd: 10G +overrides: + ceph: + conf: + osd: + osd shutdown pgref assert: true diff --git a/qa/clusters/fixed-3.yaml b/qa/clusters/fixed-3.yaml new file mode 100644 index 00000000..ddc79a84 --- /dev/null +++ b/qa/clusters/fixed-3.yaml @@ -0,0 +1,13 @@ +roles: +- [mon.a, mon.c, mgr.x, osd.0, osd.1, osd.2, osd.3] +- [mon.b, mgr.y, osd.4, osd.5, osd.6, osd.7] +- [client.0] +openstack: +- volumes: # attached to each instance + count: 4 + size: 10 # GB +overrides: + ceph: + conf: + osd: + osd shutdown pgref assert: true diff --git a/qa/clusters/fixed-4.yaml b/qa/clusters/fixed-4.yaml new file mode 100644 index 00000000..df767f35 --- /dev/null +++ b/qa/clusters/fixed-4.yaml @@ -0,0 +1,10 @@ +roles: +- [mon.a, mgr.y, osd.0, osd.4, osd.8, osd.12] +- [mon.b, osd.1, osd.5, osd.9, osd.13] +- [mon.c, osd.2, osd.6, osd.10, osd.14] +- [mgr.x, osd.3, osd.7, osd.11, osd.15, client.0] +overrides: + ceph: + conf: + osd: + osd shutdown pgref assert: true \ No newline at end of file diff --git a/qa/config/rados.yaml b/qa/config/rados.yaml new file mode 100644 index 00000000..e468e126 --- /dev/null +++ b/qa/config/rados.yaml @@ -0,0 +1,10 @@ +overrides: + ceph: + conf: + osd: + osd op queue: debug_random + osd op queue cut off: debug_random + osd debug verify missing on 
start: true + osd debug verify cached snaps: true + mon: + mon scrub interval: 300 diff --git a/qa/crontab/teuthology-cronjobs b/qa/crontab/teuthology-cronjobs new file mode 100644 index 00000000..db7e410c --- /dev/null +++ b/qa/crontab/teuthology-cronjobs @@ -0,0 +1,182 @@ +PATH=/home/teuthology/src/teuthology_master/virtualenv/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin +MAILTO="ceph-infra@redhat.com;yweinste@redhat.com" +CEPH_QA_EMAIL="ceph-qa@lists.ceph.com" + +### !!!!!!!!!!!!!!!!!!!!!!!!!! +## THIS CRONTAB MUST NOT BE EDITED MANUALLY !!!! +## AUTOMATED CRONTAB UPDATING +## https://code.google.com/archive/p/chkcrontab/wikis/CheckCrontab.wiki +## https://github.com/ceph/ceph-cm-ansible/pull/391 +## crontab is in https://github.com/ceph/ceph/master/qa/crontab/teuthology-cronjobs +# chkcrontab: disable-msg=INVALID_USER +# chkcrontab: disable-msg=USER_NOT_FOUND +@daily /bin/bash /home/teuthology/bin/update-crontab.sh +### !!!!!!!!!!!!!!!!!!!!!!!!!! + + +# Ensure teuthology is up-to-date +@daily cd /home/teuthology/src/teuthology_master && /home/teuthology/bin/cron_wrapper git pull +@daily cd /home/teuthology/src/git.ceph.com_ceph_master && /home/teuthology/bin/cron_wrapper git pull +# Ensure ceph-sepia-secrets is up-to-date +*/5 * * * * cd /home/teuthology/ceph-sepia-secrets && /home/teuthology/bin/cron_wrapper git pull + + +#Publish this crontab to the Tracker page http://tracker.ceph.com/projects/ceph-releases/wiki/Crontab +@daily crontab=$(crontab -l | perl -p -e 's/</&lt;/g; s/>/&gt;/g; s/&/&amp;/g') ; header=$(echo h3. Crontab ; echo) ; curl --verbose -X PUT --header 'Content-type: application/xml' --data-binary ''"$header"'<pre>'"$crontab"'</pre>' http://tracker.ceph.com/projects/ceph-releases/wiki/sepia.xml?key=$(cat /etc/redmine-key) + +## This is an example only, don't remove ! +## to see result open http://tracker.ceph.com/projects/ceph-qa-suite/wiki/ceph-ansible +@daily SUITE_NAME=~/src/ceph-qa-suite_master/suites/ceph-ansible; crontab=$(teuthology-describe-tests --show-facet no $SUITE_NAME | perl -p -e 's/</&lt;/g; s/>/&gt;/g; s/&/&amp;/g') ; header=$(echo h4.
$SUITE_NAME ; echo " "; echo " ") ; curl --verbose -X PUT --header 'Content-type: application/xml' --data-binary ''"$header"'<pre>'"$crontab"'</pre>' http://tracker.ceph.com/projects/ceph-qa-suite/wiki/ceph-ansible.xml?key=$(cat /etc/redmine-key) + + +## ********** smoke tests on master branch +0 5 * * * CEPH_BRANCH=master; MACHINE_NAME=ovh; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s smoke -k testing -p 71 -e $CEPH_QA_EMAIL --distro ubuntu --distro-version 16.04 +0 7 * * * CEPH_BRANCH=master; MACHINE_NAME=ovh; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s smoke -k testing -p 70 -e $CEPH_QA_EMAIL --distro rhel --distro-version 7.5 ~/rhel_only_on_ovh.yaml +# run one time per week on rhel7.4 +@weekly CEPH_BRANCH=master; MACHINE_NAME=ovh; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s smoke -k testing -p 70 -e $CEPH_QA_EMAIL --distro rhel --distro-version 7.4 ~/rhel_only_on_ovh.yaml +2 7 * * * CEPH_BRANCH=master; MACHINE_NAME=ovh; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s smoke -k testing -p 70 -e $CEPH_QA_EMAIL --distro centos --distro-version 7.4 +2 5 * * * CEPH_BRANCH=master; MACHINE_NAME=ovh; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s smoke -k testing -p 70 -e $CEPH_QA_EMAIL + + +## master branch runs +## suites rados, rbd and multimds use --subset arg and must be call with schedule_subset.sh +## see script in https://github.com/ceph/ceph/tree/master/qa/machine_types +01 02 * * 1 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=rbd; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 0 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +01 02 * * 2 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=rbd; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 1 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +01 02 * * 3 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=rbd; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 2 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +01 02 * * 4 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=rbd; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 3 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +01 02 * * 5 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=rbd; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 4 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +01 02 * * 6 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=rbd; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 5 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +01 02 * * 7 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=rbd; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 6 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL + +15 03 * * 1 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=fs; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 0 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +15 03 * * 2 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=fs; KERNEL=distro; /home/teuthology/bin/cron_wrapper 
/home/teuthology/bin/schedule_subset.sh 1 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +15 03 * * 3 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=fs; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 2 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +15 03 * * 4 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=fs; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 3 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +15 03 * * 5 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=fs; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 4 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +15 03 * * 6 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=fs; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 5 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +15 03 * * 7 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=fs; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 6 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL + +15 04 * * 1 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=multimds; KERNEL=testing; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 0 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +15 04 * * 2 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=multimds; KERNEL=testing; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 1 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +15 04 * * 3 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=multimds; KERNEL=testing; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 2 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +15 04 * * 4 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=multimds; KERNEL=testing; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 3 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +15 04 * * 5 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=multimds; KERNEL=testing; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 4 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +15 04 * * 6 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=multimds; KERNEL=testing; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 5 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +15 04 * * 7 CEPH_BRANCH=master; MACHINE_NAME=smithi; SUITE_NAME=multimds; KERNEL=testing; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 6 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL + +05 03 * * 1,6 CEPH_BRANCH=master; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s rgw -k distro -e $CEPH_QA_EMAIL +20 03 * * 1,6 CEPH_BRANCH=master; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s krbd -k testing -e $CEPH_QA_EMAIL +25 03 * * 1,6 CEPH_BRANCH=master; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s kcephfs -k testing -e $CEPH_QA_EMAIL +#45 03 * * 1,6 CEPH_BRANCH=master; MACHINE_NAME=mira; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s hadoop -e $CEPH_QA_EMAIL 
+#50 03 * * 1,6 CEPH_BRANCH=master; MACHINE_NAME=smithi;/home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s samba -e $CEPH_QA_EMAIL +59 03 * * 1,6 CEPH_BRANCH=master; MACHINE_NAME=mira; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s ceph-deploy -k distro -e $CEPH_QA_EMAIL +05 04 * * 1,6 CEPH_BRANCH=master; MACHINE_NAME=ovh; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s ceph-ansible -k distro -e $CEPH_QA_EMAIL +### The suite below must run on bare-metal because it's performance suite and run 3 times to produce more data points +57 03 * * * CEPH_BRANCH=master; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s perf-basic -k distro -e $CEPH_QA_EMAIL -N 3 +09 03 * * 6 CEPH_BRANCH=master; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s powercycle -k distro -e $CEPH_QA_EMAIL + +#********** luminous branch + +## run rados, rbd and fs only 2 times a week + +30 01 * * 6 CEPH_BRANCH=luminous; MACHINE_NAME=smithi; SUITE_NAME=rados; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 5 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +30 01 * * 7 CEPH_BRANCH=luminous; MACHINE_NAME=smithi; SUITE_NAME=rados; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 6 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL + +00 04 * * 6 CEPH_BRANCH=luminous; MACHINE_NAME=smithi; SUITE_NAME=rbd; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 5 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +00 04 * * 7 CEPH_BRANCH=luminous; MACHINE_NAME=smithi; SUITE_NAME=rbd; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 6 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL + +10 04 * * 6 CEPH_BRANCH=luminous; MACHINE_NAME=smithi; SUITE_NAME=fs; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 5 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +10 04 * * 7 CEPH_BRANCH=luminous; MACHINE_NAME=smithi; SUITE_NAME=fs; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 6 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL + + +05 05 * * 6 CEPH_BRANCH=luminous; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -m $MACHINE_NAME -s rgw -k distro -e $CEPH_QA_EMAIL +15 05 * * 6 CEPH_BRANCH=luminous; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -m $MACHINE_NAME -s krbd -k testing -e $CEPH_QA_EMAIL +20 05 * * 6 CEPH_BRANCH=luminous; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -m $MACHINE_NAME -s kcephfs -k testing -e $CEPH_QA_EMAIL +#30 05 * * 6 CEPH_BRANCH=luminous; MACHINE_NAME=smithi;/home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -m $MACHINE_NAME -s hadoop -e $CEPH_QA_EMAIL +#35 05 * * 6 CEPH_BRANCH=luminous; MACHINE_NAME=smithi;/home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -m $MACHINE_NAME -s samba -e $CEPH_QA_EMAIL +55 05 * * 6 CEPH_BRANCH=luminous; MACHINE_NAME=mira; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -m $MACHINE_NAME -s ceph-deploy -k distro -e 
$CEPH_QA_EMAIL +10 05 * * 6 CEPH_BRANCH=luminous; MACHINE_NAME=ovh; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -m $MACHINE_NAME -s ceph-disk -k distro -e $CEPH_QA_EMAIL +15 05 * * 6 CEPH_BRANCH=luminous; MACHINE_NAME=ovh; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -m $MACHINE_NAME -s ceph-ansible -k distro -e $CEPH_QA_EMAIL + + +## upgrades suites for on luminous +## !!!! three suites below MUST use --suite-branch hammer, kraken OR jewel +## --filter "ubuntu_14.04,ubuntu_16.04,centos_7.4" == test only supported distro +45 05 * * 7 CEPH_BRANCH=luminous; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -m $MACHINE_NAME -s upgrade/client-upgrade-hammer -k distro -e $CEPH_QA_EMAIL --suite-branch hammer +47 05 * * 7 CEPH_BRANCH=luminous; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -m $MACHINE_NAME -s upgrade/client-upgrade-jewel -k distro -e $CEPH_QA_EMAIL --suite-branch jewel --filter "ubuntu_14.04,ubuntu_16.04,centos_7.4" +50 05 * * 7 CEPH_BRANCH=luminous; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -m $MACHINE_NAME -s upgrade/client-upgrade-kraken -k distro -e $CEPH_QA_EMAIL --suite-branch kraken +## point-to-point upgrades suites on luminous +30 05 * * 7 CEPH_BRANCH=luminous; MACHINE_NAME=smithi;/home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -m $MACHINE_NAME -s upgrade/luminous-p2p -k distro -e $CEPH_QA_EMAIL + + +########################## + +#********** mimic branch START +30 02 * * 1 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=rados; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 0 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +30 02 * * 2 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=rados; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 1 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +30 02 * * 3 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=rados; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 2 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +30 02 * * 4 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=rados; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 3 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +30 02 * * 5 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=rados; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 4 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +30 02 * * 6 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=rados; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 5 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +30 02 * * 7 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=rados; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 6 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL + +00 05 * * 1 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=rbd; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 0 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +00 05 * * 2 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=rbd; KERNEL=distro; /home/teuthology/bin/cron_wrapper 
/home/teuthology/bin/schedule_subset.sh 1 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +00 05 * * 3 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=rbd; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 2 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +00 05 * * 4 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=rbd; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 3 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +00 05 * * 5 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=rbd; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 4 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +00 05 * * 6 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=rbd; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 5 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +00 05 * * 7 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=rbd; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 6 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL + +10 05 * * 1 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=fs; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 0 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +10 05 * * 2 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=fs; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 1 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +10 05 * * 3 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=fs; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 2 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +10 05 * * 4 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=fs; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 3 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +10 05 * * 5 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=fs; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 4 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +10 05 * * 6 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=fs; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 5 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +10 05 * * 7 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=fs; KERNEL=distro; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 6 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL + +15 14 * * 1 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=multimds; KERNEL=testing; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 0 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +15 14 * * 2 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=multimds; KERNEL=testing; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 1 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +15 14 * * 3 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=multimds; KERNEL=testing; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 2 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +15 14 * * 4 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=multimds; KERNEL=testing; 
/home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 3 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +15 14 * * 5 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=multimds; KERNEL=testing; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 4 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +15 14 * * 6 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=multimds; KERNEL=testing; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 5 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL +15 14 * * 7 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; SUITE_NAME=multimds; KERNEL=testing; /home/teuthology/bin/cron_wrapper /home/teuthology/bin/schedule_subset.sh 6 $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL + +05 05 * * 1,3,5 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s rgw -k distro -e $CEPH_QA_EMAIL +15 05 * * 1,3,5 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s krbd -k testing -e $CEPH_QA_EMAIL +20 05 * * 1,3,5 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s kcephfs -k testing -e $CEPH_QA_EMAIL +#30 05 * * 1,3,5 CEPH_BRANCH=mimic; MACHINE_NAME=smithi;/home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s hadoop -e $CEPH_QA_EMAIL +#35 05 * * 1,3,5 CEPH_BRANCH=mimic; MACHINE_NAME=smithi;/home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s samba -e $CEPH_QA_EMAIL +55 05 * * 1,3,5 CEPH_BRANCH=mimic; MACHINE_NAME=mira; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s ceph-deploy -k distro -e $CEPH_QA_EMAIL +10 05 * * 1,3,5 CEPH_BRANCH=mimic; MACHINE_NAME=ovh; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s ceph-disk -k distro -e $CEPH_QA_EMAIL +15 05 * * 1,3,5 CEPH_BRANCH=mimic; MACHINE_NAME=ovh; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s ceph-ansible -k distro -e $CEPH_QA_EMAIL +07 05 * * 6 CEPH_BRANCH=mimic; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s powercycle -k distro -e $CEPH_QA_EMAIL + +25 02 * * * CEPH_BRANCH=mimic; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -k distro -n 7 -m $MACHINE_NAME -s upgrade/luminous-x -e $CEPH_QA_EMAIL --suite-branch $CEPH_BRANCH -p 90 --filter ubuntu_latest,centos +30 05 * * 2,4,5 CEPH_BRANCH=mimic; MACHINE_NAME=smithi;/home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -m $MACHINE_NAME -s upgrade/mimic-p2p -k distro -e $CEPH_QA_EMAIL + + +## upgrades suites for on mimic +## !!!! 
three suites below MUST use --suite-branch hammer, krakel, jewel, luminous (see https://tracker.ceph.com/issues/24021) +## --filter "ubuntu_16.04,ubuntu_18.04,centos_7.4,rhel_7.5" - test ONLY supported distro +## to run on ovh use ~/rhel_only_on_ovh.yaml, we will run on smithi nodes +DISTRO_MIMIC="ubuntu_16.04,ubuntu_18.04,centos_7.4,rhel_7.5" +47 01 * * * CEPH_BRANCH=mimic; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s upgrade/client-upgrade-jewel -k distro -e $CEPH_QA_EMAIL --suite-branch jewel --filter $DISTRO_MIMIC +50 01 * * * CEPH_BRANCH=mimic; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s upgrade/client-upgrade-luminous -k distro -e $CEPH_QA_EMAIL --suite-branch luminous --filter $DISTRO_MIMIC +#********** mimic branch END + +#********** nautilus branch START + +#change to `nautilus` from master when it's ready +30 02 * * 2,4,5,7 CEPH_BRANCH=master; MACHINE_NAME=smithi;/home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -m $MACHINE_NAME -s upgrade/mimic-x -k distro -e $CEPH_QA_EMAIL -n 7 +#********** nautilus branch END + +### upgrade runs on old releases +###### on smithi + +23 04 * * * CEPH_BRANCH=luminous; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -k distro -n 7 -m $MACHINE_NAME -s upgrade/jewel-x -e $CEPH_QA_EMAIL +25 02 * * * CEPH_BRANCH=master; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -k distro -n 7 -m $MACHINE_NAME -s upgrade/luminous-x -e $CEPH_QA_EMAIL --suite-branch luminous -p 90 --filter ubuntu_latest,centos + diff --git a/qa/debug/buildpackages.yaml b/qa/debug/buildpackages.yaml new file mode 100644 index 00000000..527ed662 --- /dev/null +++ b/qa/debug/buildpackages.yaml @@ -0,0 +1,6 @@ +tasks: + - buildpackages: + machine: + disk: 40 # GB + ram: 15000 # MB + cpus: 16 diff --git a/qa/debug/mds_client.yaml b/qa/debug/mds_client.yaml new file mode 100644 index 00000000..c6fec3fc --- /dev/null +++ b/qa/debug/mds_client.yaml @@ -0,0 +1,9 @@ +overrides: + ceph: + conf: + mds: + debug ms: 1 + debug mds: 20 + client: + debug ms: 1 + debug client: 20 \ No newline at end of file diff --git a/qa/debug/mgr.yaml b/qa/debug/mgr.yaml new file mode 100644 index 00000000..1f8e9cbc --- /dev/null +++ b/qa/debug/mgr.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + conf: + mon: + debug mon: 20 + mgr: + debug mgr: 20 + debug ms: 1 + debug client: 20 + client: + debug client: 20 + debug mgrc: 20 + debug ms: 1 + osd: + debug mgrc: 20 + mds: + debug mgrc: 20 diff --git a/qa/debug/openstack-15G.yaml b/qa/debug/openstack-15G.yaml new file mode 100644 index 00000000..857ad22a --- /dev/null +++ b/qa/debug/openstack-15G.yaml @@ -0,0 +1,3 @@ +openstack: + - machine: + ram: 15000 # MB diff --git a/qa/debug/openstack-30G.yaml b/qa/debug/openstack-30G.yaml new file mode 100644 index 00000000..da7ed803 --- /dev/null +++ b/qa/debug/openstack-30G.yaml @@ -0,0 +1,3 @@ +openstack: + - machine: + ram: 30000 # MB diff --git a/qa/distros/a-supported-distro.yaml b/qa/distros/a-supported-distro.yaml new file mode 120000 index 00000000..33a40b6e --- /dev/null +++ b/qa/distros/a-supported-distro.yaml @@ -0,0 +1 @@ +all/centos_7.2.yaml \ No newline at end of file diff --git a/qa/distros/all/centos.yaml b/qa/distros/all/centos.yaml new file mode 100644 index 00000000..8f4854b9 --- /dev/null +++ b/qa/distros/all/centos.yaml @@ -0,0 +1 @@ +os_type: centos diff --git 
a/qa/distros/all/centos_6.3.yaml b/qa/distros/all/centos_6.3.yaml new file mode 100644 index 00000000..32187d6d --- /dev/null +++ b/qa/distros/all/centos_6.3.yaml @@ -0,0 +1,2 @@ +os_type: centos +os_version: "6.3" diff --git a/qa/distros/all/centos_6.4.yaml b/qa/distros/all/centos_6.4.yaml new file mode 100644 index 00000000..02383cd5 --- /dev/null +++ b/qa/distros/all/centos_6.4.yaml @@ -0,0 +1,2 @@ +os_type: centos +os_version: "6.4" diff --git a/qa/distros/all/centos_6.5.yaml b/qa/distros/all/centos_6.5.yaml new file mode 100644 index 00000000..77c9e41f --- /dev/null +++ b/qa/distros/all/centos_6.5.yaml @@ -0,0 +1,2 @@ +os_type: centos +os_version: "6.5" diff --git a/qa/distros/all/centos_7.0.yaml b/qa/distros/all/centos_7.0.yaml new file mode 100644 index 00000000..bccb2860 --- /dev/null +++ b/qa/distros/all/centos_7.0.yaml @@ -0,0 +1,2 @@ +os_type: centos +os_version: "7.0" diff --git a/qa/distros/all/centos_7.1.yaml b/qa/distros/all/centos_7.1.yaml new file mode 100644 index 00000000..74c68f96 --- /dev/null +++ b/qa/distros/all/centos_7.1.yaml @@ -0,0 +1,2 @@ +os_type: centos +os_version: "7.1" diff --git a/qa/distros/all/centos_7.2.yaml b/qa/distros/all/centos_7.2.yaml new file mode 100644 index 00000000..44d2f0ec --- /dev/null +++ b/qa/distros/all/centos_7.2.yaml @@ -0,0 +1,2 @@ +os_type: centos +os_version: "7.2" diff --git a/qa/distros/all/centos_7.3.yaml b/qa/distros/all/centos_7.3.yaml new file mode 100644 index 00000000..9dfcc7f6 --- /dev/null +++ b/qa/distros/all/centos_7.3.yaml @@ -0,0 +1,2 @@ +os_type: centos +os_version: "7.3" diff --git a/qa/distros/all/centos_7.4.yaml b/qa/distros/all/centos_7.4.yaml new file mode 100644 index 00000000..d06bc384 --- /dev/null +++ b/qa/distros/all/centos_7.4.yaml @@ -0,0 +1,2 @@ +os_type: centos +os_version: "7.4" diff --git a/qa/distros/all/centos_7.5.yaml b/qa/distros/all/centos_7.5.yaml new file mode 100644 index 00000000..54324576 --- /dev/null +++ b/qa/distros/all/centos_7.5.yaml @@ -0,0 +1,2 @@ +os_type: centos +os_version: "7.5" diff --git a/qa/distros/all/centos_7.6.yaml b/qa/distros/all/centos_7.6.yaml new file mode 100644 index 00000000..4cb095e6 --- /dev/null +++ b/qa/distros/all/centos_7.6.yaml @@ -0,0 +1,2 @@ +os_type: centos +os_version: "7.6" diff --git a/qa/distros/all/centos_7.8.yaml b/qa/distros/all/centos_7.8.yaml new file mode 100644 index 00000000..57619653 --- /dev/null +++ b/qa/distros/all/centos_7.8.yaml @@ -0,0 +1,2 @@ +os_type: centos +os_version: "7.8" diff --git a/qa/distros/all/debian_6.0.yaml b/qa/distros/all/debian_6.0.yaml new file mode 100644 index 00000000..6820fa3c --- /dev/null +++ b/qa/distros/all/debian_6.0.yaml @@ -0,0 +1,2 @@ +os_type: debian +os_version: "6.0" diff --git a/qa/distros/all/debian_7.0.yaml b/qa/distros/all/debian_7.0.yaml new file mode 100644 index 00000000..8100dc41 --- /dev/null +++ b/qa/distros/all/debian_7.0.yaml @@ -0,0 +1,2 @@ +os_type: debian +os_version: "7.0" diff --git a/qa/distros/all/debian_8.0.yaml b/qa/distros/all/debian_8.0.yaml new file mode 100644 index 00000000..300a4430 --- /dev/null +++ b/qa/distros/all/debian_8.0.yaml @@ -0,0 +1,2 @@ +os_type: debian +os_version: "8.0" diff --git a/qa/distros/all/fedora_17.yaml b/qa/distros/all/fedora_17.yaml new file mode 100644 index 00000000..801053af --- /dev/null +++ b/qa/distros/all/fedora_17.yaml @@ -0,0 +1,2 @@ +os_type: fedora +os_version: "17" diff --git a/qa/distros/all/fedora_18.yaml b/qa/distros/all/fedora_18.yaml new file mode 100644 index 00000000..07872aa7 --- /dev/null +++ b/qa/distros/all/fedora_18.yaml @@ -0,0 
+1,2 @@ +os_type: fedora +os_version: "18" diff --git a/qa/distros/all/fedora_19.yaml b/qa/distros/all/fedora_19.yaml new file mode 100644 index 00000000..5bac8ace --- /dev/null +++ b/qa/distros/all/fedora_19.yaml @@ -0,0 +1,2 @@ +os_type: fedora +os_version: "19" diff --git a/qa/distros/all/opensuse_12.2.yaml b/qa/distros/all/opensuse_12.2.yaml new file mode 100644 index 00000000..ee9f877a --- /dev/null +++ b/qa/distros/all/opensuse_12.2.yaml @@ -0,0 +1,2 @@ +os_type: opensuse +os_version: "12.2" diff --git a/qa/distros/all/opensuse_13.2.yaml b/qa/distros/all/opensuse_13.2.yaml new file mode 100644 index 00000000..7551e81f --- /dev/null +++ b/qa/distros/all/opensuse_13.2.yaml @@ -0,0 +1,2 @@ +os_type: opensuse +os_version: "13.2" diff --git a/qa/distros/all/opensuse_15.0.yaml b/qa/distros/all/opensuse_15.0.yaml new file mode 100644 index 00000000..42898695 --- /dev/null +++ b/qa/distros/all/opensuse_15.0.yaml @@ -0,0 +1,2 @@ +os_type: opensuse +os_version: "15.0" diff --git a/qa/distros/all/opensuse_42.1.yaml b/qa/distros/all/opensuse_42.1.yaml new file mode 100644 index 00000000..48c789db --- /dev/null +++ b/qa/distros/all/opensuse_42.1.yaml @@ -0,0 +1,2 @@ +os_type: opensuse +os_version: "42.1" diff --git a/qa/distros/all/opensuse_42.2.yaml b/qa/distros/all/opensuse_42.2.yaml new file mode 100644 index 00000000..10e87026 --- /dev/null +++ b/qa/distros/all/opensuse_42.2.yaml @@ -0,0 +1,2 @@ +os_type: opensuse +os_version: "42.2" diff --git a/qa/distros/all/opensuse_42.3.yaml b/qa/distros/all/opensuse_42.3.yaml new file mode 100644 index 00000000..148ab7ac --- /dev/null +++ b/qa/distros/all/opensuse_42.3.yaml @@ -0,0 +1,2 @@ +os_type: opensuse +os_version: "42.3" diff --git a/qa/distros/all/rhel_6.3.yaml b/qa/distros/all/rhel_6.3.yaml new file mode 100644 index 00000000..6a8edcd5 --- /dev/null +++ b/qa/distros/all/rhel_6.3.yaml @@ -0,0 +1,2 @@ +os_type: rhel +os_version: "6.3" diff --git a/qa/distros/all/rhel_6.4.yaml b/qa/distros/all/rhel_6.4.yaml new file mode 100644 index 00000000..52254958 --- /dev/null +++ b/qa/distros/all/rhel_6.4.yaml @@ -0,0 +1,2 @@ +os_type: rhel +os_version: "6.4" diff --git a/qa/distros/all/rhel_6.5.yaml b/qa/distros/all/rhel_6.5.yaml new file mode 100644 index 00000000..7db54bea --- /dev/null +++ b/qa/distros/all/rhel_6.5.yaml @@ -0,0 +1,2 @@ +os_type: rhel +os_version: "6.5" diff --git a/qa/distros/all/rhel_7.0.yaml b/qa/distros/all/rhel_7.0.yaml new file mode 100644 index 00000000..c87c0bc1 --- /dev/null +++ b/qa/distros/all/rhel_7.0.yaml @@ -0,0 +1,2 @@ +os_type: rhel +os_version: "7.0" diff --git a/qa/distros/all/rhel_7.5.yaml b/qa/distros/all/rhel_7.5.yaml new file mode 100644 index 00000000..e5aaf3d3 --- /dev/null +++ b/qa/distros/all/rhel_7.5.yaml @@ -0,0 +1,2 @@ +os_type: rhel +os_version: "7.5" diff --git a/qa/distros/all/rhel_7.6.yaml b/qa/distros/all/rhel_7.6.yaml new file mode 100644 index 00000000..62f52e8e --- /dev/null +++ b/qa/distros/all/rhel_7.6.yaml @@ -0,0 +1,2 @@ +os_type: rhel +os_version: "7.6" diff --git a/qa/distros/all/rhel_7.8.yaml b/qa/distros/all/rhel_7.8.yaml new file mode 100644 index 00000000..5c5fce71 --- /dev/null +++ b/qa/distros/all/rhel_7.8.yaml @@ -0,0 +1,2 @@ +os_type: rhel +os_version: "7.8" diff --git a/qa/distros/all/rhel_7.9.yaml b/qa/distros/all/rhel_7.9.yaml new file mode 100644 index 00000000..ce0853dc --- /dev/null +++ b/qa/distros/all/rhel_7.9.yaml @@ -0,0 +1,2 @@ +os_type: rhel +os_version: "7.9" diff --git a/qa/distros/all/rhel_7.yaml b/qa/distros/all/rhel_7.yaml new file mode 120000 index 00000000..db371779 
--- /dev/null +++ b/qa/distros/all/rhel_7.yaml @@ -0,0 +1 @@ +rhel_7.9.yaml \ No newline at end of file diff --git a/qa/distros/all/sle_12.2.yaml b/qa/distros/all/sle_12.2.yaml new file mode 100644 index 00000000..2a4a28c0 --- /dev/null +++ b/qa/distros/all/sle_12.2.yaml @@ -0,0 +1,2 @@ +os_type: sle +os_version: "12.2" diff --git a/qa/distros/all/ubuntu_12.04.yaml b/qa/distros/all/ubuntu_12.04.yaml new file mode 100644 index 00000000..dbc3a8d9 --- /dev/null +++ b/qa/distros/all/ubuntu_12.04.yaml @@ -0,0 +1,2 @@ +os_type: ubuntu +os_version: "12.04" diff --git a/qa/distros/all/ubuntu_12.10.yaml b/qa/distros/all/ubuntu_12.10.yaml new file mode 100644 index 00000000..ab655676 --- /dev/null +++ b/qa/distros/all/ubuntu_12.10.yaml @@ -0,0 +1,2 @@ +os_type: ubuntu +os_version: "12.10" diff --git a/qa/distros/all/ubuntu_14.04.yaml b/qa/distros/all/ubuntu_14.04.yaml new file mode 100644 index 00000000..309e989f --- /dev/null +++ b/qa/distros/all/ubuntu_14.04.yaml @@ -0,0 +1,2 @@ +os_type: ubuntu +os_version: "14.04" diff --git a/qa/distros/all/ubuntu_14.04_aarch64.yaml b/qa/distros/all/ubuntu_14.04_aarch64.yaml new file mode 100644 index 00000000..9dfbcb51 --- /dev/null +++ b/qa/distros/all/ubuntu_14.04_aarch64.yaml @@ -0,0 +1,3 @@ +os_type: ubuntu +os_version: "14.04" +arch: aarch64 diff --git a/qa/distros/all/ubuntu_14.04_i686.yaml b/qa/distros/all/ubuntu_14.04_i686.yaml new file mode 100644 index 00000000..4a0652e7 --- /dev/null +++ b/qa/distros/all/ubuntu_14.04_i686.yaml @@ -0,0 +1,3 @@ +os_type: ubuntu +os_version: "14.04" +arch: i686 diff --git a/qa/distros/all/ubuntu_16.04.yaml b/qa/distros/all/ubuntu_16.04.yaml new file mode 100644 index 00000000..a459fddf --- /dev/null +++ b/qa/distros/all/ubuntu_16.04.yaml @@ -0,0 +1,2 @@ +os_type: ubuntu +os_version: "16.04" diff --git a/qa/distros/all/ubuntu_18.04.yaml b/qa/distros/all/ubuntu_18.04.yaml new file mode 100644 index 00000000..4d446488 --- /dev/null +++ b/qa/distros/all/ubuntu_18.04.yaml @@ -0,0 +1,2 @@ +os_type: ubuntu +os_version: "18.04" diff --git a/qa/distros/supported-all-distro/centos_latest.yaml b/qa/distros/supported-all-distro/centos_latest.yaml new file mode 120000 index 00000000..22299ab8 --- /dev/null +++ b/qa/distros/supported-all-distro/centos_latest.yaml @@ -0,0 +1 @@ +../all/centos_7.8.yaml \ No newline at end of file diff --git a/qa/distros/supported-all-distro/rhel_7.yaml b/qa/distros/supported-all-distro/rhel_7.yaml new file mode 120000 index 00000000..8e67cf2c --- /dev/null +++ b/qa/distros/supported-all-distro/rhel_7.yaml @@ -0,0 +1 @@ +../all/rhel_7.yaml \ No newline at end of file diff --git a/qa/distros/supported-all-distro/ubuntu_16.04.yaml b/qa/distros/supported-all-distro/ubuntu_16.04.yaml new file mode 120000 index 00000000..69ebbd49 --- /dev/null +++ b/qa/distros/supported-all-distro/ubuntu_16.04.yaml @@ -0,0 +1 @@ +../all/ubuntu_16.04.yaml \ No newline at end of file diff --git a/qa/distros/supported-all-distro/ubuntu_latest.yaml b/qa/distros/supported-all-distro/ubuntu_latest.yaml new file mode 120000 index 00000000..64a66d9a --- /dev/null +++ b/qa/distros/supported-all-distro/ubuntu_latest.yaml @@ -0,0 +1 @@ +../all/ubuntu_18.04.yaml \ No newline at end of file diff --git a/qa/distros/supported-random-distro$/centos_latest.yaml b/qa/distros/supported-random-distro$/centos_latest.yaml new file mode 120000 index 00000000..22299ab8 --- /dev/null +++ b/qa/distros/supported-random-distro$/centos_latest.yaml @@ -0,0 +1 @@ +../all/centos_7.8.yaml \ No newline at end of file diff --git 
a/qa/distros/supported-random-distro$/rhel_7.yaml b/qa/distros/supported-random-distro$/rhel_7.yaml new file mode 120000 index 00000000..8e67cf2c --- /dev/null +++ b/qa/distros/supported-random-distro$/rhel_7.yaml @@ -0,0 +1 @@ +../all/rhel_7.yaml \ No newline at end of file diff --git a/qa/distros/supported-random-distro$/ubuntu_16.04.yaml b/qa/distros/supported-random-distro$/ubuntu_16.04.yaml new file mode 120000 index 00000000..69ebbd49 --- /dev/null +++ b/qa/distros/supported-random-distro$/ubuntu_16.04.yaml @@ -0,0 +1 @@ +../all/ubuntu_16.04.yaml \ No newline at end of file diff --git a/qa/distros/supported-random-distro$/ubuntu_latest.yaml b/qa/distros/supported-random-distro$/ubuntu_latest.yaml new file mode 120000 index 00000000..64a66d9a --- /dev/null +++ b/qa/distros/supported-random-distro$/ubuntu_latest.yaml @@ -0,0 +1 @@ +../all/ubuntu_18.04.yaml \ No newline at end of file diff --git a/qa/distros/supported/centos_latest.yaml b/qa/distros/supported/centos_latest.yaml new file mode 120000 index 00000000..22299ab8 --- /dev/null +++ b/qa/distros/supported/centos_latest.yaml @@ -0,0 +1 @@ +../all/centos_7.8.yaml \ No newline at end of file diff --git a/qa/distros/supported/rhel_latest.yaml b/qa/distros/supported/rhel_latest.yaml new file mode 120000 index 00000000..8e67cf2c --- /dev/null +++ b/qa/distros/supported/rhel_latest.yaml @@ -0,0 +1 @@ +../all/rhel_7.yaml \ No newline at end of file diff --git a/qa/distros/supported/ubuntu_latest.yaml b/qa/distros/supported/ubuntu_latest.yaml new file mode 120000 index 00000000..64a66d9a --- /dev/null +++ b/qa/distros/supported/ubuntu_latest.yaml @@ -0,0 +1 @@ +../all/ubuntu_18.04.yaml \ No newline at end of file diff --git a/qa/erasure-code/ec-feature-plugins-v2.yaml b/qa/erasure-code/ec-feature-plugins-v2.yaml new file mode 100644 index 00000000..f2d374dd --- /dev/null +++ b/qa/erasure-code/ec-feature-plugins-v2.yaml @@ -0,0 +1,98 @@ +# +# Test the expected behavior of the +# +# CEPH_FEATURE_ERASURE_CODE_PLUGINS_V2 +# +# feature. +# +roles: +- - mon.a + - mon.b + - osd.0 + - osd.1 +- - osd.2 + - mon.c + - mgr.x +tasks: +# +# Install firefly +# +- install: + branch: firefly +- ceph: + fs: xfs +# +# We don't need mon.c for now: it will be used later to make sure an old +# mon cannot join the quorum once the feature has been activated +# +- ceph.stop: + daemons: [mon.c] +- exec: + mon.a: + - |- + ceph osd erasure-code-profile set WRONG plugin=WRONG + ceph osd pool create poolWRONG 12 12 erasure WRONG 2>&1 | grep "failed to load plugin using profile WRONG" +# +# Partial upgrade, osd.2 is not upgraded +# +- install.upgrade: + osd.0: +# +# a is the leader +# +- ceph.restart: + daemons: [mon.a] + wait-for-healthy: false +- exec: + mon.a: + - |- + ceph osd erasure-code-profile set profile-lrc plugin=lrc 2>&1 | grep "unsupported by: the monitor cluster" +- ceph.restart: + daemons: [mon.b, osd.1, osd.0] + wait-for-healthy: false + wait-for-osds-up: true +# +# The lrc plugin cannot be used because osd.2 is not upgraded yet +# and would crash. 
+# +- exec: + mon.a: + - |- + ceph osd erasure-code-profile set profile-lrc plugin=lrc 2>&1 | grep "unsupported by: osd.2" +# +# Taking osd.2 out, the rest of the cluster is upgraded +# +- ceph.stop: + daemons: [osd.2] +- sleep: + duration: 60 +# +# Creating an erasure code profile using the lrc plugin now works +# +- exec: + mon.a: + - "ceph osd erasure-code-profile set profile-lrc plugin=lrc" +# +# osd.2 won't be able to join the cluster because it does not support the feature +# +- ceph.restart: + daemons: [osd.2] + wait-for-healthy: false +- sleep: + duration: 60 +- exec: + osd.2: + - |- + grep "protocol feature.*missing 100000000000" /var/log/ceph/ceph-osd.2.log +# +# mon.c won't be able to join the quorum because it does not support the feature +# +- ceph.restart: + daemons: [mon.c] + wait-for-healthy: false +- sleep: + duration: 60 +- exec: + mon.c: + - |- + grep "missing.*feature" /var/log/ceph/ceph-mon.c.log diff --git a/qa/erasure-code/ec-feature-plugins-v3.yaml b/qa/erasure-code/ec-feature-plugins-v3.yaml new file mode 100644 index 00000000..332b9440 --- /dev/null +++ b/qa/erasure-code/ec-feature-plugins-v3.yaml @@ -0,0 +1,98 @@ +# +# Test the expected behavior of the +# +# CEPH_FEATURE_ERASURE_CODE_PLUGINS_V3 +# +# feature. +# +roles: +- - mon.a + - mon.b + - osd.0 + - osd.1 +- - osd.2 + - mon.c + - mgr.x +tasks: +# +# Install hammer +# +- install: + branch: hammer +- ceph: + fs: xfs +# +# We don't need mon.c for now: it will be used later to make sure an old +# mon cannot join the quorum once the feature has been activated +# +- ceph.stop: + daemons: [mon.c] +- exec: + mon.a: + - |- + ceph osd erasure-code-profile set WRONG plugin=WRONG + ceph osd pool create poolWRONG 12 12 erasure WRONG 2>&1 | grep "failed to load plugin using profile WRONG" +# +# Partial upgrade, osd.2 is not upgraded +# +- install.upgrade: + osd.0: +# +# a is the leader +# +- ceph.restart: + daemons: [mon.a] + wait-for-healthy: false +- exec: + mon.a: + - |- + ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec 2>&1 | grep "unsupported by: the monitor cluster" +- ceph.restart: + daemons: [mon.b, osd.1, osd.0] + wait-for-healthy: false + wait-for-osds-up: true +# +# The shec plugin cannot be used because osd.2 is not upgraded yet +# and would crash.
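Both ec-feature-plugins suites (the v2 one above and the v3 one that continues below) use the same assertion style: run a ceph osd erasure-code-profile set command that is expected to be refused on the partially upgraded cluster, and grep its combined output for the refusal message. A minimal stand-alone sketch of that pattern, with a hypothetical expect_refusal helper that is not part of either suite:

#!/usr/bin/env bash
# Stand-alone sketch of the check style used by the ec-feature-plugins suites:
# run a command that should be refused and grep its output for the message.
# expect_refusal and the profile name are illustrative only.

expect_refusal() {
    local msg=$1; shift
    "$@" 2>&1 | grep -q "$msg" \
        || { echo "expected message not found: $msg" >&2; exit 1; }
}

# On a partially upgraded cluster the monitors should refuse a profile that
# uses a plugin the old osd.2 cannot load, and name that daemon in the error.
expect_refusal "unsupported by: osd.2" \
    ceph osd erasure-code-profile set profile-lrc plugin=lrc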
+# +- exec: + mon.a: + - |- + ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec 2>&1 | grep "unsupported by: osd.2" +# +# Taking osd.2 out, the rest of the cluster is upgraded +# +- ceph.stop: + daemons: [osd.2] +- sleep: + duration: 60 +# +# Creating an erasure code profile using the shec plugin now works +# +- exec: + mon.a: + - "ceph osd erasure-code-profile set profile-shec k=2 m=1 c=1 plugin=shec" +# +# osd.2 won't be able to join the because is does not support the feature +# +- ceph.restart: + daemons: [osd.2] + wait-for-healthy: false +- sleep: + duration: 60 +- exec: + osd.2: + - |- + grep "protocol feature.*missing" /var/log/ceph/ceph-osd.2.log +# +# mon.c won't be able to join the because it does not support the feature +# +- ceph.restart: + daemons: [mon.c] + wait-for-healthy: false +- sleep: + duration: 60 +- exec: + mon.c: + - |- + grep "missing.*feature" /var/log/ceph/ceph-mon.c.log diff --git a/qa/erasure-code/ec-rados-default.yaml b/qa/erasure-code/ec-rados-default.yaml new file mode 100644 index 00000000..cc62371e --- /dev/null +++ b/qa/erasure-code/ec-rados-default.yaml @@ -0,0 +1,19 @@ +tasks: + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 + - print: "**** done rados ec task" diff --git a/qa/erasure-code/ec-rados-parallel.yaml b/qa/erasure-code/ec-rados-parallel.yaml new file mode 100644 index 00000000..0f01d842 --- /dev/null +++ b/qa/erasure-code/ec-rados-parallel.yaml @@ -0,0 +1,20 @@ +workload: + parallel: + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 + - print: "**** done rados ec parallel" diff --git a/qa/erasure-code/ec-rados-plugin=clay-k=4-m=2.yaml b/qa/erasure-code/ec-rados-plugin=clay-k=4-m=2.yaml new file mode 100644 index 00000000..2efb8543 --- /dev/null +++ b/qa/erasure-code/ec-rados-plugin=clay-k=4-m=2.yaml @@ -0,0 +1,25 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + erasure_code_profile: + name: clay42profile + plugin: clay + k: 4 + m: 2 + technique: reed_sol_van + crush-failure-domain: osd + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml b/qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml new file mode 100644 index 00000000..64b59705 --- /dev/null +++ b/qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml @@ -0,0 +1,26 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + min_size: 2 + write_append_excl: false + erasure_code_profile: + name: isaprofile + plugin: isa + k: 2 + m: 1 + technique: reed_sol_van + crush-failure-domain: osd + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/qa/erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml b/qa/erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml new file mode 100644 index 00000000..d61b1c8a --- /dev/null +++ b/qa/erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml @@ -0,0 +1,25 @@ +tasks: +- 
rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + erasure_code_profile: + name: jerasure21profile + plugin: jerasure + k: 2 + m: 1 + technique: reed_sol_van + crush-failure-domain: osd + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/qa/erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml b/qa/erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml new file mode 100644 index 00000000..2ca53a79 --- /dev/null +++ b/qa/erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml @@ -0,0 +1,31 @@ +# +# k=3 implies a stripe_width of 1376*3 = 4128 which is different from +# the default value of 4096 It is also not a multiple of 1024*1024 and +# creates situations where rounding rules during recovery becomes +# necessary. +# +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + erasure_code_profile: + name: jerasure31profile + plugin: jerasure + k: 3 + m: 1 + technique: reed_sol_van + crush-failure-domain: osd + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/qa/erasure-code/ec-rados-plugin=jerasure-k=4-m=2.yaml b/qa/erasure-code/ec-rados-plugin=jerasure-k=4-m=2.yaml new file mode 100644 index 00000000..dfcc6160 --- /dev/null +++ b/qa/erasure-code/ec-rados-plugin=jerasure-k=4-m=2.yaml @@ -0,0 +1,25 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + erasure_code_profile: + name: jerasure21profile + plugin: jerasure + k: 4 + m: 2 + technique: reed_sol_van + crush-failure-domain: osd + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/qa/erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml b/qa/erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml new file mode 100644 index 00000000..86ae0568 --- /dev/null +++ b/qa/erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml @@ -0,0 +1,25 @@ +tasks: +- rados: + clients: [client.0] + ops: 400 + objects: 50 + ec_pool: true + write_append_excl: false + erasure_code_profile: + name: lrcprofile + plugin: lrc + k: 4 + m: 2 + l: 3 + crush-failure-domain: osd + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/qa/erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml b/qa/erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml new file mode 100644 index 00000000..ee74c6e9 --- /dev/null +++ b/qa/erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml @@ -0,0 +1,25 @@ +tasks: +- rados: + clients: [client.0] + ops: 400 + objects: 50 + ec_pool: true + write_append_excl: false + erasure_code_profile: + name: shecprofile + plugin: shec + k: 4 + m: 3 + c: 2 + crush-failure-domain: osd + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/qa/erasure-code/ec-rados-sequential.yaml b/qa/erasure-code/ec-rados-sequential.yaml new file mode 100644 index 00000000..90536ee6 --- /dev/null +++ b/qa/erasure-code/ec-rados-sequential.yaml @@ -0,0 +1,20 @@ +workload: + sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + 
write_append_excl: false + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 + - print: "**** done rados ec sequential" diff --git a/qa/find-used-ports.sh b/qa/find-used-ports.sh new file mode 100755 index 00000000..c57525cd --- /dev/null +++ b/qa/find-used-ports.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +git --no-pager grep -n '127.0.0.1:[0-9]\+' | sed -n 's/.*127.0.0.1:\([0-9]\+\).*/\1/p' | sort -n | uniq -u diff --git a/qa/libceph/Makefile b/qa/libceph/Makefile new file mode 100644 index 00000000..06e1b990 --- /dev/null +++ b/qa/libceph/Makefile @@ -0,0 +1,11 @@ +CFLAGS = -Wall -Wextra -D_GNU_SOURCE -lcephfs -L../../build/lib + +TARGETS = trivial_libceph + +.c: + $(CC) $(CFLAGS) $@.c -o $@ + +all: $(TARGETS) + +clean: + rm $(TARGETS) diff --git a/qa/libceph/trivial_libceph.c b/qa/libceph/trivial_libceph.c new file mode 100644 index 00000000..9093e97e --- /dev/null +++ b/qa/libceph/trivial_libceph.c @@ -0,0 +1,69 @@ +#define _FILE_OFFSET_BITS 64 +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include "../../src/include/cephfs/libcephfs.h" + +#define MB64 (1<<26) + +int main(int argc, const char **argv) +{ + struct ceph_mount_info *cmount; + int ret, fd, len; + char buf[1024]; + + if (argc < 3) { + fprintf(stderr, "usage: ./%s <conf> <file>\n", argv[0]); + exit(1); + } + + ret = ceph_create(&cmount, NULL); + if (ret) { + fprintf(stderr, "ceph_create=%d\n", ret); + exit(1); + } + + ret = ceph_conf_read_file(cmount, argv[1]); + if (ret) { + fprintf(stderr, "ceph_conf_read_file=%d\n", ret); + exit(1); + } + + ret = ceph_conf_parse_argv(cmount, argc, argv); + if (ret) { + fprintf(stderr, "ceph_conf_parse_argv=%d\n", ret); + exit(1); + } + + ret = ceph_mount(cmount, NULL); + if (ret) { + fprintf(stderr, "ceph_mount=%d\n", ret); + exit(1); + } + + ret = ceph_chdir(cmount, "/"); + if (ret) { + fprintf(stderr, "ceph_chdir=%d\n", ret); + exit(1); + } + + fd = ceph_open(cmount, argv[2], O_CREAT|O_TRUNC|O_RDWR, 0777); + if (fd < 0) { + fprintf(stderr, "ceph_open=%d\n", fd); + exit(1); + } + + memset(buf, 'a', sizeof(buf)); + + len = ceph_write(cmount, fd, buf, sizeof(buf), 0); + + fprintf(stdout, "wrote %d bytes\n", len); + + ceph_shutdown(cmount); + + return 0; +} diff --git a/qa/loopall.sh b/qa/loopall.sh new file mode 100755 index 00000000..d69e8c72 --- /dev/null +++ b/qa/loopall.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +set -ex + +basedir=`echo $0 | sed 's/[^/]*$//g'`. +testdir="$1" +[ -n "$2" ] && logdir=$2 || logdir=$1 + +[ ${basedir:0:1} == "." ] && basedir=`pwd`/${basedir:1} + +PATH="$basedir/src:$PATH" + +[ -z "$testdir" ] || [ ! -d "$testdir" ] && echo "specify test dir" && exit 1 +cd $testdir + +while true +do + for test in `cd $basedir/workunits && find .
-executable -type f | $basedir/../src/script/permute` + do + echo "------ running test $test ------" + pwd + [ -d $test ] && rm -r $test + mkdir -p $test + mkdir -p `dirname $logdir/$test.log` + test -e $logdir/$test.log && rm $logdir/$test.log + sh -c "cd $test && $basedir/workunits/$test" 2>&1 | tee $logdir/$test.log + done +done diff --git a/qa/machine_types/schedule_rados_ovh.sh b/qa/machine_types/schedule_rados_ovh.sh new file mode 100755 index 00000000..cefa98b6 --- /dev/null +++ b/qa/machine_types/schedule_rados_ovh.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash + +# $1 - part +# $2 - branch name +# $3 - machine name +# $4 - email address +# $5 - filter out (this arg is to be at the end of the command line for now) + +## example #1 +## (date +%U) week number +## % 2 - mod 2 (e.g. 0,1,0,1 ...) +## * 7 - multiplied by 7 (e.g. 0,7,0,7...) +## $1 day of the week (0-6) +## /14 for 2 weeks + +## example #2 +## (date +%U) week number +## % 4 - mod 4 (e.g. 0,1,2,3,0,1,2,3 ...) +## * 7 - multiplied by 7 (e.g. 0,7,14,21,0,7,14,21...) +## $1 day of the week (0-6) +## /28 for 4 weeks + +echo "Scheduling " $2 " branch" +if [ $2 = "master" ] ; then + # run master branch with --newest option looking for good sha1 7 builds back + teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/28 --newest 7 -e $4 ~/vps.yaml $5 +elif [ $2 = "hammer" ] ; then + # run hammer branch with less jobs + teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/56 -e $4 ~/vps.yaml $5 +elif [ $2 = "jewel" ] ; then + # run jewel branch with /40 jobs + teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/40 -e $4 ~/vps.yaml $5 +else + # run NON master branches without --newest + teuthology-suite -v -c $2 -m $3 -k distro -s rados --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/28 -e $4 ~/vps.yaml $5 +fi + diff --git a/qa/machine_types/schedule_subset.sh b/qa/machine_types/schedule_subset.sh new file mode 100755 index 00000000..005d0dcc --- /dev/null +++ b/qa/machine_types/schedule_subset.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash + +#command line => CEPH_BRANCH=; MACHINE_NAME=; SUITE_NAME=; ../schedule_subset.sh $CEPH_BRANCH $MACHINE_NAME $SUITE_NAME $CEPH_QA_EMAIL $KERNEL <$FILTER> + +# $1 - part (day of week) +# $2 - branch name +# $3 - machine name +# $4 - suite name +# $5 - email address +# $6 - kernel (distro or testing) +# $7 - filter out (this arg is to be at the end of the command line for now) + +## example #1 +## (date +%U) week number +## % 2 - mod 2 (e.g. 0,1,0,1 ...) +## * 7 - multiplied by 7 (e.g. 0,7,0,7...) +## $1 day of the week (0-6) +## /14 for 2 weeks + +## example #2 +## (date +%U) week number +## % 4 - mod 4 (e.g. 0,1,2,3,0,1,2,3 ...) +## * 7 - multiplied by 7 (e.g. 0,7,14,21,0,7,14,21...) 
+## $1 day of the week (0-6) +## /28 for 4 weeks + +echo "Scheduling " $2 " branch" +if [ $2 = "master" ] ; then + # run master branch with --newest option looking for good sha1 7 builds back with /999 jobs + teuthology-suite -v -c $2 -m $3 -k $6 -s $4 --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/999 --newest 7 -e $5 $7 +elif [ $2 = "hammer" ] ; then + # run hammer branch with less jobs + teuthology-suite -v -c $2 -m $3 -k $6 -s $4 --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/56 -e $5 $7 +elif [ $2 = "jewel" ] ; then + # run jewel branch with /40 jobs + teuthology-suite -v -c $2 -m $3 -k $6 -s $4 --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/40 -e $5 $7 +elif [ $2 = "kraken" ] ; then + # run kraken branch with /999 jobs + teuthology-suite -v -c $2 -m $3 -k $6 -s $4 --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/999 -e $5 $7 +elif [ $2 = "luminous" ] ; then + # run luminous branch with /999 jobs + teuthology-suite -v -c $2 -m $3 -k $6 -s $4 --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/999 -e $5 $7 +elif [ $2 = "mimic" ] ; then + # run mimic branch with /999 jobs + teuthology-suite -v -c $2 -m $3 -k $6 -s $4 --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/999 -e $5 $7 +else + # run NON master branches without --newest + teuthology-suite -v -c $2 -m $3 -k $6 -s $4 --subset $(echo "(($(date +%U) % 4) * 7) + $1" | bc)/999 -e $5 $7 +fi diff --git a/qa/machine_types/vps.yaml b/qa/machine_types/vps.yaml new file mode 100644 index 00000000..64a3da47 --- /dev/null +++ b/qa/machine_types/vps.yaml @@ -0,0 +1,14 @@ +overrides: + ceph: + conf: + global: + osd heartbeat grace: 100 + # this line to address issue #1017 + mon lease: 15 + mon lease ack timeout: 25 + s3tests: + idle_timeout: 1200 + ceph-fuse: + client.0: + mount_wait: 60 + mount_timeout: 120 diff --git a/qa/mds/test_anchortable.sh b/qa/mds/test_anchortable.sh new file mode 100755 index 00000000..1bf2494d --- /dev/null +++ b/qa/mds/test_anchortable.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +set -x + +mkdir links +for f in `seq 1 8` +do + mkdir $f + for g in `seq 1 20` + do + touch $f/$g + ln $f/$g links/$f.$g + done +done + +for f in `seq 1 8` +do + echo testing failure point $f + bash -c "pushd . ; cd $bindir ; sleep 10; ./ceph -c $conf mds tell \* injectargs \"--mds_kill_mdstable_at $f\" ; popd" & + bash -c "pushd . ; cd $bindir ; sleep 11 ; ./init-ceph -c $conf start mds ; popd" & + for g in `seq 1 20` + do + rm $f/$g + rm links/$f.$g + sleep 1 + done +done + diff --git a/qa/mds/test_mdstable_failures.sh b/qa/mds/test_mdstable_failures.sh new file mode 100755 index 00000000..c959995c --- /dev/null +++ b/qa/mds/test_mdstable_failures.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash +set -x + +for f in `seq 1 8` +do + echo testing failure point $f + pushd . ; cd $bindir ; ./ceph -c $conf mds tell \* injectargs "--mds_kill_mdstable_at $f" ; popd + sleep 1 # wait for mds command to go thru + bash -c "pushd . ; cd $bindir ; sleep 10 ; ./init-ceph -c $conf start mds ; popd" & + touch $f + ln $f $f.link + sleep 10 +done + diff --git a/qa/mon/bootstrap/host.sh b/qa/mon/bootstrap/host.sh new file mode 100755 index 00000000..ad4e327d --- /dev/null +++ b/qa/mon/bootstrap/host.sh @@ -0,0 +1,29 @@ +#!/bin/sh -ex + +cwd=`pwd` +cat > conf < conf < conf < conf < conf < conf < conf < conf < conf < conf <> $STDOUT +echo "Running command: $@" >> $STDOUT +"$@" > $STDOUT 2> $STDERR + +# get return code from the command run +code=$? 
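Stepping back to the machine_types schedule scripts above (the wrapper script being added here continues right after this aside): the --subset value they pass to teuthology-suite is simply ((week-of-year mod 4) * 7) + day-of-week, so each weekday of a four-week cycle runs a different one of the 28 slices. A small worked example, with a hypothetical day value standing in for the $1 argument:

#!/usr/bin/env bash
# Worked example of the --subset arithmetic used by schedule_subset.sh and
# schedule_rados_ovh.sh; "day" stands in for the $1 argument (0-6).
day=3
week=$(date +%U)                                # e.g. week 23 of the year
subset=$(echo "((${week} % 4) * 7) + ${day}" | bc)
echo "--subset ${subset}/28"                    # week 23, day 3 -> (23%4)*7+3 = 24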
+ +if [ $code != 0 ] ; then + # echoing to stdout/stderr makes cron send email + echo "stdout:" + cat $STDOUT + echo "stderr:" + cat $STDERR +else + # normal exit: just log stdout + + # lock $LOG with file descriptor 200 + exec 200>>$LOG + # if $LOG is locked by other process - wait for 20 sec + flock -w 20 200 || LOG=$LOG_LOCK_ERR + echo "stdout:" >> $LOG + cat $STDOUT >> $LOG + echo "stderr:" >> $LOG + cat $STDERR >> $LOG + # unlock + flock -u 200 +fi diff --git a/qa/objectstore/bluestore-bitmap.yaml b/qa/objectstore/bluestore-bitmap.yaml new file mode 100644 index 00000000..b18e04be --- /dev/null +++ b/qa/objectstore/bluestore-bitmap.yaml @@ -0,0 +1,43 @@ +overrides: + thrashosds: + bdev_inject_crash: 2 + bdev_inject_crash_probability: .5 + ceph: + fs: xfs + conf: + osd: + osd objectstore: bluestore + bluestore block size: 96636764160 + debug bluestore: 20 + debug bluefs: 20 + debug rocksdb: 10 + bluestore fsck on mount: true + bluestore allocator: bitmap + # lower the full ratios since we can fill up a 100gb osd so quickly + mon osd full ratio: .9 + mon osd backfillfull_ratio: .85 + mon osd nearfull ratio: .8 + osd failsafe full ratio: .95 +# this doesn't work with failures bc the log writes are not atomic across the two backends +# bluestore bluefs env mirror: true + bdev enable discard: true + bdev async discard: true + ceph-deploy: + fs: xfs + bluestore: yes + conf: + osd: + osd objectstore: bluestore + bluestore block size: 96636764160 + debug bluestore: 20 + debug bluefs: 20 + debug rocksdb: 10 + bluestore fsck on mount: true + # lower the full ratios since we can fill up a 100gb osd so quickly + mon osd full ratio: .9 + mon osd backfillfull_ratio: .85 + mon osd nearfull ratio: .8 + osd failsafe full ratio: .95 + bdev enable discard: true + bdev async discard: true + diff --git a/qa/objectstore/bluestore-comp-lz4.yaml b/qa/objectstore/bluestore-comp-lz4.yaml new file mode 100644 index 00000000..46f993e6 --- /dev/null +++ b/qa/objectstore/bluestore-comp-lz4.yaml @@ -0,0 +1,24 @@ +overrides: + thrashosds: + bdev_inject_crash: 2 + bdev_inject_crash_probability: .5 + ceph: + fs: xfs + conf: + osd: + osd objectstore: bluestore + bluestore block size: 96636764160 + debug bluestore: 20 + debug bluefs: 20 + debug rocksdb: 10 + bluestore compression mode: aggressive + bluestore fsck on mount: true + bluestore compression algorithm: lz4 + # lower the full ratios since we can fill up a 100gb osd so quickly + mon osd full ratio: .9 + mon osd backfillfull_ratio: .85 + mon osd nearfull ratio: .8 + osd failsafe full ratio: .95 + +# this doesn't work with failures bc the log writes are not atomic across the two backends +# bluestore bluefs env mirror: true diff --git a/qa/objectstore/bluestore-comp-snappy.yaml b/qa/objectstore/bluestore-comp-snappy.yaml new file mode 100644 index 00000000..b5d58414 --- /dev/null +++ b/qa/objectstore/bluestore-comp-snappy.yaml @@ -0,0 +1,24 @@ +overrides: + thrashosds: + bdev_inject_crash: 2 + bdev_inject_crash_probability: .5 + ceph: + fs: xfs + conf: + osd: + osd objectstore: bluestore + bluestore block size: 96636764160 + debug bluestore: 20 + debug bluefs: 20 + debug rocksdb: 10 + bluestore compression mode: aggressive + bluestore fsck on mount: true + bluestore compression algorithm: snappy + # lower the full ratios since we can fill up a 100gb osd so quickly + mon osd full ratio: .9 + mon osd backfillfull_ratio: .85 + mon osd nearfull ratio: .8 + osd failsafe full ratio: .95 + +# this doesn't work with failures bc the log writes are not atomic across the two 
backends +# bluestore bluefs env mirror: true diff --git a/qa/objectstore/bluestore-comp-zlib.yaml b/qa/objectstore/bluestore-comp-zlib.yaml new file mode 100644 index 00000000..b47ebbb7 --- /dev/null +++ b/qa/objectstore/bluestore-comp-zlib.yaml @@ -0,0 +1,24 @@ +overrides: + thrashosds: + bdev_inject_crash: 2 + bdev_inject_crash_probability: .5 + ceph: + fs: xfs + conf: + osd: + osd objectstore: bluestore + bluestore block size: 96636764160 + debug bluestore: 20 + debug bluefs: 20 + debug rocksdb: 10 + bluestore compression mode: aggressive + bluestore fsck on mount: true + bluestore compression algorithm: zlib + # lower the full ratios since we can fill up a 100gb osd so quickly + mon osd full ratio: .9 + mon osd backfillfull_ratio: .85 + mon osd nearfull ratio: .8 + osd failsafe full ratio: .95 + +# this doesn't work with failures bc the log writes are not atomic across the two backends +# bluestore bluefs env mirror: true diff --git a/qa/objectstore/bluestore-comp-zstd.yaml b/qa/objectstore/bluestore-comp-zstd.yaml new file mode 100644 index 00000000..e2f5e4e5 --- /dev/null +++ b/qa/objectstore/bluestore-comp-zstd.yaml @@ -0,0 +1,24 @@ +overrides: + thrashosds: + bdev_inject_crash: 2 + bdev_inject_crash_probability: .5 + ceph: + fs: xfs + conf: + osd: + osd objectstore: bluestore + bluestore block size: 96636764160 + debug bluestore: 20 + debug bluefs: 20 + debug rocksdb: 10 + bluestore compression mode: aggressive + bluestore fsck on mount: true + bluestore compression algorithm: zstd + # lower the full ratios since we can fill up a 100gb osd so quickly + mon osd full ratio: .9 + mon osd backfillfull_ratio: .85 + mon osd nearfull ratio: .8 + osd failsafe full ratio: .95 + +# this doesn't work with failures bc the log writes are not atomic across the two backends +# bluestore bluefs env mirror: true diff --git a/qa/objectstore/bluestore-hybrid.yaml b/qa/objectstore/bluestore-hybrid.yaml new file mode 100644 index 00000000..68b9bc42 --- /dev/null +++ b/qa/objectstore/bluestore-hybrid.yaml @@ -0,0 +1,40 @@ +overrides: + thrashosds: + bdev_inject_crash: 2 + bdev_inject_crash_probability: .5 + ceph: + fs: xfs + conf: + osd: + osd objectstore: bluestore + bluestore block size: 96636764160 + debug bluestore: 20 + debug bluefs: 20 + debug rocksdb: 10 + bluestore fsck on mount: true + bluestore allocator: hybrid + bluefs allocator: hybrid + # lower the full ratios since we can fill up a 100gb osd so quickly + mon osd full ratio: .9 + mon osd backfillfull_ratio: .85 + mon osd nearfull ratio: .8 + osd failsafe full ratio: .95 +# this doesn't work with failures bc the log writes are not atomic across the two backends +# bluestore bluefs env mirror: true + ceph-deploy: + fs: xfs + bluestore: yes + conf: + osd: + osd objectstore: bluestore + bluestore block size: 96636764160 + debug bluestore: 20 + debug bluefs: 20 + debug rocksdb: 10 + bluestore fsck on mount: true + # lower the full ratios since we can fill up a 100gb osd so quickly + mon osd full ratio: .9 + mon osd backfillfull_ratio: .85 + mon osd nearfull ratio: .8 + osd failsafe full ratio: .95 + diff --git a/qa/objectstore/bluestore-stupid.yaml b/qa/objectstore/bluestore-stupid.yaml new file mode 100644 index 00000000..ca811f13 --- /dev/null +++ b/qa/objectstore/bluestore-stupid.yaml @@ -0,0 +1,43 @@ +overrides: + thrashosds: + bdev_inject_crash: 2 + bdev_inject_crash_probability: .5 + ceph: + fs: xfs + conf: + osd: + osd objectstore: bluestore + bluestore block size: 96636764160 + debug bluestore: 20 + debug bluefs: 20 + debug rocksdb: 10 
+ bluestore fsck on mount: true + bluestore allocator: stupid + # lower the full ratios since we can fill up a 100gb osd so quickly + mon osd full ratio: .9 + mon osd backfillfull_ratio: .85 + mon osd nearfull ratio: .8 + osd failsafe full ratio: .95 +# this doesn't work with failures bc the log writes are not atomic across the two backends +# bluestore bluefs env mirror: true + bdev enable discard: true + bdev async discard: true + ceph-deploy: + fs: xfs + bluestore: yes + conf: + osd: + osd objectstore: bluestore + bluestore block size: 96636764160 + debug bluestore: 20 + debug bluefs: 20 + debug rocksdb: 10 + bluestore fsck on mount: true + # lower the full ratios since we can fill up a 100gb osd so quickly + mon osd full ratio: .9 + mon osd backfillfull_ratio: .85 + mon osd nearfull ratio: .8 + osd failsafe full ratio: .95 + bdev enable discard: true + bdev async discard: true + diff --git a/qa/objectstore/filestore-xfs.yaml b/qa/objectstore/filestore-xfs.yaml new file mode 100644 index 00000000..f7aa0dd7 --- /dev/null +++ b/qa/objectstore/filestore-xfs.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + fs: xfs + conf: + osd: + osd objectstore: filestore + osd sloppy crc: true + ceph-deploy: + fs: xfs + filestore: True + conf: + osd: + osd objectstore: filestore + osd sloppy crc: true + diff --git a/qa/objectstore_cephfs/bluestore-bitmap.yaml b/qa/objectstore_cephfs/bluestore-bitmap.yaml new file mode 120000 index 00000000..951e65ac --- /dev/null +++ b/qa/objectstore_cephfs/bluestore-bitmap.yaml @@ -0,0 +1 @@ +../objectstore/bluestore-bitmap.yaml \ No newline at end of file diff --git a/qa/objectstore_cephfs/filestore-xfs.yaml b/qa/objectstore_cephfs/filestore-xfs.yaml new file mode 120000 index 00000000..6fd44e0c --- /dev/null +++ b/qa/objectstore_cephfs/filestore-xfs.yaml @@ -0,0 +1 @@ +../objectstore/filestore-xfs.yaml \ No newline at end of file diff --git a/qa/overrides/2-size-1-min-size.yaml b/qa/overrides/2-size-1-min-size.yaml new file mode 100644 index 00000000..d710aee2 --- /dev/null +++ b/qa/overrides/2-size-1-min-size.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + conf: + global: + osd_pool_default_size: 2 + osd_pool_default_min_size: 1 diff --git a/qa/overrides/2-size-2-min-size.yaml b/qa/overrides/2-size-2-min-size.yaml new file mode 100644 index 00000000..f667a6ae --- /dev/null +++ b/qa/overrides/2-size-2-min-size.yaml @@ -0,0 +1,8 @@ +overrides: + ceph: + conf: + global: + osd_pool_default_size: 2 + osd_pool_default_min_size: 2 + log-whitelist: + - \(REQUEST_STUCK\) diff --git a/qa/overrides/3-size-2-min-size.yaml b/qa/overrides/3-size-2-min-size.yaml new file mode 100644 index 00000000..02579060 --- /dev/null +++ b/qa/overrides/3-size-2-min-size.yaml @@ -0,0 +1,8 @@ +overrides: + thrashosds: + min_in: 4 + ceph: + conf: + global: + osd_pool_default_size: 3 + osd_pool_default_min_size: 2 diff --git a/qa/overrides/more-active-recovery.yaml b/qa/overrides/more-active-recovery.yaml new file mode 100644 index 00000000..bfe86e4d --- /dev/null +++ b/qa/overrides/more-active-recovery.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + conf: + global: + osd_recovery_max_active: 10 + osd_recovery_max_single_start: 10 diff --git a/qa/overrides/no_client_pidfile.yaml b/qa/overrides/no_client_pidfile.yaml new file mode 100644 index 00000000..4ea02f47 --- /dev/null +++ b/qa/overrides/no_client_pidfile.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + pid file: "" diff --git a/qa/overrides/short_pg_log.yaml b/qa/overrides/short_pg_log.yaml new file mode 100644 index 00000000..fa55e91e --- 
/dev/null +++ b/qa/overrides/short_pg_log.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + osd_min_pg_log_entries: 1 + osd_max_pg_log_entries: 2 + osd_pg_log_trim_min: 0 diff --git a/qa/overrides/whitelist_wrongly_marked_down.yaml b/qa/overrides/whitelist_wrongly_marked_down.yaml new file mode 100644 index 00000000..4e21dc9b --- /dev/null +++ b/qa/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1,10 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + conf: + mds: + debug mds: 20 + debug ms: 1 + client: + debug client: 10 \ No newline at end of file diff --git a/qa/packages/packages.yaml b/qa/packages/packages.yaml new file mode 100644 index 00000000..b19e5b01 --- /dev/null +++ b/qa/packages/packages.yaml @@ -0,0 +1,50 @@ +--- +ceph: + deb: + - ceph + - ceph-mds + - ceph-mgr + - ceph-common + - ceph-fuse + - ceph-test + - radosgw + - python-ceph + - libcephfs2 + - libcephfs-dev + - librados2 + - librbd1 + - rbd-fuse + - ceph-common-dbg + - ceph-fuse-dbg + - ceph-mds-dbg + - ceph-mgr-dbg + - ceph-mon-dbg + - ceph-osd-dbg + - ceph-test-dbg + - libcephfs2-dbg + - librados2-dbg + - libradosstriper1-dbg + - librbd1-dbg + - librgw2-dbg + - radosgw-dbg + - rbd-fuse-dbg + - rbd-mirror-dbg + - rbd-nbd-dbg + rpm: + - ceph-radosgw + - ceph-test + - ceph + - ceph-mgr + - ceph-mgr-dashboard + - ceph-mgr-diskprediction-cloud + - ceph-mgr-diskprediction-local + - ceph-mgr-rook + - ceph-mgr-ssh + - ceph-fuse + - libcephfs2 + - libcephfs-devel + - librados2 + - librbd1 + - python-ceph + - rbd-fuse + - ceph-debuginfo diff --git a/qa/qa_scripts/cephscrub.sh b/qa/qa_scripts/cephscrub.sh new file mode 100755 index 00000000..331d5ce3 --- /dev/null +++ b/qa/qa_scripts/cephscrub.sh @@ -0,0 +1,30 @@ +# remove the ceph directories +sudo rm -rf /var/log/ceph +sudo rm -rf /var/lib/ceph +sudo rm -rf /etc/ceph +sudo rm -rf /var/run/ceph +# remove the ceph packages +sudo apt-get -y purge ceph +sudo apt-get -y purge ceph-dbg +sudo apt-get -y purge ceph-mds +sudo apt-get -y purge ceph-mds-dbg +sudo apt-get -y purge ceph-fuse +sudo apt-get -y purge ceph-fuse-dbg +sudo apt-get -y purge ceph-common +sudo apt-get -y purge ceph-common-dbg +sudo apt-get -y purge ceph-resource-agents +sudo apt-get -y purge librados2 +sudo apt-get -y purge librados2-dbg +sudo apt-get -y purge librados-dev +sudo apt-get -y purge librbd1 +sudo apt-get -y purge librbd1-dbg +sudo apt-get -y purge librbd-dev +sudo apt-get -y purge libcephfs2 +sudo apt-get -y purge libcephfs2-dbg +sudo apt-get -y purge libcephfs-dev +sudo apt-get -y purge radosgw +sudo apt-get -y purge radosgw-dbg +sudo apt-get -y purge obsync +sudo apt-get -y purge python-rados +sudo apt-get -y purge python-rbd +sudo apt-get -y purge python-cephfs diff --git a/qa/qa_scripts/openstack/README b/qa/qa_scripts/openstack/README new file mode 100644 index 00000000..63fe2d97 --- /dev/null +++ b/qa/qa_scripts/openstack/README @@ -0,0 +1,32 @@ +This directory contains scripts to quickly bring up an OpenStack instance, +attach a ceph cluster, create a nova compute node, and store the associated glance images, cinder volumes, nova vm, and cinder backup on ceph via rbd. + +execs is a directory that contains executables that are copied and remotely +run on the OpenStack instance + +files is a directory that contains templates used to initialize OpenStack +conf files. These templates reflect the state of these conf files on 5/17/2016. 
+If further development is necessary in the future, these templates should +probably be removed and direct editing of the OpenStack conf files should +probably be performed. + +These scripts also assume that either there is a rhel iso file named +rhel-server-7.2-x86_64-boot.iso in the user's home directory, or the +exported variable RHEL_ISO is set to point at an existing rhel iso file. +If one is also running the ceph-deploy based ceph_install.sh, this script +also assumes that there is a file named rhceph-1.3.1-rhel-7-x86_64-dvd.iso +in the files directory. These iso files can be obtained from the rhel site +and are not stored with these scripts. + +To install openstack: +./openstack.sh + +This assumes that the ceph cluster is already set up. + +To setup a ceph-cluster using an iso and ceph-deploy: +./ceph_install.sh + +To setup a ceph-cluster using the cdn and ceph-ansible: +cd ceph_install_w_ansible +./ceph_install.sh + diff --git a/qa/qa_scripts/openstack/ceph_install.sh b/qa/qa_scripts/openstack/ceph_install.sh new file mode 100755 index 00000000..47831bd0 --- /dev/null +++ b/qa/qa_scripts/openstack/ceph_install.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +# +# Install a simple ceph cluster upon which openstack images will be stored. +# +set -fv +ceph_node=${1} +source copy_func.sh +copy_file files/$OS_CEPH_ISO $ceph_node . +copy_file execs/ceph_cluster.sh $ceph_node . 0777 +copy_file execs/ceph-pool-create.sh $ceph_node . 0777 +ssh $ceph_node ./ceph_cluster.sh $* diff --git a/qa/qa_scripts/openstack/ceph_install_w_ansible/README b/qa/qa_scripts/openstack/ceph_install_w_ansible/README new file mode 100644 index 00000000..282c46e4 --- /dev/null +++ b/qa/qa_scripts/openstack/ceph_install_w_ansible/README @@ -0,0 +1,32 @@ + +ceph_install.sh installs a ceph cluster using the cdn and ceph-ansible. + +Right now, it takes 5 parameters -- an admin node, a ceph mon node, and +three osd nodes. + +In order to subscribe to the cdn, in your home directory create a file named +secrets, (~/secrets), that contains the following lines: + +subscrname=Your-Redhat-Cdn-Id +subscrpassword=Your-Redhat-Cdn-Password + +If you want to set the monitor_interface or the public_network values, +in your home directory create a file named ip_info (~/ip_info), that +contains the following lines: + +mon_intf=your-monitor-interface (default is eno1) +pub_netw=public-network (default is 10.8.128.0/21) + +This script first subscribes to the cdn, enables the rhel 7 repos, and does +a yum update. (multi_action.sh performs all the actions on all nodes at once, +staller.sh is used to make sure that all updates are complete before exiting, +and execs/cdn_setup.sh is used to remotely update the cdn information. + +After that, it makes sure that all nodes can connect via passwordless ssh +(using talknice.sh and config) and then installs the appropriate repos and +runs ceph_ansible on the admin node using execs/ceph_ansible.sh, +execs/edit_ansible_hosts.sh and execs/edit_groupvars_osds.sh. + +repolocs.sh contains the locations of repo files. These variables can +be changed if one wishes to use different urls. + diff --git a/qa/qa_scripts/openstack/ceph_install_w_ansible/ceph_install.sh b/qa/qa_scripts/openstack/ceph_install_w_ansible/ceph_install.sh new file mode 100755 index 00000000..b4d14f9c --- /dev/null +++ b/qa/qa_scripts/openstack/ceph_install_w_ansible/ceph_install.sh @@ -0,0 +1,39 @@ +#! 
/usr/bin/env bash +if [ $# -ne 5 ]; then + echo 'Usage: ceph_install.sh ' + exit -1 +fi +allnodes=$* +adminnode=$1 +shift +cephnodes=$* +monnode=$1 +shift +osdnodes=$* +./multi_action.sh cdn_setup.sh $allnodes +./talknice.sh $allnodes +for mac in $allnodes; do + ssh $mac sudo yum -y install yum-utils +done + +source ./repolocs.sh +ssh $adminnode sudo yum-config-manager --add ${CEPH_REPO_TOOLS} +ssh $monnode sudo yum-config-manager --add ${CEPH_REPO_MON} +for mac in $osdnodes; do + ssh $mac sudo yum-config-manager --add ${CEPH_REPO_OSD} +done +ssh $adminnode sudo yum-config-manager --add ${INSTALLER_REPO_LOC} + +for mac in $allnodes; do + ssh $mac sudo sed -i 's/gpgcheck=1/gpgcheck=0/' /etc/yum.conf +done + +source copy_func.sh +copy_file execs/ceph_ansible.sh $adminnode . 0777 ubuntu:ubuntu +copy_file execs/edit_ansible_hosts.sh $adminnode . 0777 ubuntu:ubuntu +copy_file execs/edit_groupvars_osds.sh $adminnode . 0777 ubuntu:ubuntu +copy_file ../execs/ceph-pool-create.sh $monnode . 0777 ubuntu:ubuntu +if [ -e ~/ip_info ]; then + copy_file ~/ip_info $adminnode . 0777 ubuntu:ubuntu +fi +ssh $adminnode ./ceph_ansible.sh $cephnodes diff --git a/qa/qa_scripts/openstack/ceph_install_w_ansible/config b/qa/qa_scripts/openstack/ceph_install_w_ansible/config new file mode 100644 index 00000000..a7d81986 --- /dev/null +++ b/qa/qa_scripts/openstack/ceph_install_w_ansible/config @@ -0,0 +1,5 @@ +Host plana* mira* burnupi* tala* saya* vpm* names* gitbuilder* teuthology gw* senta* vercoi* rex* magna* + ServerAliveInterval 360 + StrictHostKeyChecking no + UserKnownHostsFile=/dev/null + User ubuntu diff --git a/qa/qa_scripts/openstack/ceph_install_w_ansible/copy_func.sh b/qa/qa_scripts/openstack/ceph_install_w_ansible/copy_func.sh new file mode 120000 index 00000000..6a36be7b --- /dev/null +++ b/qa/qa_scripts/openstack/ceph_install_w_ansible/copy_func.sh @@ -0,0 +1 @@ +../copy_func.sh \ No newline at end of file diff --git a/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/cdn_setup.sh b/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/cdn_setup.sh new file mode 100755 index 00000000..0c87039d --- /dev/null +++ b/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/cdn_setup.sh @@ -0,0 +1,20 @@ +#! /usr/bin/env bash +if [ -f ~/secrets ]; then + source ~/secrets +fi +subm=`which subscription-manager` +if [ ${#subm} -eq 0 ]; then + sudo yum -y update + exit +fi +subst=`sudo subscription-manager status | grep "^Overall" | awk '{print $NF}'` +if [ $subst == 'Unknown' ]; then + mynameis=${subscrname:-'inigomontoya'} + mypassis=${subscrpassword:-'youkeelmyfatherpreparetodie'} + sudo subscription-manager register --username=$mynameis --password=$mypassis --force + sudo subscription-manager refresh + if [ $? -eq 1 ]; then exit 1; fi + sudo subscription-manager attach --pool=8a85f9823e3d5e43013e3ddd4e2a0977 +fi +sudo subscription-manager repos --enable=rhel-7-server-rpms +sudo yum -y update diff --git a/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/ceph_ansible.sh b/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/ceph_ansible.sh new file mode 100755 index 00000000..8581de60 --- /dev/null +++ b/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/ceph_ansible.sh @@ -0,0 +1,36 @@ +#! 
/usr/bin/env bash +cephnodes=$* +monnode=$1 +sudo yum -y install ceph-ansible +cd +sudo ./edit_ansible_hosts.sh $cephnodes +mkdir ceph-ansible-keys +cd /usr/share/ceph-ansible/group_vars/ +if [ -f ~/ip_info ]; then + source ~/ip_info +fi +mon_intf=${mon_intf:-'eno1'} +pub_netw=${pub_netw:-'10.8.128.0\/21'} +sudo cp all.sample all +sudo sed -i 's/#ceph_origin:.*/ceph_origin: distro/' all +sudo sed -i 's/#fetch_directory:.*/fetch_directory: ~\/ceph-ansible-keys/' all +sudo sed -i 's/#ceph_stable:.*/ceph_stable: true/' all +sudo sed -i 's/#ceph_stable_rh_storage:.*/ceph_stable_rh_storage: false/' all +sudo sed -i 's/#ceph_stable_rh_storage_cdn_install:.*/ceph_stable_rh_storage_cdn_install: true/' all +sudo sed -i 's/#cephx:.*/cephx: true/' all +sudo sed -i "s/#monitor_interface:.*/monitor_interface: ${mon_intf}/" all +sudo sed -i 's/#journal_size:.*/journal_size: 1024/' all +sudo sed -i "s/#public_network:.*/public_network: ${pub_netw}/" all +sudo cp osds.sample osds +sudo sed -i 's/#fetch_directory:.*/fetch_directory: ~\/ceph-ansible-keys/' osds +sudo sed -i 's/#crush_location:/crush_location:/' osds +sudo sed -i 's/#osd_crush_location:/osd_crush_location:/' osds +sudo sed -i 's/#cephx:/cephx:/' osds +sudo sed -i 's/#devices:/devices:/' osds +sudo sed -i 's/#journal_collocation:.*/journal_collocation: true/' osds +cd +sudo ./edit_groupvars_osds.sh +cd /usr/share/ceph-ansible +sudo cp site.yml.sample site.yml +ansible-playbook site.yml +ssh $monnode ~/ceph-pool-create.sh diff --git a/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/edit_ansible_hosts.sh b/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/edit_ansible_hosts.sh new file mode 100755 index 00000000..7eb0b701 --- /dev/null +++ b/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/edit_ansible_hosts.sh @@ -0,0 +1,17 @@ +#! /usr/bin/env bash +ed /etc/ansible/hosts << EOF +$ +a + +[mons] +${1} + +[osds] +${2} +${3} +${4} + +. +w +q +EOF diff --git a/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/edit_groupvars_osds.sh b/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/edit_groupvars_osds.sh new file mode 100755 index 00000000..751658b0 --- /dev/null +++ b/qa/qa_scripts/openstack/ceph_install_w_ansible/execs/edit_groupvars_osds.sh @@ -0,0 +1,13 @@ +#! /usr/bin/env bash +ed /usr/share/ceph-ansible/group_vars/osds << EOF +$ +/^devices: +.+1 +i + - /dev/sdb + - /dev/sdc + - /dev/sdd +. +w +q +EOF diff --git a/qa/qa_scripts/openstack/ceph_install_w_ansible/multi_action.sh b/qa/qa_scripts/openstack/ceph_install_w_ansible/multi_action.sh new file mode 100755 index 00000000..abc368b0 --- /dev/null +++ b/qa/qa_scripts/openstack/ceph_install_w_ansible/multi_action.sh @@ -0,0 +1,19 @@ +#! /usr/bin/env bash +source copy_func.sh +allparms=$* +cmdv=$1 +shift +sites=$* +for mac in $sites; do + echo $cmdv $mac + if [ -f ~/secrets ]; then + copy_file ~/secrets $mac . 0777 ubuntu:ubuntu + fi + copy_file execs/${cmdv} $mac . 0777 ubuntu:ubuntu + ssh $mac ./${cmdv} & +done +./staller.sh $allparms +for mac in $sites; do + ssh $mac sudo rm -rf secrets +done +echo "DONE" diff --git a/qa/qa_scripts/openstack/ceph_install_w_ansible/repolocs.sh b/qa/qa_scripts/openstack/ceph_install_w_ansible/repolocs.sh new file mode 100755 index 00000000..5d82f35d --- /dev/null +++ b/qa/qa_scripts/openstack/ceph_install_w_ansible/repolocs.sh @@ -0,0 +1,8 @@ +#! 
/usr/bin/env bash +SPECIFIC_VERSION=latest-Ceph-2-RHEL-7 +#SPECIFIC_VERSION=Ceph-2-RHEL-7-20160630.t.0 +#SPECIFIC_VERSION=Ceph-2.0-RHEL-7-20160718.t.0 +export CEPH_REPO_TOOLS=http://download.eng.bos.redhat.com/rcm-guest/ceph-drops/auto/ceph-2-rhel-7-compose/${SPECIFIC_VERSION}/compose/Tools/x86_64/os/ +export CEPH_REPO_MON=http://download.eng.bos.redhat.com/rcm-guest/ceph-drops/auto/ceph-2-rhel-7-compose/${SPECIFIC_VERSION}/compose/MON/x86_64/os/ +export CEPH_REPO_OSD=http://download.eng.bos.redhat.com/rcm-guest/ceph-drops/auto/ceph-2-rhel-7-compose/${SPECIFIC_VERSION}/compose/OSD/x86_64/os/ +export INSTALLER_REPO_LOC=http://download.eng.bos.redhat.com/rcm-guest/ceph-drops/auto/rhscon-2-rhel-7-compose/latest-RHSCON-2-RHEL-7/compose/Installer/x86_64/os/ diff --git a/qa/qa_scripts/openstack/ceph_install_w_ansible/staller.sh b/qa/qa_scripts/openstack/ceph_install_w_ansible/staller.sh new file mode 100755 index 00000000..99c00da3 --- /dev/null +++ b/qa/qa_scripts/openstack/ceph_install_w_ansible/staller.sh @@ -0,0 +1,15 @@ +#! /usr/bin/env bash +cmd_wait=$1 +shift +sites=$* +donebit=0 +while [ $donebit -ne 1 ]; do + sleep 10 + donebit=1 + for rem in $sites; do + rval=`ssh $rem ps aux | grep $cmd_wait | wc -l` + if [ $rval -gt 0 ]; then + donebit=0 + fi + done +done diff --git a/qa/qa_scripts/openstack/ceph_install_w_ansible/talknice.sh b/qa/qa_scripts/openstack/ceph_install_w_ansible/talknice.sh new file mode 100755 index 00000000..ffed4f1d --- /dev/null +++ b/qa/qa_scripts/openstack/ceph_install_w_ansible/talknice.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +declare -A rsapub +for fulln in $*; do + sname=`echo $fulln | sed 's/\..*//'` + nhead=`echo $sname | sed 's/[0-9]*//g'` + x=`ssh $fulln "ls .ssh/id_rsa"` + if [ -z $x ]; then + ssh $fulln "ssh-keygen -N '' -f .ssh/id_rsa"; + fi + xx=`ssh $fulln "ls .ssh/config"` + if [ -z $xx ]; then + scp config $fulln:/home/ubuntu/.ssh/config + fi + ssh $fulln "chown ubuntu:ubuntu .ssh/config" + ssh $fulln "chmod 0600 .ssh/config" + rsapub[$fulln]=`ssh $fulln "cat .ssh/id_rsa.pub"` +done +for ii in $*; do + ssh $ii sudo iptables -F + for jj in $*; do + pval=${rsapub[$jj]} + if [ "$ii" != "$jj" ]; then + xxxx=`ssh $ii "grep $jj .ssh/authorized_keys"` + if [ -z "$xxxx" ]; then + ssh $ii "echo '$pval' | sudo tee -a /home/ubuntu/.ssh/authorized_keys" + fi + fi + done; +done diff --git a/qa/qa_scripts/openstack/connectceph.sh b/qa/qa_scripts/openstack/connectceph.sh new file mode 100755 index 00000000..2d70df7f --- /dev/null +++ b/qa/qa_scripts/openstack/connectceph.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# +# Connect openstack node just installed to a ceph cluster. 
+# +# Essentially implements: +# +# http://docs.ceph.com/docs/master/rbd/rbd-openstack/ +# +# The directory named files contains templates for the /etc/glance/glance-api.conf, +# /etc/cinder/cinder.conf, /etc/nova/nova.conf Openstack files +# +set -fv +source ./copy_func.sh +source ./fix_conf_file.sh +openstack_node=${1} +ceph_node=${2} + +scp $ceph_node:/etc/ceph/ceph.conf ./ceph.conf +ssh $openstack_node sudo mkdir /etc/ceph +copy_file ceph.conf $openstack_node /etc/ceph 0644 +rm -f ceph.conf +ssh $openstack_node sudo yum -y install python-rbd +ssh $openstack_node sudo yum -y install ceph-common +ssh $ceph_node "sudo ceph auth get-or-create client.cinder mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rwx pool=vms, allow rx pool=images'" +ssh $ceph_node "sudo ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images'" +ssh $ceph_node "sudo ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups'" +ssh $ceph_node sudo ceph auth get-or-create client.glance mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=images' +ssh $ceph_node sudo ceph auth get-or-create client.cinder-backup mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=backups' +ssh $ceph_node sudo ceph auth get-or-create client.glance | ssh $openstack_node sudo tee /etc/ceph/ceph.client.glance.keyring +ssh $openstack_node sudo chown glance:glance /etc/ceph/ceph.client.glance.keyring +ssh $ceph_node sudo ceph auth get-or-create client.cinder | ssh $openstack_node sudo tee /etc/ceph/ceph.client.cinder.keyring +ssh $openstack_node sudo chown cinder:cinder /etc/ceph/ceph.client.cinder.keyring +ssh $ceph_node sudo ceph auth get-or-create client.cinder-backup | ssh $openstack_node sudo tee /etc/ceph/ceph.client.cinder-backup.keyring +ssh $openstack_node sudo chown cinder:cinder /etc/ceph/ceph.client.cinder-backup.keyring +ssh $ceph_node sudo ceph auth get-key client.cinder | ssh $openstack_node tee client.cinder.key +copy_file execs/libvirt-secret.sh $openstack_node . +secret_msg=`ssh $openstack_node sudo ./libvirt-secret.sh $openstack_node` +secret_virt=`echo $secret_msg | sed 's/.* set //'` +echo $secret_virt +fix_conf_file $openstack_node glance-api /etc/glance +fix_conf_file $openstack_node cinder /etc/cinder $secret_virt +fix_conf_file $openstack_node nova /etc/nova $secret_virt +copy_file execs/start_openstack.sh $openstack_node . 0755 +ssh $openstack_node ./start_openstack.sh diff --git a/qa/qa_scripts/openstack/copy_func.sh b/qa/qa_scripts/openstack/copy_func.sh new file mode 100755 index 00000000..57198026 --- /dev/null +++ b/qa/qa_scripts/openstack/copy_func.sh @@ -0,0 +1,22 @@ +# +# copy_file(, , , [], [] +# +# copy a file -- this is needed because passwordless ssh does not +# work when sudo'ing. 
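As a usage illustration of the copy_file helper being defined here (its parameters are documented one by one just below), a hypothetical call modeled on the install scripts that source this file:

source ./copy_func.sh
# local file, destination node, destination directory, then optional mode and
# owner; the node name and the file being copied are placeholders.
copy_file files/glance-api.template.conf openstack-node /etc/glance 0644 glance:glance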
+# -- name of local file to be copied +# -- node where we want the file +# -- location where we want the file on +# -- (optional) permissions on the copied file +# -- (optional) owner of the copied file +# +function copy_file() { + fname=`basename ${1}` + scp ${1} ${2}:/tmp/${fname} + ssh ${2} sudo cp /tmp/${fname} ${3} + if [ $# -gt 3 ]; then + ssh ${2} sudo chmod ${4} ${3}/${fname} + fi + if [ $# -gt 4 ]; then + ssh ${2} sudo chown ${5} ${3}/${fname} + fi +} diff --git a/qa/qa_scripts/openstack/execs/ceph-pool-create.sh b/qa/qa_scripts/openstack/execs/ceph-pool-create.sh new file mode 100755 index 00000000..723c8306 --- /dev/null +++ b/qa/qa_scripts/openstack/execs/ceph-pool-create.sh @@ -0,0 +1,34 @@ +#!/usr/bin/env bash +set -f + +# +# On the ceph site, make the pools required for Openstack +# + +# +# Make a pool, if it does not already exist. +# +function make_pool { + if [[ -z `sudo ceph osd lspools | grep " $1,"` ]]; then + echo "making $1" + sudo ceph osd pool create $1 128 + fi +} + +# +# Make sure the pg_num and pgp_num values are good. +# +count=`sudo ceph osd pool get rbd pg_num | sed 's/pg_num: //'` +while [ $count -lt 128 ]; do + sudo ceph osd pool set rbd pg_num $count + count=`expr $count + 32` + sleep 30 +done +sudo ceph osd pool set rbd pg_num 128 +sleep 30 +sudo ceph osd pool set rbd pgp_num 128 +sleep 30 +make_pool volumes +make_pool images +make_pool backups +make_pool vms diff --git a/qa/qa_scripts/openstack/execs/ceph_cluster.sh b/qa/qa_scripts/openstack/execs/ceph_cluster.sh new file mode 100755 index 00000000..5afb3c78 --- /dev/null +++ b/qa/qa_scripts/openstack/execs/ceph_cluster.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +set -f + +echo $OS_CEPH_ISO +if [[ $# -ne 4 ]]; then + echo "Usage: ceph_cluster mon.0 osd.0 osd.1 osd.2" + exit -1 +fi +allsites=$* +mon=$1 +shift +osds=$* +ISOVAL=${OS_CEPH_ISO-rhceph-1.3.1-rhel-7-x86_64-dvd.iso} +sudo mount -o loop ${ISOVAL} /mnt + +fqdn=`hostname -f` +lsetup=`ls /mnt/Installer | grep "^ice_setup"` +sudo yum -y install /mnt/Installer/${lsetup} +sudo ice_setup -d /mnt << EOF +yes +/mnt +$fqdn +http +EOF +ceph-deploy new ${mon} +ceph-deploy install --repo --release=ceph-mon ${mon} +ceph-deploy install --repo --release=ceph-osd ${allsites} +ceph-deploy install --mon ${mon} +ceph-deploy install --osd ${allsites} +ceph-deploy mon create-initial +sudo service ceph -a start osd +for d in b c d; do + for m in $osds; do + ceph-deploy disk zap ${m}:sd${d} + done + for m in $osds; do + ceph-deploy osd prepare ${m}:sd${d} + done + for m in $osds; do + ceph-deploy osd activate ${m}:sd${d}1:sd${d}2 + done +done + +sudo ./ceph-pool-create.sh + +hchk=`sudo ceph health` +while [[ $hchk != 'HEALTH_OK' ]]; do + sleep 30 + hchk=`sudo ceph health` +done diff --git a/qa/qa_scripts/openstack/execs/libvirt-secret.sh b/qa/qa_scripts/openstack/execs/libvirt-secret.sh new file mode 100755 index 00000000..75e9e91a --- /dev/null +++ b/qa/qa_scripts/openstack/execs/libvirt-secret.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +set -f + +# +# Generate a libvirt secret on the Openstack node. 
diff --git a/qa/qa_scripts/openstack/execs/libvirt-secret.sh b/qa/qa_scripts/openstack/execs/libvirt-secret.sh new file mode 100755 index 00000000..75e9e91a --- /dev/null +++ b/qa/qa_scripts/openstack/execs/libvirt-secret.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash +set -f + +# +# Generate a libvirt secret on the Openstack node. +# +openstack_node=${1} +uuid=`uuidgen` +cat > secret.xml <<EOF +<secret ephemeral='no' private='no'> +  <uuid>${uuid}</uuid> +  <usage type='ceph'> +    <name>client.cinder secret</name> +  </usage> +</secret> +EOF +sudo virsh secret-define --file secret.xml +sudo virsh secret-set-value --secret ${uuid} --base64 $(cat client.cinder.key) +echo ${uuid} diff --git a/qa/qa_scripts/openstack/execs/openstack-preinstall.sh b/qa/qa_scripts/openstack/execs/openstack-preinstall.sh new file mode 100755 index 00000000..a2b235e7 --- /dev/null +++ b/qa/qa_scripts/openstack/execs/openstack-preinstall.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash +set -f + +# +# Remotely set up everything needed to run packstack. This should do items 1-4 in +# https://docs.google.com/document/d/1us18KR3LuLyINgGk2rmI-SVj9UksCE7y4C2D_68Aa8o/edit?ts=56a78fcb +# +yum remove -y rhos-release +rpm -ivh http://rhos-release.virt.bos.redhat.com/repos/rhos-release/rhos-release-latest.noarch.rpm +rm -rf /etc/yum.repos.d/* +rm -rf /var/cache/yum/* +rhos-release 8 +yum update -y +yum install -y nc puppet vim screen setroubleshoot crudini bpython openstack-packstack +systemctl disable ntpd +systemctl stop ntpd +reboot diff --git a/qa/qa_scripts/openstack/execs/run_openstack.sh b/qa/qa_scripts/openstack/execs/run_openstack.sh new file mode 100755 index 00000000..8764cbeb --- /dev/null +++ b/qa/qa_scripts/openstack/execs/run_openstack.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +set -fv + +# +# Create a glance image, a corresponding cinder volume, a nova instance, attach the cinder volume to the +# nova instance, and create a backup. +# +image_name=${1}X +file_name=${2-rhel-server-7.2-x86_64-boot.iso} +source ./keystonerc_admin +glance image-create --name $image_name --disk-format iso --container-format bare --file $file_name +glance_id=`glance image-list | grep ${image_name} | sed 's/^| //' | sed 's/ |.*//'` +cinder create --image-id ${glance_id} --display-name ${image_name}-volume 8 +nova boot --image ${image_name} --flavor 1 ${image_name}-inst +cinder_id=`cinder list | grep ${image_name} | sed 's/^| //' | sed 's/ |.*//'` +chkr=`cinder list | grep ${image_name}-volume | grep available` +while [ -z "$chkr" ]; do + sleep 30 + chkr=`cinder list | grep ${image_name}-volume | grep available` +done +nova volume-attach ${image_name}-inst ${cinder_id} auto +sleep 30 +cinder backup-create --name ${image_name}-backup ${image_name}-volume --force diff --git a/qa/qa_scripts/openstack/execs/start_openstack.sh b/qa/qa_scripts/openstack/execs/start_openstack.sh new file mode 100755 index 00000000..f5f12fe5 --- /dev/null +++ b/qa/qa_scripts/openstack/execs/start_openstack.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +set -fv + +# +# start the Openstack services +# +sudo cp /root/keystonerc_admin ./keystonerc_admin +sudo chmod 0644 ./keystonerc_admin +source ./keystonerc_admin +sudo service httpd stop +sudo service openstack-keystone restart +sudo service openstack-glance-api restart +sudo service openstack-nova-compute restart +sudo service openstack-cinder-volume restart +sudo service openstack-cinder-backup restart diff --git a/qa/qa_scripts/openstack/files/cinder.template.conf b/qa/qa_scripts/openstack/files/cinder.template.conf new file mode 100644 index 00000000..807125ac --- /dev/null +++ b/qa/qa_scripts/openstack/files/cinder.template.conf @@ -0,0 +1,3481 @@ +[DEFAULT] + +# +# From cinder +# + +# Backup metadata version to be used when backing up volume metadata. If this +# number is bumped, make sure the service doing the restore supports the new +# version.
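The uuid echoed by libvirt-secret.sh above is captured by the connect script as secret_virt and handed to fix_conf_file, so it presumably ends up in the secret-related rbd settings of the generated nova and cinder configuration. A quick check, on the OpenStack node, that the libvirt secret really carries the client.cinder key (a sketch only, not part of the patch; it assumes client.cinder.key is still in the current directory):

    # Compare the value stored in the libvirt secret with the cephx key it should hold.
    uuid=`sudo virsh secret-list | awk '/client.cinder/ {print $1}'`
    sudo virsh secret-get-value ${uuid}
    cat client.cinder.key

The two values printed at the end should be identical.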
(integer value) +#backup_metadata_version = 2 + +# The number of chunks or objects, for which one Ceilometer notification will +# be sent (integer value) +#backup_object_number_per_notification = 10 + +# Interval, in seconds, between two progress notifications reporting the backup +# status (integer value) +#backup_timer_interval = 120 + +# The maximum number of items that a collection resource returns in a single +# response (integer value) +#osapi_max_limit = 1000 + +# Base URL that will be presented to users in links to the OpenStack Volume API +# (string value) +# Deprecated group/name - [DEFAULT]/osapi_compute_link_prefix +#osapi_volume_base_URL = + +# Ceph configuration file to use. (string value) +#backup_ceph_conf = /etc/ceph/ceph.conf +backup_ceph_conf = /etc/ceph/ceph.conf + +# The Ceph user to connect with. Default here is to use the same user as for +# Cinder volumes. If not using cephx this should be set to None. (string value) +#backup_ceph_user = cinder +backup_ceph_user = cinder-backup + +# The chunk size, in bytes, that a backup is broken into before transfer to the +# Ceph object store. (integer value) +#backup_ceph_chunk_size = 134217728 +backup_ceph_chunk_size = 134217728 + +# The Ceph pool where volume backups are stored. (string value) +#backup_ceph_pool = backups +backup_ceph_pool = backups + +# RBD stripe unit to use when creating a backup image. (integer value) +#backup_ceph_stripe_unit = 0 +backup_ceph_stripe_unit = 0 + +# RBD stripe count to use when creating a backup image. (integer value) +#backup_ceph_stripe_count = 0 +backup_ceph_stripe_count = 0 + +# If True, always discard excess bytes when restoring volumes i.e. pad with +# zeroes. (boolean value) +#restore_discard_excess_bytes = true +restore_discard_excess_bytes = true + +# File with the list of available smbfs shares. (string value) +#smbfs_shares_config = /etc/cinder/smbfs_shares + +# Default format that will be used when creating volumes if no volume format is +# specified. (string value) +# Allowed values: raw, qcow2, vhd, vhdx +#smbfs_default_volume_format = qcow2 + +# Create volumes as sparsed files which take no space rather than regular files +# when using raw format, in which case volume creation takes lot of time. +# (boolean value) +#smbfs_sparsed_volumes = true + +# Percent of ACTUAL usage of the underlying volume before no new volumes can be +# allocated to the volume destination. (floating point value) +#smbfs_used_ratio = 0.95 + +# This will compare the allocated to available space on the volume destination. +# If the ratio exceeds this number, the destination will no longer be valid. +# (floating point value) +#smbfs_oversub_ratio = 1.0 + +# Base dir containing mount points for smbfs shares. (string value) +#smbfs_mount_point_base = $state_path/mnt + +# Mount options passed to the smbfs client. See mount.cifs man page for +# details. (string value) +#smbfs_mount_options = noperm,file_mode=0775,dir_mode=0775 + +# Compression algorithm (None to disable) (string value) +#backup_compression_algorithm = zlib + +# Use thin provisioning for SAN volumes? 
(boolean value) +#san_thin_provision = true + +# IP address of SAN controller (string value) +#san_ip = + +# Username for SAN controller (string value) +#san_login = admin + +# Password for SAN controller (string value) +#san_password = + +# Filename of private key to use for SSH authentication (string value) +#san_private_key = + +# Cluster name to use for creating volumes (string value) +#san_clustername = + +# SSH port to use with SAN (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#san_ssh_port = 22 + +# Execute commands locally instead of over SSH; use if the volume service is +# running on the SAN device (boolean value) +#san_is_local = false + +# SSH connection timeout in seconds (integer value) +#ssh_conn_timeout = 30 + +# Minimum ssh connections in the pool (integer value) +#ssh_min_pool_conn = 1 + +# Maximum ssh connections in the pool (integer value) +#ssh_max_pool_conn = 5 + +# Configuration file for HDS NFS cinder plugin (string value) +#hds_hnas_nfs_config_file = /opt/hds/hnas/cinder_nfs_conf.xml + +# Global backend request timeout, in seconds. (integer value) +#violin_request_timeout = 300 + +# Option to enable strict host key checking. When set to "True" Cinder will +# only connect to systems with a host key present in the configured +# "ssh_hosts_key_file". When set to "False" the host key will be saved upon +# first connection and used for subsequent connections. Default=False (boolean +# value) +#strict_ssh_host_key_policy = false + +# File containing SSH host keys for the systems with which Cinder needs to +# communicate. OPTIONAL: Default=$state_path/ssh_known_hosts (string value) +#ssh_hosts_key_file = $state_path/ssh_known_hosts + +# The storage family type used on the storage system; valid values are +# ontap_7mode for using Data ONTAP operating in 7-Mode, ontap_cluster for using +# clustered Data ONTAP, or eseries for using E-Series. (string value) +# Allowed values: ontap_7mode, ontap_cluster, eseries +#netapp_storage_family = ontap_cluster + +# The storage protocol to be used on the data path with the storage system. +# (string value) +# Allowed values: iscsi, fc, nfs +#netapp_storage_protocol = + +# The hostname (or IP address) for the storage system or proxy server. (string +# value) +#netapp_server_hostname = + +# The TCP port to use for communication with the storage system or proxy +# server. If not specified, Data ONTAP drivers will use 80 for HTTP and 443 for +# HTTPS; E-Series will use 8080 for HTTP and 8443 for HTTPS. (integer value) +#netapp_server_port = + +# The transport protocol used when communicating with the storage system or +# proxy server. (string value) +# Allowed values: http, https +#netapp_transport_type = http + +# Administrative user account name used to access the storage system or proxy +# server. (string value) +#netapp_login = + +# Password for the administrative user account specified in the netapp_login +# option. (string value) +#netapp_password = + +# This option specifies the virtual storage server (Vserver) name on the +# storage cluster on which provisioning of block storage volumes should occur. +# (string value) +#netapp_vserver = + +# The vFiler unit on which provisioning of block storage volumes will be done. +# This option is only used by the driver when connecting to an instance with a +# storage family of Data ONTAP operating in 7-Mode. Only use this option when +# utilizing the MultiStore feature on the NetApp storage system. 
(string value) +#netapp_vfiler = + +# The name of the config.conf stanza for a Data ONTAP (7-mode) HA partner. +# This option is only used by the driver when connecting to an instance with a +# storage family of Data ONTAP operating in 7-Mode, and it is required if the +# storage protocol selected is FC. (string value) +#netapp_partner_backend_name = + +# The quantity to be multiplied by the requested volume size to ensure enough +# space is available on the virtual storage server (Vserver) to fulfill the +# volume creation request. Note: this option is deprecated and will be removed +# in favor of "reserved_percentage" in the Mitaka release. (floating point +# value) +#netapp_size_multiplier = 1.2 + +# This option determines if storage space is reserved for LUN allocation. If +# enabled, LUNs are thick provisioned. If space reservation is disabled, +# storage space is allocated on demand. (string value) +# Allowed values: enabled, disabled +#netapp_lun_space_reservation = enabled + +# If the percentage of available space for an NFS share has dropped below the +# value specified by this option, the NFS image cache will be cleaned. (integer +# value) +#thres_avl_size_perc_start = 20 + +# When the percentage of available space on an NFS share has reached the +# percentage specified by this option, the driver will stop clearing files from +# the NFS image cache that have not been accessed in the last M minutes, where +# M is the value of the expiry_thres_minutes configuration option. (integer +# value) +#thres_avl_size_perc_stop = 60 + +# This option specifies the threshold for last access time for images in the +# NFS image cache. When a cache cleaning cycle begins, images in the cache that +# have not been accessed in the last M minutes, where M is the value of this +# parameter, will be deleted from the cache to create free space on the NFS +# share. (integer value) +#expiry_thres_minutes = 720 + +# This option is used to specify the path to the E-Series proxy application on +# a proxy server. The value is combined with the value of the +# netapp_transport_type, netapp_server_hostname, and netapp_server_port options +# to create the URL used by the driver to connect to the proxy application. +# (string value) +#netapp_webservice_path = /devmgr/v2 + +# This option is only utilized when the storage family is configured to +# eseries. This option is used to restrict provisioning to the specified +# controllers. Specify the value of this option to be a comma separated list of +# controller hostnames or IP addresses to be used for provisioning. (string +# value) +#netapp_controller_ips = + +# Password for the NetApp E-Series storage array. (string value) +#netapp_sa_password = + +# This option specifies whether the driver should allow operations that require +# multiple attachments to a volume. An example would be live migration of +# servers that have volumes attached. When enabled, this backend is limited to +# 256 total volumes in order to guarantee volumes can be accessed by more than +# one host. (boolean value) +#netapp_enable_multiattach = false + +# This option specifies the path of the NetApp copy offload tool binary. Ensure +# that the binary has execute permissions set which allow the effective user of +# the cinder-volume process to execute the file. (string value) +#netapp_copyoffload_tool_path = + +# This option defines the type of operating system that will access a LUN +# exported from Data ONTAP; it is assigned to the LUN at the time it is +# created. 
(string value) +#netapp_lun_ostype = + +# This option defines the type of operating system for all initiators that can +# access a LUN. This information is used when mapping LUNs to individual hosts +# or groups of hosts. (string value) +# Deprecated group/name - [DEFAULT]/netapp_eseries_host_type +#netapp_host_type = + +# This option is used to restrict provisioning to the specified pools. Specify +# the value of this option to be a regular expression which will be applied to +# the names of objects from the storage backend which represent pools in +# Cinder. This option is only utilized when the storage protocol is configured +# to use iSCSI or FC. (string value) +# Deprecated group/name - [DEFAULT]/netapp_volume_list +# Deprecated group/name - [DEFAULT]/netapp_storage_pools +#netapp_pool_name_search_pattern = (.+) + +# Base dir containing mount point for gluster share. (string value) +#glusterfs_backup_mount_point = $state_path/backup_mount + +# GlusterFS share in : format. +# Eg: 1.2.3.4:backup_vol (string value) +#glusterfs_backup_share = + +# Volume prefix for the backup id when backing up to TSM (string value) +#backup_tsm_volume_prefix = backup + +# TSM password for the running username (string value) +#backup_tsm_password = password + +# Enable or Disable compression for backups (boolean value) +#backup_tsm_compression = true + +# Request for FC Zone creating host group (boolean value) +#hpxp_zoning_request = false + +# Type of storage command line interface (string value) +#hpxp_storage_cli = + +# ID of storage system (string value) +#hpxp_storage_id = + +# Pool of storage system (string value) +#hpxp_pool = + +# Thin pool of storage system (string value) +#hpxp_thin_pool = + +# Logical device range of storage system (string value) +#hpxp_ldev_range = + +# Default copy method of storage system. There are two valid values: "FULL" +# specifies that a full copy; "THIN" specifies that a thin copy. 
Default value +# is "FULL" (string value) +#hpxp_default_copy_method = FULL + +# Copy speed of storage system (integer value) +#hpxp_copy_speed = 3 + +# Interval to check copy (integer value) +#hpxp_copy_check_interval = 3 + +# Interval to check copy asynchronously (integer value) +#hpxp_async_copy_check_interval = 10 + +# Target port names for host group or iSCSI target (list value) +#hpxp_target_ports = + +# Target port names of compute node for host group or iSCSI target (list value) +#hpxp_compute_target_ports = + +# Request for creating host group or iSCSI target (boolean value) +#hpxp_group_request = false + +# Instance numbers for HORCM (list value) +#hpxp_horcm_numbers = 200,201 + +# Username of storage system for HORCM (string value) +#hpxp_horcm_user = + +# Add to HORCM configuration (boolean value) +#hpxp_horcm_add_conf = true + +# Resource group name of storage system for HORCM (string value) +#hpxp_horcm_resource_name = meta_resource + +# Only discover a specific name of host group or iSCSI target (boolean value) +#hpxp_horcm_name_only_discovery = false + +# Storage system storage pool for volumes (string value) +#storwize_svc_volpool_name = volpool + +# Storage system space-efficiency parameter for volumes (percentage) (integer +# value) +# Minimum value: -1 +# Maximum value: 100 +#storwize_svc_vol_rsize = 2 + +# Storage system threshold for volume capacity warnings (percentage) (integer +# value) +# Minimum value: -1 +# Maximum value: 100 +#storwize_svc_vol_warning = 0 + +# Storage system autoexpand parameter for volumes (True/False) (boolean value) +#storwize_svc_vol_autoexpand = true + +# Storage system grain size parameter for volumes (32/64/128/256) (integer +# value) +#storwize_svc_vol_grainsize = 256 + +# Storage system compression option for volumes (boolean value) +#storwize_svc_vol_compression = false + +# Enable Easy Tier for volumes (boolean value) +#storwize_svc_vol_easytier = true + +# The I/O group in which to allocate volumes (integer value) +#storwize_svc_vol_iogrp = 0 + +# Maximum number of seconds to wait for FlashCopy to be prepared. (integer +# value) +# Minimum value: 1 +# Maximum value: 600 +#storwize_svc_flashcopy_timeout = 120 + +# Connection protocol (iSCSI/FC) (string value) +#storwize_svc_connection_protocol = iSCSI + +# Configure CHAP authentication for iSCSI connections (Default: Enabled) +# (boolean value) +#storwize_svc_iscsi_chap_enabled = true + +# Connect with multipath (FC only; iSCSI multipath is controlled by Nova) +# (boolean value) +#storwize_svc_multipath_enabled = false + +# Allows vdisk to multi host mapping (boolean value) +#storwize_svc_multihostmap_enabled = true + +# Indicate whether svc driver is compatible for NPIV setup. If it is +# compatible, it will allow no wwpns being returned on get_conn_fc_wwpns during +# initialize_connection. It should always be set to True. It will be deprecated +# and removed in M release. (boolean value) +#storwize_svc_npiv_compatibility_mode = true + +# Allow tenants to specify QOS on create (boolean value) +#storwize_svc_allow_tenant_qos = false + +# If operating in stretched cluster mode, specify the name of the pool in which +# mirrored copies are stored.Example: "pool2" (string value) +#storwize_svc_stretched_cluster_partner = + +# Driver to use for backups. (string value) +#backup_driver = cinder.backup.drivers.swift +backup_driver = cinder.backup.drivers.ceph + +# Offload pending backup delete during backup service startup. 
(boolean value) +#backup_service_inithost_offload = false + +# Make exception message format errors fatal. (boolean value) +#fatal_exception_format_errors = false + +# IP address of this host (string value) +#my_ip = 10.16.48.99 + +# Default glance host name or IP (string value) +#glance_host = $my_ip +glance_host = VARINET4ADDR + +# Default glance port (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#glance_port = 9292 + +# A list of the glance API servers available to cinder ([hostname|ip]:port) +# (list value) +#glance_api_servers = $glance_host:$glance_port + +# Version of the glance API to use (integer value) +#glance_api_version = 1 + +# Number retries when downloading an image from glance (integer value) +#glance_num_retries = 0 + +# Allow to perform insecure SSL (https) requests to glance (boolean value) +#glance_api_insecure = false + +# Enables or disables negotiation of SSL layer compression. In some cases +# disabling compression can improve data throughput, such as when high network +# bandwidth is available and you use compressed image formats like qcow2. +# (boolean value) +#glance_api_ssl_compression = false + +# Location of ca certificates file to use for glance client requests. (string +# value) +#glance_ca_certificates_file = + +# http/https timeout value for glance operations. If no value (None) is +# supplied here, the glanceclient default value is used. (integer value) +#glance_request_timeout = + +# The topic that scheduler nodes listen on (string value) +#scheduler_topic = cinder-scheduler + +# The topic that volume nodes listen on (string value) +#volume_topic = cinder-volume + +# The topic that volume backup nodes listen on (string value) +#backup_topic = cinder-backup + +# DEPRECATED: Deploy v1 of the Cinder API. (boolean value) +#enable_v1_api = true +enable_v1_api = True + +# Deploy v2 of the Cinder API. (boolean value) +#enable_v2_api = true +enable_v2_api = True + +# Enables or disables rate limit of the API. (boolean value) +#api_rate_limit = true + +# Specify list of extensions to load when using osapi_volume_extension option +# with cinder.api.contrib.select_extensions (list value) +#osapi_volume_ext_list = + +# osapi volume extension to load (multi valued) +#osapi_volume_extension = cinder.api.contrib.standard_extensions + +# Full class name for the Manager for volume (string value) +#volume_manager = cinder.volume.manager.VolumeManager + +# Full class name for the Manager for volume backup (string value) +#backup_manager = cinder.backup.manager.BackupManager + +# Full class name for the Manager for scheduler (string value) +#scheduler_manager = cinder.scheduler.manager.SchedulerManager + +# Name of this node. This can be an opaque identifier. It is not necessarily a +# host name, FQDN, or IP address. (string value) +#host = x86-024.build.eng.bos.redhat.com +host = VARHOSTNAME + +# Availability zone of this node (string value) +#storage_availability_zone = nova +storage_availability_zone = nova + +# Default availability zone for new volumes. If not set, the +# storage_availability_zone option value is used as the default for new +# volumes. (string value) +#default_availability_zone = +default_availability_zone = nova + +# If the requested Cinder availability zone is unavailable, fall back to the +# value of default_availability_zone, then storage_availability_zone, instead +# of failing. 
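Several of the uncommented values in this template, such as glance_host = VARINET4ADDR and host = VARHOSTNAME above (and the VARINET4ADDR in backup_swift_url further down), are placeholder tokens rather than real settings; they are presumably rewritten per node by the fix_conf_file helper that the connect script sources. A stand-in for that substitution step, using a documentation IP (both the IP and the output path here are illustrative only):

    # Render the template for one node by replacing the VAR* tokens.
    sed -e "s/VARINET4ADDR/192.0.2.10/g" \
        -e "s/VARHOSTNAME/`hostname -f`/g" \
        files/cinder.template.conf > cinder.conf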
(boolean value) +#allow_availability_zone_fallback = false + +# Default volume type to use (string value) +#default_volume_type = + +# Time period for which to generate volume usages. The options are hour, day, +# month, or year. (string value) +#volume_usage_audit_period = month + +# Path to the rootwrap configuration file to use for running commands as root +# (string value) +#rootwrap_config = /etc/cinder/rootwrap.conf + +# Enable monkey patching (boolean value) +#monkey_patch = false + +# List of modules/decorators to monkey patch (list value) +#monkey_patch_modules = + +# Maximum time since last check-in for a service to be considered up (integer +# value) +#service_down_time = 60 + +# The full class name of the volume API class to use (string value) +#volume_api_class = cinder.volume.api.API + +# The full class name of the volume backup API class (string value) +#backup_api_class = cinder.backup.api.API + +# The strategy to use for auth. Supports noauth, keystone, and deprecated. +# (string value) +# Allowed values: noauth, keystone, deprecated +#auth_strategy = keystone +auth_strategy = keystone + +# A list of backend names to use. These backend names should be backed by a +# unique [CONFIG] group with its options (list value) +#enabled_backends = +enabled_backends = ceph + +# Whether snapshots count against gigabyte quota (boolean value) +#no_snapshot_gb_quota = false + +# The full class name of the volume transfer API class (string value) +#transfer_api_class = cinder.transfer.api.API + +# The full class name of the volume replication API class (string value) +#replication_api_class = cinder.replication.api.API + +# The full class name of the consistencygroup API class (string value) +#consistencygroup_api_class = cinder.consistencygroup.api.API + +# OpenStack privileged account username. Used for requests to other services +# (such as Nova) that require an account with special rights. (string value) +#os_privileged_user_name = + +# Password associated with the OpenStack privileged account. (string value) +#os_privileged_user_password = + +# Tenant name associated with the OpenStack privileged account. (string value) +#os_privileged_user_tenant = + +# Auth URL associated with the OpenStack privileged account. (string value) +#os_privileged_user_auth_url = + +# Multiplier used for weighing volume capacity. Negative numbers mean to stack +# vs spread. (floating point value) +#capacity_weight_multiplier = 1.0 + +# Multiplier used for weighing volume capacity. Negative numbers mean to stack +# vs spread. (floating point value) +#allocated_capacity_weight_multiplier = -1.0 + +# IP address of sheep daemon. (string value) +#sheepdog_store_address = 127.0.0.1 + +# Port of sheep daemon. (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#sheepdog_store_port = 7000 + +# Specifies the path of the GPFS directory where Block Storage volume and +# snapshot files are stored. (string value) +#gpfs_mount_point_base = + +# Specifies the path of the Image service repository in GPFS. Leave undefined +# if not storing images in GPFS. (string value) +#gpfs_images_dir = + +# Specifies the type of image copy to be used. Set this when the Image service +# repository also uses GPFS so that image files can be transferred efficiently +# from the Image service to the Block Storage service. 
There are two valid +# values: "copy" specifies that a full copy of the image is made; +# "copy_on_write" specifies that copy-on-write optimization strategy is used +# and unmodified blocks of the image file are shared efficiently. (string +# value) +# Allowed values: copy, copy_on_write, +#gpfs_images_share_mode = + +# Specifies an upper limit on the number of indirections required to reach a +# specific block due to snapshots or clones. A lengthy chain of copy-on-write +# snapshots or clones can have a negative impact on performance, but improves +# space utilization. 0 indicates unlimited clone depth. (integer value) +#gpfs_max_clone_depth = 0 + +# Specifies that volumes are created as sparse files which initially consume no +# space. If set to False, the volume is created as a fully allocated file, in +# which case, creation may take a significantly longer time. (boolean value) +#gpfs_sparse_volumes = true + +# Specifies the storage pool that volumes are assigned to. By default, the +# system storage pool is used. (string value) +#gpfs_storage_pool = system + +# Set 512 byte emulation on volume creation; (boolean value) +#sf_emulate_512 = true + +# Allow tenants to specify QOS on create (boolean value) +#sf_allow_tenant_qos = false + +# Create SolidFire accounts with this prefix. Any string can be used here, but +# the string "hostname" is special and will create a prefix using the cinder +# node hostname (previous default behavior). The default is NO prefix. (string +# value) +#sf_account_prefix = + +# Account name on the SolidFire Cluster to use as owner of template/cache +# volumes (created if does not exist). (string value) +#sf_template_account_name = openstack-vtemplate + +# Create an internal cache of copy of images when a bootable volume is created +# to eliminate fetch from glance and qemu-conversion on subsequent calls. +# (boolean value) +#sf_allow_template_caching = true + +# Overrides default cluster SVIP with the one specified. This is required or +# deployments that have implemented the use of VLANs for iSCSI networks in +# their cloud. (string value) +#sf_svip = + +# Create an internal mapping of volume IDs and account. Optimizes lookups and +# performance at the expense of memory, very large deployments may want to +# consider setting to False. (boolean value) +#sf_enable_volume_mapping = true + +# SolidFire API port. Useful if the device api is behind a proxy on a different +# port. (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#sf_api_port = 443 + +# IBMNAS platform type to be used as backend storage; valid values are - v7ku : +# for using IBM Storwize V7000 Unified, sonas : for using IBM Scale Out NAS, +# gpfs-nas : for using NFS based IBM GPFS deployments. (string value) +# Allowed values: v7ku, sonas, gpfs-nas +#ibmnas_platform_type = v7ku + +# The URL of the Swift endpoint (string value) +#backup_swift_url = +backup_swift_url = http://VARINET4ADDR:8080/v1/AUTH_ + +# Info to match when looking for swift in the service catalog. Format is: +# separated values of the form: :: - +# Only used if backup_swift_url is unset (string value) +#swift_catalog_info = object-store:swift:publicURL + +# Swift authentication mechanism (string value) +#backup_swift_auth = per_user + +# Swift authentication version. Specify "1" for auth 1.0, or "2" for auth 2.0 +# (string value) +#backup_swift_auth_version = 1 + +# Swift tenant/account name. 
Required when connecting to an auth 2.0 system +# (string value) +#backup_swift_tenant = + +# Swift user name (string value) +#backup_swift_user = + +# Swift key for authentication (string value) +#backup_swift_key = + +# The default Swift container to use (string value) +#backup_swift_container = volumebackups +backup_swift_container = volumes_backup + +# The size in bytes of Swift backup objects (integer value) +#backup_swift_object_size = 52428800 + +# The size in bytes that changes are tracked for incremental backups. +# backup_swift_object_size has to be multiple of backup_swift_block_size. +# (integer value) +#backup_swift_block_size = 32768 + +# The number of retries to make for Swift operations (integer value) +#backup_swift_retry_attempts = 3 + +# The backoff time in seconds between Swift retries (integer value) +#backup_swift_retry_backoff = 2 + +# Enable or Disable the timer to send the periodic progress notifications to +# Ceilometer when backing up the volume to the Swift backend storage. The +# default value is True to enable the timer. (boolean value) +#backup_swift_enable_progress_timer = true + +# Location of the CA certificate file to use for swift client requests. (string +# value) +#backup_swift_ca_cert_file = + +# These values will be used for CloudByte storage's addQos API call. (dict +# value) +#cb_add_qosgroup = graceallowed:false,iops:10,iopscontrol:true,latency:15,memlimit:0,networkspeed:0,throughput:0,tpcontrol:false + +# These values will be used for CloudByte storage's createVolume API call. +# (dict value) +#cb_create_volume = blocklength:512B,compression:off,deduplication:off,protocoltype:ISCSI,recordsize:16k,sync:always + +# Driver will use this API key to authenticate against the CloudByte storage's +# management interface. (string value) +#cb_apikey = + +# CloudByte storage specific account name. This maps to a project name in +# OpenStack. (string value) +#cb_account_name = + +# This corresponds to the name of Tenant Storage Machine (TSM) in CloudByte +# storage. A volume will be created in this TSM. (string value) +#cb_tsm_name = + +# A retry value in seconds. Will be used by the driver to check if volume +# creation was successful in CloudByte storage. (integer value) +#cb_confirm_volume_create_retry_interval = 5 + +# Will confirm a successful volume creation in CloudByte storage by making this +# many number of attempts. (integer value) +#cb_confirm_volume_create_retries = 3 + +# A retry value in seconds. Will be used by the driver to check if volume +# deletion was successful in CloudByte storage. (integer value) +#cb_confirm_volume_delete_retry_interval = 5 + +# Will confirm a successful volume deletion in CloudByte storage by making this +# many number of attempts. (integer value) +#cb_confirm_volume_delete_retries = 3 + +# This corresponds to the discovery authentication group in CloudByte storage. +# Chap users are added to this group. Driver uses the first user found for this +# group. Default value is None. (string value) +#cb_auth_group = None + +# Interval, in seconds, between nodes reporting state to datastore (integer +# value) +#report_interval = 10 + +# Interval, in seconds, between running periodic tasks (integer value) +#periodic_interval = 60 + +# Range, in seconds, to randomly delay when starting the periodic task +# scheduler to reduce stampeding. 
(Disable by setting to 0) (integer value) +#periodic_fuzzy_delay = 60 + +# IP address on which OpenStack Volume API listens (string value) +#osapi_volume_listen = 0.0.0.0 +osapi_volume_listen = 0.0.0.0 + +# Port on which OpenStack Volume API listens (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#osapi_volume_listen_port = 8776 + +# Number of workers for OpenStack Volume API service. The default is equal to +# the number of CPUs available. (integer value) +#osapi_volume_workers = +osapi_volume_workers = 12 + +# The full class name of the compute API class to use (string value) +#compute_api_class = cinder.compute.nova.API + +# Number of nodes that should replicate the data. (string value) +#drbdmanage_redundancy = 1 + +# Pool or Vdisk name to use for volume creation. (string value) +#dothill_backend_name = A + +# linear (for Vdisk) or virtual (for Pool). (string value) +# Allowed values: linear, virtual +#dothill_backend_type = virtual + +# DotHill API interface protocol. (string value) +# Allowed values: http, https +#dothill_api_protocol = https + +# Whether to verify DotHill array SSL certificate. (boolean value) +#dothill_verify_certificate = false + +# DotHill array SSL certificate path. (string value) +#dothill_verify_certificate_path = + +# List of comma-separated target iSCSI IP addresses. (list value) +#dothill_iscsi_ips = + +# File with the list of available gluster shares (string value) +#glusterfs_shares_config = /etc/cinder/glusterfs_shares + +# Base dir containing mount points for gluster shares. (string value) +#glusterfs_mount_point_base = $state_path/mnt + +# REST API authorization token. (string value) +#pure_api_token = + +# ID of the project which will be used as the Cinder internal tenant. (string +# value) +#cinder_internal_tenant_project_id = + +# ID of the user to be used in volume operations as the Cinder internal tenant. +# (string value) +#cinder_internal_tenant_user_id = + +# The scheduler host manager class to use (string value) +#scheduler_host_manager = cinder.scheduler.host_manager.HostManager + +# Maximum number of attempts to schedule an volume (integer value) +#scheduler_max_attempts = 3 + +# Path or URL to Scality SOFS configuration file (string value) +#scality_sofs_config = + +# Base dir where Scality SOFS shall be mounted (string value) +#scality_sofs_mount_point = $state_path/scality + +# Path from Scality SOFS root to volume dir (string value) +#scality_sofs_volume_dir = cinder/volumes + +# VNX authentication scope type. (string value) +#storage_vnx_authentication_type = global + +# Directory path that contains the VNX security file. Make sure the security +# file is generated first. (string value) +#storage_vnx_security_file_dir = + +# Naviseccli Path. (string value) +#naviseccli_path = + +# Comma-separated list of storage pool names to be used. (string value) +# Deprecated group/name - [DEFAULT]/storage_vnx_pool_name +#storage_vnx_pool_names = + +# VNX secondary SP IP Address. (string value) +#san_secondary_ip = + +# Default timeout for CLI operations in minutes. For example, LUN migration is +# a typical long running operation, which depends on the LUN size and the load +# of the array. An upper bound in the specific deployment can be set to avoid +# unnecessary long wait. By default, it is 365 days long. (integer value) +#default_timeout = 525600 + +# Default max number of LUNs in a storage group. By default, the value is 255. 
+# (integer value) +#max_luns_per_storage_group = 255 + +# To destroy storage group when the last LUN is removed from it. By default, +# the value is False. (boolean value) +#destroy_empty_storage_group = false + +# Mapping between hostname and its iSCSI initiator IP addresses. (string value) +#iscsi_initiators = + +# Comma separated iSCSI or FC ports to be used in Nova or Cinder. (string +# value) +#io_port_list = * + +# Automatically register initiators. By default, the value is False. (boolean +# value) +#initiator_auto_registration = false + +# Automatically deregister initiators after the related storage group is +# destroyed. By default, the value is False. (boolean value) +#initiator_auto_deregistration = false + +# Report free_capacity_gb as 0 when the limit to maximum number of pool LUNs is +# reached. By default, the value is False. (boolean value) +#check_max_pool_luns_threshold = false + +# Delete a LUN even if it is in Storage Groups. (boolean value) +#force_delete_lun_in_storagegroup = false + +# Force LUN creation even if the full threshold of pool is reached. (boolean +# value) +#ignore_pool_full_threshold = false + +# IP address for connecting to VMware ESX/vCenter server. (string value) +#vmware_host_ip = + +# Username for authenticating with VMware ESX/vCenter server. (string value) +#vmware_host_username = + +# Password for authenticating with VMware ESX/vCenter server. (string value) +#vmware_host_password = + +# Optional VIM service WSDL Location e.g http:///vimService.wsdl. +# Optional over-ride to default location for bug work-arounds. (string value) +#vmware_wsdl_location = + +# Number of times VMware ESX/vCenter server API must be retried upon connection +# related issues. (integer value) +#vmware_api_retry_count = 10 + +# The interval (in seconds) for polling remote tasks invoked on VMware +# ESX/vCenter server. (floating point value) +#vmware_task_poll_interval = 0.5 + +# Name of the vCenter inventory folder that will contain Cinder volumes. This +# folder will be created under "OpenStack/", where +# project_folder is of format "Project ()". (string value) +#vmware_volume_folder = Volumes + +# Timeout in seconds for VMDK volume transfer between Cinder and Glance. +# (integer value) +#vmware_image_transfer_timeout_secs = 7200 + +# Max number of objects to be retrieved per batch. Query results will be +# obtained in batches from the server and not in one shot. Server may still +# limit the count to something less than the configured value. (integer value) +#vmware_max_objects_retrieval = 100 + +# Optional string specifying the VMware vCenter server version. The driver +# attempts to retrieve the version from VMware vCenter server. Set this +# configuration only if you want to override the vCenter server version. +# (string value) +#vmware_host_version = + +# Directory where virtual disks are stored during volume backup and restore. +# (string value) +#vmware_tmp_dir = /tmp + +# CA bundle file to use in verifying the vCenter server certificate. (string +# value) +#vmware_ca_file = + +# If true, the vCenter server certificate is not verified. If false, then the +# default CA truststore is used for verification. This option is ignored if +# "vmware_ca_file" is set. (boolean value) +#vmware_insecure = false + +# Name of a vCenter compute cluster where volumes should be created. (multi +# valued) +#vmware_cluster_name = + +# Pool or Vdisk name to use for volume creation. (string value) +#lenovo_backend_name = A + +# linear (for VDisk) or virtual (for Pool). 
(string value) +# Allowed values: linear, virtual +#lenovo_backend_type = virtual + +# Lenovo api interface protocol. (string value) +# Allowed values: http, https +#lenovo_api_protocol = https + +# Whether to verify Lenovo array SSL certificate. (boolean value) +#lenovo_verify_certificate = false + +# Lenovo array SSL certificate path. (string value) +#lenovo_verify_certificate_path = + +# List of comma-separated target iSCSI IP addresses. (list value) +#lenovo_iscsi_ips = + +# The maximum size in bytes of the files used to hold backups. If the volume +# being backed up exceeds this size, then it will be backed up into multiple +# files.backup_file_size must be a multiple of backup_sha_block_size_bytes. +# (integer value) +#backup_file_size = 1999994880 + +# The size in bytes that changes are tracked for incremental backups. +# backup_file_size has to be multiple of backup_sha_block_size_bytes. (integer +# value) +#backup_sha_block_size_bytes = 32768 + +# Enable or Disable the timer to send the periodic progress notifications to +# Ceilometer when backing up the volume to the backend storage. The default +# value is True to enable the timer. (boolean value) +#backup_enable_progress_timer = true + +# Path specifying where to store backups. (string value) +#backup_posix_path = $state_path/backup + +# Custom directory to use for backups. (string value) +#backup_container = + +# REST server port. (string value) +#sio_rest_server_port = 443 + +# Whether to verify server certificate. (boolean value) +#sio_verify_server_certificate = false + +# Server certificate path. (string value) +#sio_server_certificate_path = + +# Whether to round volume capacity. (boolean value) +#sio_round_volume_capacity = true + +# Whether to allow force delete. (boolean value) +#sio_force_delete = false + +# Whether to unmap volume before deletion. (boolean value) +#sio_unmap_volume_before_deletion = false + +# Protection domain id. (string value) +#sio_protection_domain_id = + +# Protection domain name. (string value) +#sio_protection_domain_name = + +# Storage pools. (string value) +#sio_storage_pools = + +# Storage pool name. (string value) +#sio_storage_pool_name = + +# Storage pool id. (string value) +#sio_storage_pool_id = + +# Group name to use for creating volumes. Defaults to "group-0". (string value) +#eqlx_group_name = group-0 + +# Timeout for the Group Manager cli command execution. Default is 30. Note that +# this option is deprecated in favour of "ssh_conn_timeout" as specified in +# cinder/volume/drivers/san/san.py and will be removed in M release. (integer +# value) +#eqlx_cli_timeout = 30 + +# Maximum retry count for reconnection. Default is 5. (integer value) +#eqlx_cli_max_retries = 5 + +# Use CHAP authentication for targets. Note that this option is deprecated in +# favour of "use_chap_auth" as specified in cinder/volume/driver.py and will be +# removed in next release. (boolean value) +#eqlx_use_chap = false + +# Existing CHAP account name. Note that this option is deprecated in favour of +# "chap_username" as specified in cinder/volume/driver.py and will be removed +# in next release. (string value) +#eqlx_chap_login = admin + +# Password for specified CHAP account name. Note that this option is deprecated +# in favour of "chap_password" as specified in cinder/volume/driver.py and will +# be removed in the next release (string value) +#eqlx_chap_password = password + +# Pool in which volumes will be created. Defaults to "default". 
(string value) +#eqlx_pool = default + +# The number of characters in the salt. (integer value) +#volume_transfer_salt_length = 8 + +# The number of characters in the autogenerated auth key. (integer value) +#volume_transfer_key_length = 16 + +# Services to be added to the available pool on create (boolean value) +#enable_new_services = true + +# Template string to be used to generate volume names (string value) +#volume_name_template = volume-%s + +# Template string to be used to generate snapshot names (string value) +#snapshot_name_template = snapshot-%s + +# Template string to be used to generate backup names (string value) +#backup_name_template = backup-%s + +# Multiplier used for weighing volume number. Negative numbers mean to spread +# vs stack. (floating point value) +#volume_number_multiplier = -1.0 + +# Default storage pool for volumes. (integer value) +#ise_storage_pool = 1 + +# Raid level for ISE volumes. (integer value) +#ise_raid = 1 + +# Number of retries (per port) when establishing connection to ISE management +# port. (integer value) +#ise_connection_retries = 5 + +# Interval (secs) between retries. (integer value) +#ise_retry_interval = 1 + +# Number on retries to get completion status after issuing a command to ISE. +# (integer value) +#ise_completion_retries = 30 + +# Storage pool name. (string value) +#zfssa_pool = + +# Project name. (string value) +#zfssa_project = + +# Block size. (string value) +# Allowed values: 512, 1k, 2k, 4k, 8k, 16k, 32k, 64k, 128k +#zfssa_lun_volblocksize = 8k + +# Flag to enable sparse (thin-provisioned): True, False. (boolean value) +#zfssa_lun_sparse = false + +# Data compression. (string value) +# Allowed values: off, lzjb, gzip-2, gzip, gzip-9 +#zfssa_lun_compression = off + +# Synchronous write bias. (string value) +# Allowed values: latency, throughput +#zfssa_lun_logbias = latency + +# iSCSI initiator group. (string value) +#zfssa_initiator_group = + +# iSCSI initiator IQNs. (comma separated) (string value) +#zfssa_initiator = + +# iSCSI initiator CHAP user (name). (string value) +#zfssa_initiator_user = + +# Secret of the iSCSI initiator CHAP user. (string value) +#zfssa_initiator_password = + +# iSCSI initiators configuration. (string value) +#zfssa_initiator_config = + +# iSCSI target group name. (string value) +#zfssa_target_group = tgt-grp + +# iSCSI target CHAP user (name). (string value) +#zfssa_target_user = + +# Secret of the iSCSI target CHAP user. (string value) +#zfssa_target_password = + +# iSCSI target portal (Data-IP:Port, w.x.y.z:3260). (string value) +#zfssa_target_portal = + +# Network interfaces of iSCSI targets. (comma separated) (string value) +#zfssa_target_interfaces = + +# REST connection timeout. (seconds) (integer value) +#zfssa_rest_timeout = + +# IP address used for replication data. (maybe the same as data ip) (string +# value) +#zfssa_replication_ip = + +# Flag to enable local caching: True, False. (boolean value) +#zfssa_enable_local_cache = true + +# Name of ZFSSA project where cache volumes are stored. (string value) +#zfssa_cache_project = os-cinder-cache + +# Sets the value of TCP_KEEPALIVE (True/False) for each server socket. (boolean +# value) +#tcp_keepalive = true + +# Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not +# supported on OS X. (integer value) +#tcp_keepidle = 600 + +# Sets the value of TCP_KEEPINTVL in seconds for each server socket. Not +# supported on OS X. (integer value) +#tcp_keepalive_interval = + +# Sets the value of TCP_KEEPCNT for each server socket. 
Not supported on OS X. +# (integer value) +#tcp_keepalive_count = + +# CA certificate file to use to verify connecting clients (string value) +#ssl_ca_file = + +# Certificate file to use when starting the server securely (string value) +#ssl_cert_file = + +# Private key file to use when starting the server securely (string value) +#ssl_key_file = + +# Maximum line size of message headers to be accepted. max_header_line may need +# to be increased when using large tokens (typically those generated by the +# Keystone v3 API with big service catalogs). (integer value) +#max_header_line = 16384 + +# Timeout for client connections' socket operations. If an incoming connection +# is idle for this number of seconds it will be closed. A value of '0' means +# wait forever. (integer value) +#client_socket_timeout = 900 + +# If False, closes the client socket connection explicitly. Setting it to True +# to maintain backward compatibility. Recommended setting is set it to False. +# (boolean value) +#wsgi_keep_alive = true + +# Number of times to attempt to run flakey shell commands (integer value) +#num_shell_tries = 3 + +# The percentage of backend capacity is reserved (integer value) +# Maximum value: 100 +#reserved_percentage = 0 + +# Prefix for iSCSI volumes (string value) +#iscsi_target_prefix = iqn.2010-10.org.openstack: + +# The IP address that the iSCSI daemon is listening on (string value) +#iscsi_ip_address = $my_ip + +# The list of secondary IP addresses of the iSCSI daemon (list value) +#iscsi_secondary_ip_addresses = + +# The port that the iSCSI daemon is listening on (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#iscsi_port = 3260 + +# The maximum number of times to rescan targets to find volume (integer value) +#num_volume_device_scan_tries = 3 + +# The backend name for a given driver implementation (string value) +#volume_backend_name = + +# Do we attach/detach volumes in cinder using multipath for volume to image and +# image to volume transfers? (boolean value) +#use_multipath_for_image_xfer = false + +# If this is set to True, attachment of volumes for image transfer will be +# aborted when multipathd is not running. Otherwise, it will fallback to single +# path. (boolean value) +#enforce_multipath_for_image_xfer = false + +# Method used to wipe old volumes (string value) +# Allowed values: none, zero, shred +#volume_clear = zero + +# Size in MiB to wipe at start of old volumes. 0 => all (integer value) +#volume_clear_size = 0 + +# The flag to pass to ionice to alter the i/o priority of the process used to +# zero a volume after deletion, for example "-c3" for idle only priority. +# (string value) +#volume_clear_ionice = + +# iSCSI target user-land tool to use. tgtadm is default, use lioadm for LIO +# iSCSI support, scstadmin for SCST target support, iseradm for the ISER +# protocol, ietadm for iSCSI Enterprise Target, iscsictl for Chelsio iSCSI +# Target or fake for testing. 
(string value) +# Allowed values: tgtadm, lioadm, scstadmin, iseradm, iscsictl, ietadm, fake +#iscsi_helper = tgtadm + +# Volume configuration file storage directory (string value) +#volumes_dir = $state_path/volumes + +# IET configuration file (string value) +#iet_conf = /etc/iet/ietd.conf + +# Chiscsi (CXT) global defaults configuration file (string value) +#chiscsi_conf = /etc/chelsio-iscsi/chiscsi.conf + +# Sets the behavior of the iSCSI target to either perform blockio or fileio +# optionally, auto can be set and Cinder will autodetect type of backing device +# (string value) +# Allowed values: blockio, fileio, auto +#iscsi_iotype = fileio + +# The default block size used when copying/clearing volumes (string value) +#volume_dd_blocksize = 1M + +# The blkio cgroup name to be used to limit bandwidth of volume copy (string +# value) +#volume_copy_blkio_cgroup_name = cinder-volume-copy + +# The upper limit of bandwidth of volume copy. 0 => unlimited (integer value) +#volume_copy_bps_limit = 0 + +# Sets the behavior of the iSCSI target to either perform write-back(on) or +# write-through(off). This parameter is valid if iscsi_helper is set to tgtadm +# or iseradm. (string value) +# Allowed values: on, off +#iscsi_write_cache = on + +# Sets the target-specific flags for the iSCSI target. Only used for tgtadm to +# specify backing device flags using bsoflags option. The specified string is +# passed as is to the underlying tool. (string value) +#iscsi_target_flags = + +# Determines the iSCSI protocol for new iSCSI volumes, created with tgtadm or +# lioadm target helpers. In order to enable RDMA, this parameter should be set +# with the value "iser". The supported iSCSI protocol values are "iscsi" and +# "iser". (string value) +# Allowed values: iscsi, iser +#iscsi_protocol = iscsi + +# The path to the client certificate key for verification, if the driver +# supports it. (string value) +#driver_client_cert_key = + +# The path to the client certificate for verification, if the driver supports +# it. (string value) +#driver_client_cert = + +# Tell driver to use SSL for connection to backend storage if the driver +# supports it. (boolean value) +#driver_use_ssl = false + +# Float representation of the over subscription ratio when thin provisioning is +# involved. Default ratio is 20.0, meaning provisioned capacity can be 20 times +# of the total physical capacity. If the ratio is 10.5, it means provisioned +# capacity can be 10.5 times of the total physical capacity. A ratio of 1.0 +# means provisioned capacity cannot exceed the total physical capacity. A ratio +# lower than 1.0 will be ignored and the default value will be used instead. +# (floating point value) +#max_over_subscription_ratio = 20.0 + +# Certain ISCSI targets have predefined target names, SCST target driver uses +# this name. (string value) +#scst_target_iqn_name = + +# SCST target implementation can choose from multiple SCST target drivers. +# (string value) +#scst_target_driver = iscsi + +# Option to enable/disable CHAP authentication for targets. (boolean value) +# Deprecated group/name - [DEFAULT]/eqlx_use_chap +#use_chap_auth = false + +# CHAP user name. (string value) +# Deprecated group/name - [DEFAULT]/eqlx_chap_login +#chap_username = + +# Password for specified CHAP account name. (string value) +# Deprecated group/name - [DEFAULT]/eqlx_chap_password +#chap_password = + +# Namespace for driver private data values to be saved in. 
(string value) +#driver_data_namespace = + +# String representation for an equation that will be used to filter hosts. Only +# used when the driver filter is set to be used by the Cinder scheduler. +# (string value) +#filter_function = + +# String representation for an equation that will be used to determine the +# goodness of a host. Only used when using the goodness weigher is set to be +# used by the Cinder scheduler. (string value) +#goodness_function = + +# If set to True the http client will validate the SSL certificate of the +# backend endpoint. (boolean value) +#driver_ssl_cert_verify = false + +# List of options that control which trace info is written to the DEBUG log +# level to assist developers. Valid values are method and api. (list value) +#trace_flags = + +# There are two types of target configurations managed (replicate to another +# configured backend) or unmanaged (replicate to a device not managed by +# Cinder). (boolean value) +#managed_replication_target = true + +# List of k/v pairs representing a replication target for this backend device. +# For unmanaged the format is: {'key-1'='val1' 'key-2'='val2'...},{...} and for +# managed devices its simply a list of valid configured backend_names that the +# driver supports replicating to: backend-a,bakcend-b... (list value) +#replication_devices = + +# If set to True, upload-to-image in raw format will create a cloned volume and +# register its location to the image service, instead of uploading the volume +# content. The cinder backend and locations support must be enabled in the +# image service, and glance_api_version must be set to 2. (boolean value) +#image_upload_use_cinder_backend = false + +# If set to True, the image volume created by upload-to-image will be placed in +# the internal tenant. Otherwise, the image volume is created in the current +# context's tenant. (boolean value) +#image_upload_use_internal_tenant = false + +# Enable the image volume cache for this backend. (boolean value) +#image_volume_cache_enabled = false + +# Max size of the image volume cache for this backend in GB. 0 => unlimited. +# (integer value) +#image_volume_cache_max_size_gb = 0 + +# Max number of entries allowed in the image volume cache. 0 => unlimited. +# (integer value) +#image_volume_cache_max_count = 0 + +# The maximum number of times to rescan iSER targetto find volume (integer +# value) +#num_iser_scan_tries = 3 + +# Prefix for iSER volumes (string value) +#iser_target_prefix = iqn.2010-10.org.openstack: + +# The IP address that the iSER daemon is listening on (string value) +#iser_ip_address = $my_ip + +# The port that the iSER daemon is listening on (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#iser_port = 3260 + +# The name of the iSER target user-land tool to use (string value) +#iser_helper = tgtadm + +# Public url to use for versions endpoint. The default is None, which will use +# the request's host_url attribute to populate the URL base. If Cinder is +# operating behind a proxy, you will want to change this to represent the +# proxy's URL. (string value) +#public_endpoint = + +# Nimble Controller pool name (string value) +#nimble_pool_name = default + +# Nimble Subnet Label (string value) +#nimble_subnet_label = * + +# Path to store VHD backed volumes (string value) +#windows_iscsi_lun_path = C:\iSCSIVirtualDisks + +# Pool or Vdisk name to use for volume creation. (string value) +#hpmsa_backend_name = A + +# linear (for Vdisk) or virtual (for Pool). 
(string value) +# Allowed values: linear, virtual +#hpmsa_backend_type = virtual + +# HPMSA API interface protocol. (string value) +# Allowed values: http, https +#hpmsa_api_protocol = https + +# Whether to verify HPMSA array SSL certificate. (boolean value) +#hpmsa_verify_certificate = false + +# HPMSA array SSL certificate path. (string value) +#hpmsa_verify_certificate_path = + +# List of comma-separated target iSCSI IP addresses. (list value) +#hpmsa_iscsi_ips = + +# A list of url schemes that can be downloaded directly via the direct_url. +# Currently supported schemes: [file]. (list value) +#allowed_direct_url_schemes = + +# Default core properties of image (list value) +#glance_core_properties = checksum,container_format,disk_format,image_name,image_id,min_disk,min_ram,name,size + +# Name for the VG that will contain exported volumes (string value) +#volume_group = cinder-volumes + +# If >0, create LVs with multiple mirrors. Note that this requires lvm_mirrors +# + 2 PVs with available space (integer value) +#lvm_mirrors = 0 + +# Type of LVM volumes to deploy; (default, thin, or auto). Auto defaults to +# thin if thin is supported. (string value) +# Allowed values: default, thin, auto +#lvm_type = default + +# LVM conf file to use for the LVM driver in Cinder; this setting is ignored if +# the specified file does not exist (You can also specify 'None' to not use a +# conf file even if one exists). (string value) +#lvm_conf_file = /etc/cinder/lvm.conf + +# use this file for cinder emc plugin config data (string value) +#cinder_emc_config_file = /etc/cinder/cinder_emc_config.xml + +# IP address or Hostname of NAS system. (string value) +#nas_ip = + +# User name to connect to NAS system. (string value) +#nas_login = admin + +# Password to connect to NAS system. (string value) +#nas_password = + +# SSH port to use to connect to NAS system. (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#nas_ssh_port = 22 + +# Filename of private key to use for SSH authentication. (string value) +#nas_private_key = + +# Allow network-attached storage systems to operate in a secure environment +# where root level access is not permitted. If set to False, access is as the +# root user and insecure. If set to True, access is not as root. If set to +# auto, a check is done to determine if this is a new installation: True is +# used if so, otherwise False. Default is auto. (string value) +#nas_secure_file_operations = auto + +# Set more secure file permissions on network-attached storage volume files to +# restrict broad other/world access. If set to False, volumes are created with +# open permissions. If set to True, volumes are created with permissions for +# the cinder user and group (660). If set to auto, a check is done to determine +# if this is a new installation: True is used if so, otherwise False. Default +# is auto. (string value) +#nas_secure_file_permissions = auto + +# Path to the share to use for storing Cinder volumes. For example: +# "/srv/export1" for an NFS server export available at 10.0.5.10:/srv/export1 . +# (string value) +#nas_share_path = + +# Options used to mount the storage backend file system where Cinder volumes +# are stored. (string value) +#nas_mount_options = + +# Provisioning type that will be used when creating volumes. 
(string value) +# Allowed values: thin, thick +# Deprecated group/name - [DEFAULT]/glusterfs_sparsed_volumes +# Deprecated group/name - [DEFAULT]/glusterfs_qcow2_volumes +#nas_volume_prov_type = thin + +# IP address or hostname of mg-a (string value) +#gateway_mga = + +# IP address or hostname of mg-b (string value) +#gateway_mgb = + +# Use igroups to manage targets and initiators (boolean value) +#use_igroups = false + +# Global backend request timeout, in seconds (integer value) +#request_timeout = 300 + +# Comma-separated list of REST servers IP to connect to. (eg +# http://IP1/,http://IP2:81/path (string value) +#srb_base_urls = + +# XMS cluster id in multi-cluster environment (string value) +#xtremio_cluster_name = + +# Number of retries in case array is busy (integer value) +#xtremio_array_busy_retry_count = 5 + +# Interval between retries in case array is busy (integer value) +#xtremio_array_busy_retry_interval = 5 + +# Serial number of storage system (string value) +#hitachi_serial_number = + +# Name of an array unit (string value) +#hitachi_unit_name = + +# Pool ID of storage system (integer value) +#hitachi_pool_id = + +# Thin pool ID of storage system (integer value) +#hitachi_thin_pool_id = + +# Range of logical device of storage system (string value) +#hitachi_ldev_range = + +# Default copy method of storage system (string value) +#hitachi_default_copy_method = FULL + +# Copy speed of storage system (integer value) +#hitachi_copy_speed = 3 + +# Interval to check copy (integer value) +#hitachi_copy_check_interval = 3 + +# Interval to check copy asynchronously (integer value) +#hitachi_async_copy_check_interval = 10 + +# Control port names for HostGroup or iSCSI Target (string value) +#hitachi_target_ports = + +# Range of group number (string value) +#hitachi_group_range = + +# Request for creating HostGroup or iSCSI Target (boolean value) +#hitachi_group_request = false + +# Infortrend raid pool name list. It is separated with comma. (string value) +#infortrend_pools_name = + +# The Infortrend CLI absolute path. By default, it is at +# /opt/bin/Infortrend/raidcmd_ESDS10.jar (string value) +#infortrend_cli_path = /opt/bin/Infortrend/raidcmd_ESDS10.jar + +# Maximum retry time for cli. Default is 5. (integer value) +#infortrend_cli_max_retries = 5 + +# Default timeout for CLI copy operations in minutes. Support: migrate volume, +# create cloned volume and create volume from snapshot. By Default, it is 30 +# minutes. (integer value) +#infortrend_cli_timeout = 30 + +# Infortrend raid channel ID list on Slot A for OpenStack usage. It is +# separated with comma. By default, it is the channel 0~7. (string value) +#infortrend_slots_a_channels_id = 0,1,2,3,4,5,6,7 + +# Infortrend raid channel ID list on Slot B for OpenStack usage. It is +# separated with comma. By default, it is the channel 0~7. (string value) +#infortrend_slots_b_channels_id = 0,1,2,3,4,5,6,7 + +# Let the volume use specific provisioning. By default, it is the full +# provisioning. The supported options are full or thin. (string value) +#infortrend_provisioning = full + +# Let the volume use specific tiering level. By default, it is the level 0. The +# supported levels are 0,2,3,4. 
(string value) +#infortrend_tiering = 0 + +# Configuration file for HDS iSCSI cinder plugin (string value) +#hds_hnas_iscsi_config_file = /opt/hds/hnas/cinder_iscsi_conf.xml + +# The name of ceph cluster (string value) +#rbd_cluster_name = ceph + +# The RADOS pool where rbd volumes are stored (string value) +#rbd_pool = rbd + +# The RADOS client name for accessing rbd volumes - only set when using cephx +# authentication (string value) +#rbd_user = + +# Path to the ceph configuration file (string value) +#rbd_ceph_conf = + +# Flatten volumes created from snapshots to remove dependency from volume to +# snapshot (boolean value) +#rbd_flatten_volume_from_snapshot = false + +# The libvirt uuid of the secret for the rbd_user volumes (string value) +#rbd_secret_uuid = + +# Directory where temporary image files are stored when the volume driver does +# not write them directly to the volume. Warning: this option is now +# deprecated, please use image_conversion_dir instead. (string value) +#volume_tmp_dir = + +# Maximum number of nested volume clones that are taken before a flatten +# occurs. Set to 0 to disable cloning. (integer value) +#rbd_max_clone_depth = 5 + +# Volumes will be chunked into objects of this size (in megabytes). (integer +# value) +#rbd_store_chunk_size = 4 + +# Timeout value (in seconds) used when connecting to ceph cluster. If value < +# 0, no timeout is set and default librados value is used. (integer value) +#rados_connect_timeout = -1 + +# Number of retries if connection to ceph cluster failed. (integer value) +#rados_connection_retries = 3 + +# Interval value (in seconds) between connection retries to ceph cluster. +# (integer value) +#rados_connection_interval = 5 + +# The hostname (or IP address) for the storage system (string value) +#tintri_server_hostname = + +# User name for the storage system (string value) +#tintri_server_username = + +# Password for the storage system (string value) +#tintri_server_password = + +# API version for the storage system (string value) +#tintri_api_version = v310 + +# Instance numbers for HORCM (string value) +#hitachi_horcm_numbers = 200,201 + +# Username of storage system for HORCM (string value) +#hitachi_horcm_user = + +# Password of storage system for HORCM (string value) +#hitachi_horcm_password = + +# Add to HORCM configuration (boolean value) +#hitachi_horcm_add_conf = true + +# Timeout until a resource lock is released, in seconds. The value must be +# between 0 and 7200. (integer value) +#hitachi_horcm_resource_lock_timeout = 600 + +# HP LeftHand WSAPI Server Url like https://:8081/lhos (string +# value) +#hplefthand_api_url = + +# HP LeftHand Super user username (string value) +#hplefthand_username = + +# HP LeftHand Super user password (string value) +#hplefthand_password = + +# HP LeftHand cluster name (string value) +#hplefthand_clustername = + +# Configure CHAP authentication for iSCSI connections (Default: Disabled) +# (boolean value) +#hplefthand_iscsi_chap_enabled = false + +# Enable HTTP debugging to LeftHand (boolean value) +#hplefthand_debug = false + +# Administrative user account name used to access the storage system or proxy +# server. (string value) +#netapp_login = + +# Password for the administrative user account specified in the netapp_login +# option. (string value) +#netapp_password = + +# The hostname (or IP address) for the storage system or proxy server. (string +# value) +#netapp_server_hostname = + +# The TCP port to use for communication with the storage system or proxy +# server. 
If not specified, Data ONTAP drivers will use 80 for HTTP and 443 for +# HTTPS; E-Series will use 8080 for HTTP and 8443 for HTTPS. (integer value) +#netapp_server_port = + +# This option is used to specify the path to the E-Series proxy application on +# a proxy server. The value is combined with the value of the +# netapp_transport_type, netapp_server_hostname, and netapp_server_port options +# to create the URL used by the driver to connect to the proxy application. +# (string value) +#netapp_webservice_path = /devmgr/v2 + +# This option is only utilized when the storage family is configured to +# eseries. This option is used to restrict provisioning to the specified +# controllers. Specify the value of this option to be a comma separated list of +# controller hostnames or IP addresses to be used for provisioning. (string +# value) +#netapp_controller_ips = + +# Password for the NetApp E-Series storage array. (string value) +#netapp_sa_password = + +# This option specifies whether the driver should allow operations that require +# multiple attachments to a volume. An example would be live migration of +# servers that have volumes attached. When enabled, this backend is limited to +# 256 total volumes in order to guarantee volumes can be accessed by more than +# one host. (boolean value) +#netapp_enable_multiattach = false + +# The transport protocol used when communicating with the storage system or +# proxy server. (string value) +# Allowed values: http, https +#netapp_transport_type = http + +# This option defines the type of operating system that will access a LUN +# exported from Data ONTAP; it is assigned to the LUN at the time it is +# created. (string value) +#netapp_lun_ostype = + +# This option defines the type of operating system for all initiators that can +# access a LUN. This information is used when mapping LUNs to individual hosts +# or groups of hosts. (string value) +# Deprecated group/name - [DEFAULT]/netapp_eseries_host_type +#netapp_host_type = + +# This option is used to restrict provisioning to the specified pools. Specify +# the value of this option to be a regular expression which will be applied to +# the names of objects from the storage backend which represent pools in +# Cinder. This option is only utilized when the storage protocol is configured +# to use iSCSI or FC. 
(string value) +# Deprecated group/name - [DEFAULT]/netapp_volume_list +# Deprecated group/name - [DEFAULT]/netapp_storage_pools +#netapp_pool_name_search_pattern = (.+) + +# Request for FC Zone creating HostGroup (boolean value) +#hitachi_zoning_request = false + +# Number of volumes allowed per project (integer value) +#quota_volumes = 10 + +# Number of volume snapshots allowed per project (integer value) +#quota_snapshots = 10 + +# Number of consistencygroups allowed per project (integer value) +#quota_consistencygroups = 10 + +# Total amount of storage, in gigabytes, allowed for volumes and snapshots per +# project (integer value) +#quota_gigabytes = 1000 + +# Number of volume backups allowed per project (integer value) +#quota_backups = 10 + +# Total amount of storage, in gigabytes, allowed for backups per project +# (integer value) +#quota_backup_gigabytes = 1000 + +# Number of seconds until a reservation expires (integer value) +#reservation_expire = 86400 + +# Count of reservations until usage is refreshed (integer value) +#until_refresh = 0 + +# Number of seconds between subsequent usage refreshes (integer value) +#max_age = 0 + +# Default driver to use for quota checks (string value) +#quota_driver = cinder.quota.DbQuotaDriver + +# Enables or disables use of default quota class with default quota. (boolean +# value) +#use_default_quota_class = true + +# Max size allowed per volume, in gigabytes (integer value) +#per_volume_size_limit = -1 + +# The configuration file for the Cinder Huawei driver. (string value) +#cinder_huawei_conf_file = /etc/cinder/cinder_huawei_conf.xml + +# Storage Center System Serial Number (integer value) +#dell_sc_ssn = 64702 + +# Dell API port (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#dell_sc_api_port = 3033 + +# Name of the server folder to use on the Storage Center (string value) +#dell_sc_server_folder = openstack + +# Name of the volume folder to use on the Storage Center (string value) +#dell_sc_volume_folder = openstack + +# Enable HTTPS SC certificate verification. (boolean value) +#dell_sc_verify_cert = false + +# Which filter class names to use for filtering hosts when not specified in the +# request. (list value) +#scheduler_default_filters = AvailabilityZoneFilter,CapacityFilter,CapabilitiesFilter + +# Which weigher class names to use for weighing hosts. (list value) +#scheduler_default_weighers = CapacityWeigher + +# Base dir containing mount point for NFS share. (string value) +#backup_mount_point_base = $state_path/backup_mount + +# NFS share in hostname:path, ipv4addr:path, or "[ipv6addr]:path" format. +# (string value) +#backup_share = + +# Mount options passed to the NFS client. See NFS man page for details. (string +# value) +#backup_mount_options = + +# IP address/hostname of Blockbridge API. (string value) +#blockbridge_api_host = + +# Override HTTPS port to connect to Blockbridge API server. 
(integer value) +#blockbridge_api_port = + +# Blockbridge API authentication scheme (token or password) (string value) +# Allowed values: token, password +#blockbridge_auth_scheme = token + +# Blockbridge API token (for auth scheme 'token') (string value) +#blockbridge_auth_token = + +# Blockbridge API user (for auth scheme 'password') (string value) +#blockbridge_auth_user = + +# Blockbridge API password (for auth scheme 'password') (string value) +#blockbridge_auth_password = + +# Defines the set of exposed pools and their associated backend query strings +# (dict value) +#blockbridge_pools = OpenStack:+openstack + +# Default pool name if unspecified. (string value) +#blockbridge_default_pool = + +# Data path IP address (string value) +#zfssa_data_ip = + +# HTTPS port number (string value) +#zfssa_https_port = 443 + +# Options to be passed while mounting share over nfs (string value) +#zfssa_nfs_mount_options = + +# Storage pool name. (string value) +#zfssa_nfs_pool = + +# Project name. (string value) +#zfssa_nfs_project = NFSProject + +# Share name. (string value) +#zfssa_nfs_share = nfs_share + +# Data compression. (string value) +# Allowed values: off, lzjb, gzip-2, gzip, gzip-9 +#zfssa_nfs_share_compression = off + +# Synchronous write bias-latency, throughput. (string value) +# Allowed values: latency, throughput +#zfssa_nfs_share_logbias = latency + +# REST connection timeout. (seconds) (integer value) +#zfssa_rest_timeout = + +# Flag to enable local caching: True, False. (boolean value) +#zfssa_enable_local_cache = true + +# Name of directory inside zfssa_nfs_share where cache volumes are stored. +# (string value) +#zfssa_cache_directory = os-cinder-cache + +# Space network name to use for data transfer (string value) +#hgst_net = Net 1 (IPv4) + +# Comma separated list of Space storage servers:devices. ex: +# os1_stor:gbd0,os2_stor:gbd0 (string value) +#hgst_storage_servers = os:gbd0 + +# Should spaces be redundantly stored (1/0) (string value) +#hgst_redundancy = 0 + +# User to own created spaces (string value) +#hgst_space_user = root + +# Group to own created spaces (string value) +#hgst_space_group = disk + +# UNIX mode for created spaces (string value) +#hgst_space_mode = 0600 + +# Directory used for temporary storage during image conversion (string value) +#image_conversion_dir = $state_path/conversion + +# Match this value when searching for nova in the service catalog. Format is: +# separated values of the form: :: +# (string value) +#nova_catalog_info = compute:Compute Service:publicURL +nova_catalog_info = compute:nova:publicURL + +# Same as nova_catalog_info, but for admin endpoint. (string value) +#nova_catalog_admin_info = compute:Compute Service:adminURL +nova_catalog_admin_info = compute:nova:adminURL + +# Override service catalog lookup with template for nova endpoint e.g. +# http://localhost:8774/v2/%(project_id)s (string value) +#nova_endpoint_template = + +# Same as nova_endpoint_template, but for admin endpoint. (string value) +#nova_endpoint_admin_template = + +# Region name of this node (string value) +#os_region_name = + +# Location of ca certificates file to use for nova client requests. (string +# value) +#nova_ca_certificates_file = + +# Allow to perform insecure SSL requests to nova (boolean value) +#nova_api_insecure = false + +# Connect with multipath (FC only).(Default is false.) (boolean value) +#flashsystem_multipath_enabled = false + +# DPL pool uuid in which DPL volumes are stored. (string value) +#dpl_pool = + +# DPL port number. 
(integer value) +# Minimum value: 1 +# Maximum value: 65535 +#dpl_port = 8357 + +# Add CHAP user (boolean value) +#hitachi_add_chap_user = false + +# iSCSI authentication method (string value) +#hitachi_auth_method = + +# iSCSI authentication username (string value) +#hitachi_auth_user = HBSD-CHAP-user + +# iSCSI authentication password (string value) +#hitachi_auth_password = HBSD-CHAP-password + +# Driver to use for volume creation (string value) +#volume_driver = cinder.volume.drivers.lvm.LVMVolumeDriver + +# Timeout for creating the volume to migrate to when performing volume +# migration (seconds) (integer value) +#migration_create_volume_timeout_secs = 300 + +# Offload pending volume delete during volume service startup (boolean value) +#volume_service_inithost_offload = false + +# FC Zoning mode configured (string value) +#zoning_mode = none + +# User defined capabilities, a JSON formatted string specifying key/value +# pairs. The key/value pairs can be used by the CapabilitiesFilter to select +# between backends when requests specify volume types. For example, specifying +# a service level or the geographical location of a backend, then creating a +# volume type to allow the user to select by these different properties. +# (string value) +#extra_capabilities = {} + +# Default iSCSI Port ID of FlashSystem. (Default port is 0.) (integer value) +#flashsystem_iscsi_portid = 0 + +# Connection protocol should be FC. (Default is FC.) (string value) +#flashsystem_connection_protocol = FC + +# Allows vdisk to multi host mapping. (Default is True) (boolean value) +#flashsystem_multihostmap_enabled = true + +# 3PAR WSAPI Server Url like https://<3par ip>:8080/api/v1 (string value) +#hp3par_api_url = + +# 3PAR username with the 'edit' role (string value) +#hp3par_username = + +# 3PAR password for the user specified in hp3par_username (string value) +#hp3par_password = + +# List of the CPG(s) to use for volume creation (list value) +#hp3par_cpg = OpenStack + +# The CPG to use for Snapshots for volumes. If empty the userCPG will be used. +# (string value) +#hp3par_cpg_snap = + +# The time in hours to retain a snapshot. You can't delete it before this +# expires. (string value) +#hp3par_snapshot_retention = + +# The time in hours when a snapshot expires and is deleted. This must be +# larger than expiration (string value) +#hp3par_snapshot_expiration = + +# Enable HTTP debugging to 3PAR (boolean value) +#hp3par_debug = false + +# List of target iSCSI addresses to use. (list value) +#hp3par_iscsi_ips = + +# Enable CHAP authentication for iSCSI connections. (boolean value) +#hp3par_iscsi_chap_enabled = false + +# Proxy driver that connects to the IBM Storage Array (string value) +#xiv_ds8k_proxy = xiv_ds8k_openstack.nova_proxy.XIVDS8KNovaProxy + +# Connection type to the IBM Storage Array (string value) +# Allowed values: fibre_channel, iscsi +#xiv_ds8k_connection_type = iscsi + +# CHAP authentication mode, effective only for iscsi (disabled|enabled) (string +# value) +# Allowed values: disabled, enabled +#xiv_chap = disabled + +# List of Management IP addresses (separated by commas) (string value) +#management_ips = + +# DEPRECATED: This will be removed in the Liberty release. Use san_login and +# san_password instead. This directly sets the Datera API token. (string value) +#datera_api_token = + +# Datera API port. (string value) +#datera_api_port = 7717 + +# Datera API version. (string value) +#datera_api_version = 1 + +# Number of replicas to create of an inode. 
(string value) +#datera_num_replicas = 3 + +# List of all available devices (list value) +#available_devices = + +# URL to the Quobyte volume e.g., quobyte:/// (string +# value) +#quobyte_volume_url = + +# Path to a Quobyte Client configuration file. (string value) +#quobyte_client_cfg = + +# Create volumes as sparse files which take no space. If set to False, volume +# is created as a regular file. In such case volume creation takes a lot of time. +# (boolean value) +#quobyte_sparsed_volumes = true + +# Create volumes as QCOW2 files rather than raw files. (boolean value) +#quobyte_qcow2_volumes = true + +# Base dir containing the mount point for the Quobyte volume. (string value) +#quobyte_mount_point_base = $state_path/mnt + +# File with the list of available vzstorage shares. (string value) +#vzstorage_shares_config = /etc/cinder/vzstorage_shares + +# Create volumes as sparsed files which take no space rather than regular files +# when using raw format, in which case volume creation takes a lot of time. +# (boolean value) +#vzstorage_sparsed_volumes = true + +# Percent of ACTUAL usage of the underlying volume before no new volumes can be +# allocated to the volume destination. (floating point value) +#vzstorage_used_ratio = 0.95 + +# Base dir containing mount points for vzstorage shares. (string value) +#vzstorage_mount_point_base = $state_path/mnt + +# Mount options passed to the vzstorage client. See section of the pstorage- +# mount man page for details. (list value) +#vzstorage_mount_options = + +# File with the list of available nfs shares (string value) +#nfs_shares_config = /etc/cinder/nfs_shares + +# Create volumes as sparsed files which take no space. If set to False, volume is +# created as a regular file. In such case volume creation takes a lot of time. +# (boolean value) +#nfs_sparsed_volumes = true + +# Percent of ACTUAL usage of the underlying volume before no new volumes can be +# allocated to the volume destination. Note that this option is deprecated in +# favor of "reserved_percentage" and will be removed in the Mitaka release. +# (floating point value) +#nfs_used_ratio = 0.95 + +# This will compare the allocated to available space on the volume destination. +# If the ratio exceeds this number, the destination will no longer be valid. +# Note that this option is deprecated in favor of "max_oversubscription_ratio" +# and will be removed in the Mitaka release. (floating point value) +#nfs_oversub_ratio = 1.0 + +# Base dir containing mount points for nfs shares. (string value) +#nfs_mount_point_base = $state_path/mnt + +# Mount options passed to the nfs client. See section of the nfs man page for +# details. (string value) +#nfs_mount_options = + +# The number of attempts to mount nfs shares before raising an error. At least +# one attempt will be made to mount an nfs share, regardless of the value +# specified. (integer value) +#nfs_mount_attempts = 3 + +# +# From oslo.log +# + +# Print debugging output (set logging level to DEBUG instead of default INFO +# level). (boolean value) +#debug = false +debug = True + +# If set to false, will disable INFO logging level, making WARNING the default. +# (boolean value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#verbose = true +verbose = True + +# The name of a logging configuration file. This file is appended to any +# existing logging configuration files. For details about logging configuration +# files, see the Python logging module documentation. 
(string value) +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append = + +# DEPRECATED. A logging.Formatter log message format string which may use any +# of the available logging.LogRecord attributes. This option is deprecated. +# Please use logging_context_format_string and logging_default_format_string +# instead. (string value) +#log_format = + +# Format string for %%(asctime)s in log records. Default: %(default)s . (string +# value) +#log_date_format = %Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to output to. If no default is set, logging will +# go to stdout. (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file = + +# (Optional) The base directory used for relative --log-file paths. (string +# value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir = +log_dir = /var/log/cinder + +# Use syslog for logging. Existing syslog format is DEPRECATED and will be +# changed later to honor RFC5424. (boolean value) +#use_syslog = false + +# (Optional) Enables or disables syslog rfc5424 format for logging. If enabled, +# prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The +# format without the APP-NAME is deprecated in Kilo, and will be removed in +# Mitaka, along with this option. (boolean value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#use_syslog_rfc_format = true + +# Syslog facility to receive log lines. (string value) +#syslog_log_facility = LOG_USER + +# Log output to standard error. (boolean value) +#use_stderr = true + +# Format string to use for log messages with context. (string value) +#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages without context. (string value) +#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Data to append to log format when level is DEBUG. (string value) +#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format. (string value) +#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s + +# List of logger=LEVEL pairs. (list value) +#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN + +# Enables or disables publication of error events. (boolean value) +#publish_errors = false + +# The format for an instance that is passed with the log message. (string +# value) +#instance_format = "[instance: %(uuid)s] " + +# The format for an instance UUID that is passed with the log message. (string +# value) +#instance_uuid_format = "[instance: %(uuid)s] " + +# Enables or disables fatal status of deprecations. (boolean value) +#fatal_deprecations = false + +# +# From oslo.messaging +# + +# Size of RPC connection pool. (integer value) +# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size +#rpc_conn_pool_size = 30 + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. (string value) +#rpc_zmq_bind_address = * + +# MatchMaker driver. 
(string value) +#rpc_zmq_matchmaker = local + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port = 9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts = 1 + +# Maximum number of ingress messages to locally buffer per topic. Default is +# unlimited. (integer value) +#rpc_zmq_topic_backlog = + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir = /var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match +# "host" option, if running Nova. (string value) +#rpc_zmq_host = localhost + +# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. +# (integer value) +#rpc_cast_timeout = 30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq = 300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl = 600 + +# Size of executor thread pool. (integer value) +# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size +#executor_thread_pool_size = 64 + +# The Driver(s) to handle sending notifications. Possible values are +# messaging, messagingv2, routing, log, test, noop (multi valued) +#notification_driver = +notification_driver = messagingv2 + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +#notification_topics = notifications + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout = 60 + +# A URL representing the messaging driver to use and its full configuration. If +# not set, we fall back to the rpc_backend option and driver specific +# configuration. (string value) +#transport_url = + +# The messaging driver to use, defaults to rabbit. Other drivers include qpid +# and zmq. (string value) +#rpc_backend = rabbit +rpc_backend = rabbit + +# The default exchange under which topics are scoped. May be overridden by an +# exchange name specified in the transport_url option. (string value) +#control_exchange = openstack +control_exchange = openstack + +# +# From oslo.messaging +# + +# Size of RPC connection pool. (integer value) +# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size +#rpc_conn_pool_size = 30 + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. (string value) +#rpc_zmq_bind_address = * + +# MatchMaker driver. (string value) +#rpc_zmq_matchmaker = local + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port = 9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts = 1 + +# Maximum number of ingress messages to locally buffer per topic. Default is +# unlimited. (integer value) +#rpc_zmq_topic_backlog = + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir = /var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match +# "host" option, if running Nova. (string value) +#rpc_zmq_host = localhost + +# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. +# (integer value) +#rpc_cast_timeout = 30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq = 300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl = 600 + +# Size of executor thread pool. (integer value) +# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size +#executor_thread_pool_size = 64 + +# The Driver(s) to handle sending notifications. 
Possible values are +# messaging, messagingv2, routing, log, test, noop (multi valued) +#notification_driver = + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +#notification_topics = notifications + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout = 60 + +# A URL representing the messaging driver to use and its full configuration. If +# not set, we fall back to the rpc_backend option and driver specific +# configuration. (string value) +#transport_url = + +# The messaging driver to use, defaults to rabbit. Other drivers include qpid +# and zmq. (string value) +#rpc_backend = rabbit + +# The default exchange under which topics are scoped. May be overridden by an +# exchange name specified in the transport_url option. (string value) +#control_exchange = openstack +api_paste_config=/etc/cinder/api-paste.ini + + +[BRCD_FABRIC_EXAMPLE] + +# +# From cinder +# + +# Management IP of fabric (string value) +#fc_fabric_address = + +# Fabric user ID (string value) +#fc_fabric_user = + +# Password for user (string value) +#fc_fabric_password = + +# Connecting port (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#fc_fabric_port = 22 + +# overridden zoning policy (string value) +#zoning_policy = initiator-target + +# overridden zoning activation state (boolean value) +#zone_activate = true + +# overridden zone name prefix (string value) +#zone_name_prefix = + +# Principal switch WWN of the fabric (string value) +#principal_switch_wwn = + + +[CISCO_FABRIC_EXAMPLE] + +# +# From cinder +# + +# Management IP of fabric (string value) +#cisco_fc_fabric_address = + +# Fabric user ID (string value) +#cisco_fc_fabric_user = + +# Password for user (string value) +#cisco_fc_fabric_password = + +# Connecting port (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#cisco_fc_fabric_port = 22 + +# overridden zoning policy (string value) +#cisco_zoning_policy = initiator-target + +# overridden zoning activation state (boolean value) +#cisco_zone_activate = true + +# overridden zone name prefix (string value) +#cisco_zone_name_prefix = + +# VSAN of the Fabric (string value) +#cisco_zoning_vsan = + + +[cors] + +# +# From oslo.middleware +# + +# Indicate whether this resource may be shared with the domain received in the +# requests "origin" header. (string value) +#allowed_origin = + +# Indicate that the actual request can include user credentials (boolean value) +#allow_credentials = true + +# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple +# Headers. (list value) +#expose_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma + +# Maximum cache age of CORS preflight requests. (integer value) +#max_age = 3600 + +# Indicate which methods can be used during the actual request. (list value) +#allow_methods = GET,POST,PUT,DELETE,OPTIONS + +# Indicate which header field names may be used during the actual request. +# (list value) +#allow_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma + + +[cors.subdomain] + +# +# From oslo.middleware +# + +# Indicate whether this resource may be shared with the domain received in the +# requests "origin" header. (string value) +#allowed_origin = + +# Indicate that the actual request can include user credentials (boolean value) +#allow_credentials = true + +# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple +# Headers. 
(list value) +#expose_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma + +# Maximum cache age of CORS preflight requests. (integer value) +#max_age = 3600 + +# Indicate which methods can be used during the actual request. (list value) +#allow_methods = GET,POST,PUT,DELETE,OPTIONS + +# Indicate which header field names may be used during the actual request. +# (list value) +#allow_headers = Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma + + +[database] + +# +# From oslo.db +# + +# The file name to use with SQLite. (string value) +# Deprecated group/name - [DEFAULT]/sqlite_db +#sqlite_db = oslo.sqlite + +# If True, SQLite uses synchronous mode. (boolean value) +# Deprecated group/name - [DEFAULT]/sqlite_synchronous +#sqlite_synchronous = true + +# The back end to use for the database. (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend = sqlalchemy + +# The SQLAlchemy connection string to use to connect to the database. (string +# value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection = +connection = mysql+pymysql://cinder:qum5net@VARINET4ADDR/cinder + +# The SQLAlchemy connection string to use to connect to the slave database. +# (string value) +#slave_connection = + +# The SQL mode to be used for MySQL sessions. This option, including the +# default, overrides any server-set SQL mode. To use whatever SQL mode is set +# by the server configuration, set this to no value. Example: mysql_sql_mode= +# (string value) +#mysql_sql_mode = TRADITIONAL + +# Timeout before idle SQL connections are reaped. (integer value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout = 3600 + +# Minimum number of SQL connections to keep open in a pool. (integer value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size = 1 + +# Maximum number of SQL connections to keep open in a pool. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size = + +# Maximum number of database connection retries during startup. Set to -1 to +# specify an infinite retry count. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries = 10 + +# Interval between retries of opening a SQL connection. (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval = 10 + +# If set, use this value for max_overflow with SQLAlchemy. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow = + +# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug = 0 + +# Add Python stack traces to SQL as comment strings. (boolean value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace = false + +# If set, use this value for pool_timeout with SQLAlchemy. 
(integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout = + +# Enable the experimental use of database reconnect on connection lost. +# (boolean value) +#use_db_reconnect = false + +# Seconds between retries of a database transaction. (integer value) +#db_retry_interval = 1 + +# If True, increases the interval between retries of a database operation up to +# db_max_retry_interval. (boolean value) +#db_inc_retry_interval = true + +# If db_inc_retry_interval is set, the maximum seconds between retries of a +# database operation. (integer value) +#db_max_retry_interval = 10 + +# Maximum retries in case of connection error or deadlock error before error is +# raised. Set to -1 to specify an infinite retry count. (integer value) +#db_max_retries = 20 + + +[fc-zone-manager] + +# +# From cinder +# + +# FC Zone Driver responsible for zone management (string value) +#zone_driver = cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver.BrcdFCZoneDriver + +# Zoning policy configured by user; valid values include "initiator-target" or +# "initiator" (string value) +#zoning_policy = initiator-target + +# Comma separated list of Fibre Channel fabric names. This list of names is +# used to retrieve other SAN credentials for connecting to each SAN fabric +# (string value) +#fc_fabric_names = + +# FC SAN Lookup Service (string value) +#fc_san_lookup_service = cinder.zonemanager.drivers.brocade.brcd_fc_san_lookup_service.BrcdFCSanLookupService + +# Southbound connector for zoning operation (string value) +#brcd_sb_connector = cinder.zonemanager.drivers.brocade.brcd_fc_zone_client_cli.BrcdFCZoneClientCLI + +# Southbound connector for zoning operation (string value) +#cisco_sb_connector = cinder.zonemanager.drivers.cisco.cisco_fc_zone_client_cli.CiscoFCZoneClientCLI + + +[keymgr] + +# +# From cinder +# + +# Authentication url for encryption service. (string value) +#encryption_auth_url = http://localhost:5000/v3 + +# Url for encryption service. (string value) +#encryption_api_url = http://localhost:9311/v1 + +# The full class name of the key manager API class (string value) +#api_class = cinder.keymgr.conf_key_mgr.ConfKeyManager + +# Fixed key returned by key manager, specified in hex (string value) +#fixed_key = + + +[keystone_authtoken] + +# +# From keystonemiddleware.auth_token +# + +# Complete public Identity API endpoint. (string value) +#auth_uri = +auth_uri = http://VARINET4ADDR:5000/v2.0 + +# API version of the admin Identity API endpoint. (string value) +#auth_version = + +# Do not handle authorization requests within the middleware, but delegate the +# authorization decision to downstream WSGI components. (boolean value) +#delay_auth_decision = false + +# Request timeout value for communicating with Identity API server. (integer +# value) +#http_connect_timeout = + +# How many times are we trying to reconnect when communicating with Identity +# API Server. (integer value) +#http_request_max_retries = 3 + +# Env key for the swift cache. (string value) +#cache = + +# Required if identity server requires client certificate (string value) +#certfile = + +# Required if identity server requires client certificate (string value) +#keyfile = + +# A PEM encoded Certificate Authority to use when verifying HTTPs connections. +# Defaults to system CAs. (string value) +#cafile = + +# Verify HTTPS connections. (boolean value) +#insecure = false + +# The region in which the identity server can be found. 
(string value) +#region_name = + +# Directory used to cache files related to PKI tokens. (string value) +#signing_dir = + +# Optionally specify a list of memcached server(s) to use for caching. If left +# undefined, tokens will instead be cached in-process. (list value) +# Deprecated group/name - [DEFAULT]/memcache_servers +#memcached_servers = + +# In order to prevent excessive effort spent validating tokens, the middleware +# caches previously-seen tokens for a configurable duration (in seconds). Set +# to -1 to disable caching completely. (integer value) +#token_cache_time = 300 + +# Determines the frequency at which the list of revoked tokens is retrieved +# from the Identity service (in seconds). A high number of revocation events +# combined with a low cache duration may significantly reduce performance. +# (integer value) +#revocation_cache_time = 10 + +# (Optional) If defined, indicate whether token data should be authenticated or +# authenticated and encrypted. Acceptable values are MAC or ENCRYPT. If MAC, +# token data is authenticated (with HMAC) in the cache. If ENCRYPT, token data +# is encrypted and authenticated in the cache. If the value is not one of these +# options or empty, auth_token will raise an exception on initialization. +# (string value) +#memcache_security_strategy = + +# (Optional, mandatory if memcache_security_strategy is defined) This string is +# used for key derivation. (string value) +#memcache_secret_key = + +# (Optional) Number of seconds memcached server is considered dead before it is +# tried again. (integer value) +#memcache_pool_dead_retry = 300 + +# (Optional) Maximum total number of open connections to every memcached +# server. (integer value) +#memcache_pool_maxsize = 10 + +# (Optional) Socket timeout in seconds for communicating with a memcached +# server. (integer value) +#memcache_pool_socket_timeout = 3 + +# (Optional) Number of seconds a connection to memcached is held unused in the +# pool before it is closed. (integer value) +#memcache_pool_unused_timeout = 60 + +# (Optional) Number of seconds that an operation will wait to get a memcached +# client connection from the pool. (integer value) +#memcache_pool_conn_get_timeout = 10 + +# (Optional) Use the advanced (eventlet safe) memcached client pool. The +# advanced pool will only work under python 2.x. (boolean value) +#memcache_use_advanced_pool = false + +# (Optional) Indicate whether to set the X-Service-Catalog header. If False, +# middleware will not ask for service catalog on token validation and will not +# set the X-Service-Catalog header. (boolean value) +#include_service_catalog = true + +# Used to control the use and type of token binding. Can be set to: "disabled" +# to not check token binding. "permissive" (default) to validate binding +# information if the bind type is of a form known to the server and ignore it +# if not. "strict" like "permissive" but if the bind type is unknown the token +# will be rejected. "required" any form of token binding is needed to be +# allowed. Finally the name of a binding method that must be present in tokens. +# (string value) +#enforce_token_bind = permissive + +# If true, the revocation list will be checked for cached tokens. This requires +# that PKI tokens are configured on the identity server. (boolean value) +#check_revocations_for_cached = false + +# Hash algorithms to use for hashing PKI tokens. This may be a single algorithm +# or multiple. The algorithms are those supported by Python standard +# hashlib.new(). 
The hashes will be tried in the order given, so put the +# preferred one first for performance. The result of the first hash will be +# stored in the cache. This will typically be set to multiple values only while +# migrating from a less secure algorithm to a more secure one. Once all the old +# tokens are expired this option should be set to a single value for better +# performance. (list value) +#hash_algorithms = md5 + +# Prefix to prepend at the beginning of the path. Deprecated, use identity_uri. +# (string value) +#auth_admin_prefix = + +# Host providing the admin Identity API endpoint. Deprecated, use identity_uri. +# (string value) +#auth_host = 127.0.0.1 + +# Port of the admin Identity API endpoint. Deprecated, use identity_uri. +# (integer value) +#auth_port = 35357 + +# Protocol of the admin Identity API endpoint (http or https). Deprecated, use +# identity_uri. (string value) +#auth_protocol = https + +# Complete admin Identity API endpoint. This should specify the unversioned +# root endpoint e.g. https://localhost:35357/ (string value) +#identity_uri = +identity_uri = http://VARINET4ADDR:35357 + +# This option is deprecated and may be removed in a future release. Single +# shared secret with the Keystone configuration used for bootstrapping a +# Keystone installation, or otherwise bypassing the normal authentication +# process. This option should not be used, use `admin_user` and +# `admin_password` instead. (string value) +#admin_token = + +# Service username. (string value) +#admin_user = +admin_user = cinder + +# Service user password. (string value) +#admin_password = +admin_password = qum5net + +# Service tenant name. (string value) +#admin_tenant_name = admin +admin_tenant_name = services + + +[matchmaker_redis] + +# +# From oslo.messaging +# + +# Host to locate redis. (string value) +#host = 127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port = 6379 + +# Password for Redis server (optional). (string value) +#password = + +# +# From oslo.messaging +# + +# Host to locate redis. (string value) +#host = 127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port = 6379 + +# Password for Redis server (optional). (string value) +#password = + + +[matchmaker_ring] + +# +# From oslo.messaging +# + +# Matchmaker ring file (JSON). (string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +#ringfile = /etc/oslo/matchmaker_ring.json + +# +# From oslo.messaging +# + +# Matchmaker ring file (JSON). (string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +#ringfile = /etc/oslo/matchmaker_ring.json + + +[oslo_concurrency] + +# +# From oslo.concurrency +# + +# Enables or disables inter-process locks. (boolean value) +# Deprecated group/name - [DEFAULT]/disable_process_locking +#disable_process_locking = false + +# Directory to use for lock files. For security, the specified directory +# should only be writable by the user running the processes that need locking. +# Defaults to environment variable OSLO_LOCK_PATH. If external locks are used, +# a lock path must be set. 
(string value) +# Deprecated group/name - [DEFAULT]/lock_path +#lock_path = + + +[oslo_messaging_amqp] + +# +# From oslo.messaging +# + +# address prefix used when sending to a specific server (string value) +# Deprecated group/name - [amqp1]/server_request_prefix +#server_request_prefix = exclusive + +# address prefix used when broadcasting to all servers (string value) +# Deprecated group/name - [amqp1]/broadcast_prefix +#broadcast_prefix = broadcast + +# address prefix when sending to any server in group (string value) +# Deprecated group/name - [amqp1]/group_request_prefix +#group_request_prefix = unicast + +# Name for the AMQP container (string value) +# Deprecated group/name - [amqp1]/container_name +#container_name = + +# Timeout for inactive connections (in seconds) (integer value) +# Deprecated group/name - [amqp1]/idle_timeout +#idle_timeout = 0 + +# Debug: dump AMQP frames to stdout (boolean value) +# Deprecated group/name - [amqp1]/trace +#trace = false + +# CA certificate PEM file to verify server certificate (string value) +# Deprecated group/name - [amqp1]/ssl_ca_file +#ssl_ca_file = + +# Identifying certificate PEM file to present to clients (string value) +# Deprecated group/name - [amqp1]/ssl_cert_file +#ssl_cert_file = + +# Private key PEM file used to sign cert_file certificate (string value) +# Deprecated group/name - [amqp1]/ssl_key_file +#ssl_key_file = + +# Password for decrypting ssl_key_file (if encrypted) (string value) +# Deprecated group/name - [amqp1]/ssl_key_password +#ssl_key_password = + +# Accept clients using either SSL or plain TCP (boolean value) +# Deprecated group/name - [amqp1]/allow_insecure_clients +#allow_insecure_clients = false + +# +# From oslo.messaging +# + +# address prefix used when sending to a specific server (string value) +# Deprecated group/name - [amqp1]/server_request_prefix +#server_request_prefix = exclusive + +# address prefix used when broadcasting to all servers (string value) +# Deprecated group/name - [amqp1]/broadcast_prefix +#broadcast_prefix = broadcast + +# address prefix when sending to any server in group (string value) +# Deprecated group/name - [amqp1]/group_request_prefix +#group_request_prefix = unicast + +# Name for the AMQP container (string value) +# Deprecated group/name - [amqp1]/container_name +#container_name = + +# Timeout for inactive connections (in seconds) (integer value) +# Deprecated group/name - [amqp1]/idle_timeout +#idle_timeout = 0 + +# Debug: dump AMQP frames to stdout (boolean value) +# Deprecated group/name - [amqp1]/trace +#trace = false + +# CA certificate PEM file to verify server certificate (string value) +# Deprecated group/name - [amqp1]/ssl_ca_file +#ssl_ca_file = + +# Identifying certificate PEM file to present to clients (string value) +# Deprecated group/name - [amqp1]/ssl_cert_file +#ssl_cert_file = + +# Private key PEM file used to sign cert_file certificate (string value) +# Deprecated group/name - [amqp1]/ssl_key_file +#ssl_key_file = + +# Password for decrypting ssl_key_file (if encrypted) (string value) +# Deprecated group/name - [amqp1]/ssl_key_password +#ssl_key_password = + +# Accept clients using either SSL or plain TCP (boolean value) +# Deprecated group/name - [amqp1]/allow_insecure_clients +#allow_insecure_clients = false + + +[oslo_messaging_qpid] + +# +# From oslo.messaging +# + +# Use durable queues in AMQP. 
(boolean value) +# Deprecated group/name - [DEFAULT]/amqp_durable_queues +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues = false + +# Auto-delete queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/amqp_auto_delete +#amqp_auto_delete = false + +# Send a single AMQP reply to call message. The current behaviour since oslo- +# incubator is to send two AMQP replies - first one with the payload, a second +# one to ensure the other have finish to send the payload. We are going to +# remove it in the N release, but we must keep backward compatible at the same +# time. This option provides such compatibility - it defaults to False in +# Liberty and can be turned on for early adopters with a new installations or +# for testing. Please note, that this option will be removed in the Mitaka +# release. (boolean value) +#send_single_reply = false + +# Qpid broker hostname. (string value) +# Deprecated group/name - [DEFAULT]/qpid_hostname +#qpid_hostname = localhost + +# Qpid broker port. (integer value) +# Deprecated group/name - [DEFAULT]/qpid_port +#qpid_port = 5672 + +# Qpid HA cluster host:port pairs. (list value) +# Deprecated group/name - [DEFAULT]/qpid_hosts +#qpid_hosts = $qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +# Deprecated group/name - [DEFAULT]/qpid_username +#qpid_username = + +# Password for Qpid connection. (string value) +# Deprecated group/name - [DEFAULT]/qpid_password +#qpid_password = + +# Space separated list of SASL mechanisms to use for auth. (string value) +# Deprecated group/name - [DEFAULT]/qpid_sasl_mechanisms +#qpid_sasl_mechanisms = + +# Seconds between connection keepalive heartbeats. (integer value) +# Deprecated group/name - [DEFAULT]/qpid_heartbeat +#qpid_heartbeat = 60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +# Deprecated group/name - [DEFAULT]/qpid_protocol +#qpid_protocol = tcp + +# Whether to disable the Nagle algorithm. (boolean value) +# Deprecated group/name - [DEFAULT]/qpid_tcp_nodelay +#qpid_tcp_nodelay = true + +# The number of prefetched messages held by receiver. (integer value) +# Deprecated group/name - [DEFAULT]/qpid_receiver_capacity +#qpid_receiver_capacity = 1 + +# The qpid topology version to use. Version 1 is what was originally used by +# impl_qpid. Version 2 includes some backwards-incompatible changes that allow +# broker federation to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. (integer value) +# Deprecated group/name - [DEFAULT]/qpid_topology_version +#qpid_topology_version = 1 + +# +# From oslo.messaging +# + +# Use durable queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/amqp_durable_queues +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues = false + +# Auto-delete queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/amqp_auto_delete +#amqp_auto_delete = false + +# Send a single AMQP reply to call message. The current behaviour since oslo- +# incubator is to send two AMQP replies - first one with the payload, a second +# one to ensure the other have finish to send the payload. We are going to +# remove it in the N release, but we must keep backward compatible at the same +# time. This option provides such compatibility - it defaults to False in +# Liberty and can be turned on for early adopters with a new installations or +# for testing. 
Please note, that this option will be removed in the Mitaka +# release. (boolean value) +#send_single_reply = false + +# Qpid broker hostname. (string value) +# Deprecated group/name - [DEFAULT]/qpid_hostname +#qpid_hostname = localhost + +# Qpid broker port. (integer value) +# Deprecated group/name - [DEFAULT]/qpid_port +#qpid_port = 5672 + +# Qpid HA cluster host:port pairs. (list value) +# Deprecated group/name - [DEFAULT]/qpid_hosts +#qpid_hosts = $qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +# Deprecated group/name - [DEFAULT]/qpid_username +#qpid_username = + +# Password for Qpid connection. (string value) +# Deprecated group/name - [DEFAULT]/qpid_password +#qpid_password = + +# Space separated list of SASL mechanisms to use for auth. (string value) +# Deprecated group/name - [DEFAULT]/qpid_sasl_mechanisms +#qpid_sasl_mechanisms = + +# Seconds between connection keepalive heartbeats. (integer value) +# Deprecated group/name - [DEFAULT]/qpid_heartbeat +#qpid_heartbeat = 60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +# Deprecated group/name - [DEFAULT]/qpid_protocol +#qpid_protocol = tcp + +# Whether to disable the Nagle algorithm. (boolean value) +# Deprecated group/name - [DEFAULT]/qpid_tcp_nodelay +#qpid_tcp_nodelay = true + +# The number of prefetched messages held by receiver. (integer value) +# Deprecated group/name - [DEFAULT]/qpid_receiver_capacity +#qpid_receiver_capacity = 1 + +# The qpid topology version to use. Version 1 is what was originally used by +# impl_qpid. Version 2 includes some backwards-incompatible changes that allow +# broker federation to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. (integer value) +# Deprecated group/name - [DEFAULT]/qpid_topology_version +#qpid_topology_version = 1 + + +[oslo_messaging_rabbit] + +# +# From oslo.messaging +# + +# Use durable queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/amqp_durable_queues +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues = false +amqp_durable_queues = False + +# Auto-delete queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/amqp_auto_delete +#amqp_auto_delete = false + +# Send a single AMQP reply to call message. The current behaviour since oslo- +# incubator is to send two AMQP replies - first one with the payload, a second +# one to ensure the other have finish to send the payload. We are going to +# remove it in the N release, but we must keep backward compatible at the same +# time. This option provides such compatibility - it defaults to False in +# Liberty and can be turned on for early adopters with a new installations or +# for testing. Please note, that this option will be removed in the Mitaka +# release. (boolean value) +#send_single_reply = false + +# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and +# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some +# distributions. (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_version +#kombu_ssl_version = + +# SSL key file (valid only if SSL enabled). (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile +#kombu_ssl_keyfile = +kombu_ssl_keyfile = + +# SSL cert file (valid only if SSL enabled). (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile +#kombu_ssl_certfile = +kombu_ssl_certfile = + +# SSL certification authority file (valid only if SSL enabled). 
(string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs +#kombu_ssl_ca_certs = +kombu_ssl_ca_certs = + +# How long to wait before reconnecting in response to an AMQP consumer cancel +# notification. (floating point value) +# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay +#kombu_reconnect_delay = 1.0 + +# How long to wait before considering a reconnect attempt to have failed. This +# value should not be longer than rpc_response_timeout. (integer value) +#kombu_reconnect_timeout = 60 + +# The RabbitMQ broker address where a single node is used. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_host +#rabbit_host = localhost +rabbit_host = VARINET4ADDR + +# The RabbitMQ broker port where a single node is used. (integer value) +# Deprecated group/name - [DEFAULT]/rabbit_port +#rabbit_port = 5672 +rabbit_port = 5672 + +# RabbitMQ HA cluster host:port pairs. (list value) +# Deprecated group/name - [DEFAULT]/rabbit_hosts +#rabbit_hosts = $rabbit_host:$rabbit_port +rabbit_hosts = VARINET4ADDR:5672 + +# Connect over SSL for RabbitMQ. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_use_ssl +#rabbit_use_ssl = false +rabbit_use_ssl = False + +# The RabbitMQ userid. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_userid +#rabbit_userid = guest +rabbit_userid = guest + +# The RabbitMQ password. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_password +#rabbit_password = guest +rabbit_password = guest + +# The RabbitMQ login method. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_login_method +#rabbit_login_method = AMQPLAIN + +# The RabbitMQ virtual host. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_virtual_host +#rabbit_virtual_host = / +rabbit_virtual_host = / + +# How frequently to retry connecting with RabbitMQ. (integer value) +#rabbit_retry_interval = 1 + +# How long to backoff for between retries when connecting to RabbitMQ. (integer +# value) +# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff +#rabbit_retry_backoff = 2 + +# Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry +# count). (integer value) +# Deprecated group/name - [DEFAULT]/rabbit_max_retries +#rabbit_max_retries = 0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you +# must wipe the RabbitMQ database. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_ha_queues +#rabbit_ha_queues = false +rabbit_ha_queues = False + +# Specifies the number of messages to prefetch. Setting to zero allows +# unlimited messages. (integer value) +#rabbit_qos_prefetch_count = 0 + +# Number of seconds after which the Rabbit broker is considered down if +# heartbeat's keep-alive fails (0 disable the heartbeat). EXPERIMENTAL (integer +# value) +#heartbeat_timeout_threshold = 60 +heartbeat_timeout_threshold = 0 + +# How often times during the heartbeat_timeout_threshold we check the +# heartbeat. (integer value) +#heartbeat_rate = 2 +heartbeat_rate = 2 + +# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value) +# Deprecated group/name - [DEFAULT]/fake_rabbit +#fake_rabbit = false + +# +# From oslo.messaging +# + +# Use durable queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/amqp_durable_queues +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues = false + +# Auto-delete queues in AMQP. 
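+#
+# VARINET4ADDR in the rabbit_host/rabbit_hosts settings above is a
+# placeholder for the controller's IPv4 address; the accompanying QA
+# install scripts presumably substitute it before the template is
+# deployed. A rough sketch of that substitution plus a broker
+# reachability check ($CONTROLLER_IP and the file paths here are
+# illustrative):
+#   sed -e "s/VARINET4ADDR/${CONTROLLER_IP}/g" service.template.conf > /etc/service/service.conf
+#   nc -z "${CONTROLLER_IP}" 5672 && echo "rabbit broker reachable on 5672"
+#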
(boolean value) +# Deprecated group/name - [DEFAULT]/amqp_auto_delete +#amqp_auto_delete = false + +# Send a single AMQP reply to call message. The current behaviour since oslo- +# incubator is to send two AMQP replies - first one with the payload, a second +# one to ensure the other have finish to send the payload. We are going to +# remove it in the N release, but we must keep backward compatible at the same +# time. This option provides such compatibility - it defaults to False in +# Liberty and can be turned on for early adopters with a new installations or +# for testing. Please note, that this option will be removed in the Mitaka +# release. (boolean value) +#send_single_reply = false + +# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and +# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some +# distributions. (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_version +#kombu_ssl_version = + +# SSL key file (valid only if SSL enabled). (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile +#kombu_ssl_keyfile = + +# SSL cert file (valid only if SSL enabled). (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile +#kombu_ssl_certfile = + +# SSL certification authority file (valid only if SSL enabled). (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs +#kombu_ssl_ca_certs = + +# How long to wait before reconnecting in response to an AMQP consumer cancel +# notification. (floating point value) +# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay +#kombu_reconnect_delay = 1.0 + +# How long to wait before considering a reconnect attempt to have failed. This +# value should not be longer than rpc_response_timeout. (integer value) +#kombu_reconnect_timeout = 60 + +# The RabbitMQ broker address where a single node is used. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_host +#rabbit_host = localhost + +# The RabbitMQ broker port where a single node is used. (integer value) +# Deprecated group/name - [DEFAULT]/rabbit_port +#rabbit_port = 5672 + +# RabbitMQ HA cluster host:port pairs. (list value) +# Deprecated group/name - [DEFAULT]/rabbit_hosts +#rabbit_hosts = $rabbit_host:$rabbit_port + +# Connect over SSL for RabbitMQ. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_use_ssl +#rabbit_use_ssl = false + +# The RabbitMQ userid. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_userid +#rabbit_userid = guest + +# The RabbitMQ password. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_password +#rabbit_password = guest + +# The RabbitMQ login method. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_login_method +#rabbit_login_method = AMQPLAIN + +# The RabbitMQ virtual host. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_virtual_host +#rabbit_virtual_host = / + +# How frequently to retry connecting with RabbitMQ. (integer value) +#rabbit_retry_interval = 1 + +# How long to backoff for between retries when connecting to RabbitMQ. (integer +# value) +# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff +#rabbit_retry_backoff = 2 + +# Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry +# count). (integer value) +# Deprecated group/name - [DEFAULT]/rabbit_max_retries +#rabbit_max_retries = 0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you +# must wipe the RabbitMQ database. 
(boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_ha_queues +#rabbit_ha_queues = false + +# Specifies the number of messages to prefetch. Setting to zero allows +# unlimited messages. (integer value) +#rabbit_qos_prefetch_count = 0 + +# Number of seconds after which the Rabbit broker is considered down if +# heartbeat's keep-alive fails (0 disable the heartbeat). EXPERIMENTAL (integer +# value) +#heartbeat_timeout_threshold = 60 + +# How often times during the heartbeat_timeout_threshold we check the +# heartbeat. (integer value) +#heartbeat_rate = 2 + +# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value) +# Deprecated group/name - [DEFAULT]/fake_rabbit +#fake_rabbit = false + + +[oslo_middleware] + +# +# From oslo.middleware +# + +# The maximum body size for each request, in bytes. (integer value) +# Deprecated group/name - [DEFAULT]/osapi_max_request_body_size +# Deprecated group/name - [DEFAULT]/max_request_body_size +#max_request_body_size = 114688 + +# +# From oslo.middleware +# + +# The HTTP Header that will be used to determine what the original request +# protocol scheme was, even if it was hidden by an SSL termination proxy. +# (string value) +#secure_proxy_ssl_header = X-Forwarded-Proto + + +[oslo_policy] + +# +# From oslo.policy +# + +# The JSON file that defines policies. (string value) +# Deprecated group/name - [DEFAULT]/policy_file +#policy_file = policy.json + +# Default rule. Enforced when a requested rule is not found. (string value) +# Deprecated group/name - [DEFAULT]/policy_default_rule +#policy_default_rule = default + +# Directories where policy configuration files are stored. They can be relative +# to any directory in the search path defined by the config_dir option, or +# absolute paths. The file defined by policy_file must exist for these +# directories to be searched. Missing or empty directories are ignored. (multi +# valued) +# Deprecated group/name - [DEFAULT]/policy_dirs +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#policy_dirs = policy.d + + +[oslo_reports] + +# +# From oslo.reports +# + +# Path to a log directory where to create a file (string value) +#log_dir = + + +[profiler] + +# +# From cinder +# + +# If False fully disable profiling feature. (boolean value) +#profiler_enabled = false + +# If False doesn't trace SQL requests. (boolean value) +#trace_sqlalchemy = false + +[lvm] +iscsi_helper=lioadm +volume_group=cinder-volumes +iscsi_ip_address=VARINET4ADDR +volume_driver=cinder.volume.drivers.lvm.LVMVolumeDriver +volumes_dir=/var/lib/cinder/volumes +iscsi_protocol=iscsi +volume_backend_name=lvm + +[ceph] +volume_driver = cinder.volume.drivers.rbd.RBDDriver +rbd_pool = volumes +rbd_ceph_conf = /etc/ceph/ceph.conf +rbd_flatten_volume_from_snapshot = false +rbd_max_clone_depth = 5 +rbd_store_chunk_size = 4 +rados_connect_timeout = -1 +glance_api_version = 2 +rbd_user=cinder +rbd_secret_uuid=RBDSECRET diff --git a/qa/qa_scripts/openstack/files/glance-api.template.conf b/qa/qa_scripts/openstack/files/glance-api.template.conf new file mode 100644 index 00000000..956fb1bf --- /dev/null +++ b/qa/qa_scripts/openstack/files/glance-api.template.conf @@ -0,0 +1,1590 @@ +[DEFAULT] + +# +# From glance.api +# + +# When true, this option sets the owner of an image to be the tenant. +# Otherwise, the owner of the image will be the authenticated user +# issuing the request. (boolean value) +#owner_is_tenant=true + +# Role used to identify an authenticated user as administrator. 
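+#
+# The cinder [ceph] backend defined above (rbd_pool=volumes,
+# rbd_user=cinder, rbd_secret_uuid=RBDSECRET) assumes the matching pool
+# and cephx user already exist, and that RBDSECRET is presumably filled
+# in with the libvirt secret UUID at install time. A minimal sketch of
+# that Ceph-side preparation (PG count and cap strings are illustrative):
+#   ceph osd pool create volumes 128
+#   ceph auth get-or-create client.cinder mon 'allow r' \
+#       osd 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rx pool=images'
+#   uuidgen    # result would be used as rbd_secret_uuid / RBDSECRET
+#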
+# (string value) +#admin_role=admin + +# Allow unauthenticated users to access the API with read-only +# privileges. This only applies when using ContextMiddleware. (boolean +# value) +#allow_anonymous_access=false + +# Limits request ID length. (integer value) +#max_request_id_length=64 + +# Public url to use for versions endpoint. The default is None, which +# will use the request's host_url attribute to populate the URL base. +# If Glance is operating behind a proxy, you will want to change this +# to represent the proxy's URL. (string value) +#public_endpoint= + +# Whether to allow users to specify image properties beyond what the +# image schema provides (boolean value) +#allow_additional_image_properties=true + +# Maximum number of image members per image. Negative values evaluate +# to unlimited. (integer value) +#image_member_quota=128 + +# Maximum number of properties allowed on an image. Negative values +# evaluate to unlimited. (integer value) +#image_property_quota=128 + +# Maximum number of tags allowed on an image. Negative values evaluate +# to unlimited. (integer value) +#image_tag_quota=128 + +# Maximum number of locations allowed on an image. Negative values +# evaluate to unlimited. (integer value) +#image_location_quota=10 + +# Python module path of data access API (string value) +#data_api=glance.db.sqlalchemy.api + +# Default value for the number of items returned by a request if not +# specified explicitly in the request (integer value) +#limit_param_default=25 + +# Maximum permissible number of items that could be returned by a +# request (integer value) +#api_limit_max=1000 + +# Whether to include the backend image storage location in image +# properties. Revealing storage location can be a security risk, so +# use this setting with caution! (boolean value) +#show_image_direct_url=false +show_image_direct_url=True + +# Whether to include the backend image locations in image properties. +# For example, if using the file system store a URL of +# "file:///path/to/image" will be returned to the user in the +# 'direct_url' meta-data field. Revealing storage location can be a +# security risk, so use this setting with caution! The overrides +# show_image_direct_url. (boolean value) +#show_multiple_locations=false + +# Maximum size of image a user can upload in bytes. Defaults to +# 1099511627776 bytes (1 TB).WARNING: this value should only be +# increased after careful consideration and must be set to a value +# under 8 EB (9223372036854775808). (integer value) +# Maximum value: 9223372036854775808 +#image_size_cap=1099511627776 + +# Set a system wide quota for every user. This value is the total +# capacity that a user can use across all storage systems. A value of +# 0 means unlimited.Optional unit can be specified for the value. +# Accepted units are B, KB, MB, GB and TB representing Bytes, +# KiloBytes, MegaBytes, GigaBytes and TeraBytes respectively. If no +# unit is specified then Bytes is assumed. Note that there should not +# be any space between value and unit and units are case sensitive. +# (string value) +#user_storage_quota=0 + +# Deploy the v1 OpenStack Images API. (boolean value) +#enable_v1_api=true + +# Deploy the v2 OpenStack Images API. (boolean value) +#enable_v2_api=true + +# Deploy the v3 OpenStack Objects API. (boolean value) +#enable_v3_api=false + +# Deploy the v1 OpenStack Registry API. (boolean value) +#enable_v1_registry=true + +# Deploy the v2 OpenStack Registry API. 
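+#
+# show_image_direct_url=True above is what allows RBD-backed cinder/nova
+# to clone glance images copy-on-write instead of downloading them. A
+# rough way to confirm it took effect once an image exists (assuming the
+# client surfaces the field; <image-id> is a placeholder):
+#   openstack image show <image-id> -f value -c direct_url
+#   # expected to look like rbd://<fsid>/images/<image-id>/snap
+#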
(boolean value) +#enable_v2_registry=true + +# The hostname/IP of the pydev process listening for debug connections +# (string value) +#pydev_worker_debug_host= + +# The port on which a pydev process is listening for connections. +# (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#pydev_worker_debug_port=5678 + +# AES key for encrypting store 'location' metadata. This includes, if +# used, Swift or S3 credentials. Should be set to a random string of +# length 16, 24 or 32 bytes (string value) +#metadata_encryption_key= + +# Digest algorithm which will be used for digital signature. Use the +# command "openssl list-message-digest-algorithms" to get the +# available algorithmssupported by the version of OpenSSL on the +# platform. Examples are "sha1", "sha256", "sha512", etc. (string +# value) +#digest_algorithm=sha256 + +# This value sets what strategy will be used to determine the image +# location order. Currently two strategies are packaged with Glance +# 'location_order' and 'store_type'. (string value) +# Allowed values: location_order, store_type +#location_strategy=location_order + +# The location of the property protection file.This file contains the +# rules for property protections and the roles/policies associated +# with it. If this config value is not specified, by default, property +# protections won't be enforced. If a value is specified and the file +# is not found, then the glance-api service will not start. (string +# value) +#property_protection_file= + +# This config value indicates whether "roles" or "policies" are used +# in the property protection file. (string value) +# Allowed values: roles, policies +#property_protection_rule_format=roles + +# Modules of exceptions that are permitted to be recreated upon +# receiving exception data from an rpc call. (list value) +#allowed_rpc_exception_modules=glance.common.exception,exceptions + +# Address to bind the server. Useful when selecting a particular +# network interface. (string value) +#bind_host=0.0.0.0 +bind_host=0.0.0.0 + +# The port on which the server will listen. (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#bind_port= +bind_port=9292 + +# The number of child process workers that will be created to service +# requests. The default will be equal to the number of CPUs available. +# (integer value) +#workers=4 +workers=12 + +# Maximum line size of message headers to be accepted. max_header_line +# may need to be increased when using large tokens (typically those +# generated by the Keystone v3 API with big service catalogs (integer +# value) +#max_header_line=16384 + +# If False, server will return the header "Connection: close", If +# True, server will return "Connection: Keep-Alive" in its responses. +# In order to close the client socket connection explicitly after the +# response is sent and read successfully by the client, you simply +# have to set this option to False when you create a wsgi server. +# (boolean value) +#http_keepalive=true + +# Timeout for client connections' socket operations. If an incoming +# connection is idle for this number of seconds it will be closed. A +# value of '0' means wait forever. (integer value) +#client_socket_timeout=900 + +# The backlog value that will be used when creating the TCP listener +# socket. (integer value) +#backlog=4096 +backlog=4096 + +# The value for the socket option TCP_KEEPIDLE. This is the time in +# seconds that the connection must be idle before TCP starts sending +# keepalive probes. 
(integer value) +#tcp_keepidle=600 + +# CA certificate file to use to verify connecting clients. (string +# value) +#ca_file= + +# Certificate file to use when starting API server securely. (string +# value) +#cert_file= + +# Private key file to use when starting API server securely. (string +# value) +#key_file= + +# If False fully disable profiling feature. (boolean value) +#enabled=false + +# If False doesn't trace SQL requests. (boolean value) +#trace_sqlalchemy=false + +# The path to the sqlite file database that will be used for image +# cache management. (string value) +#image_cache_sqlite_db=cache.db + +# The driver to use for image cache management. (string value) +#image_cache_driver=sqlite + +# The upper limit (the maximum size of accumulated cache in bytes) +# beyond which pruner, if running, starts cleaning the images cache. +# (integer value) +#image_cache_max_size=10737418240 + +# The amount of time to let an image remain in the cache without being +# accessed. (integer value) +#image_cache_stall_time=86400 + +# Base directory that the Image Cache uses. (string value) +#image_cache_dir=/var/lib/glance/image-cache/ +image_cache_dir=/var/lib/glance/image-cache + +# Default publisher_id for outgoing notifications. (string value) +#default_publisher_id=image.localhost + +# List of disabled notifications. A notification can be given either +# as a notification type to disable a single event, or as a +# notification group prefix to disable all events within a group. +# Example: if this config option is set to ["image.create", +# "metadef_namespace"], then "image.create" notification will not be +# sent after image is created and none of the notifications for +# metadefinition namespaces will be sent. (list value) +#disabled_notifications = + +# Address to find the registry server. (string value) +#registry_host=0.0.0.0 +registry_host=0.0.0.0 + +# Port the registry server is listening on. (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#registry_port=9191 +registry_port=9191 + +# Whether to pass through the user token when making requests to the +# registry. To prevent failures with token expiration during big files +# upload, it is recommended to set this parameter to False.If +# "use_user_token" is not in effect, then admin credentials can be +# specified. (boolean value) +#use_user_token=true + +# The administrators user name. If "use_user_token" is not in effect, +# then admin credentials can be specified. (string value) +#admin_user=%SERVICE_USER% + +# The administrators password. If "use_user_token" is not in effect, +# then admin credentials can be specified. (string value) +#admin_password=%SERVICE_PASSWORD% + +# The tenant name of the administrative user. If "use_user_token" is +# not in effect, then admin tenant name can be specified. (string +# value) +#admin_tenant_name=%SERVICE_TENANT_NAME% + +# The URL to the keystone service. If "use_user_token" is not in +# effect and using keystone auth, then URL of keystone can be +# specified. (string value) +#auth_url= + +# The strategy to use for authentication. If "use_user_token" is not +# in effect, then auth strategy can be specified. (string value) +#auth_strategy=noauth + +# The region for the authentication service. If "use_user_token" is +# not in effect and using keystone auth, then region name can be +# specified. (string value) +#auth_region= + +# The protocol to use for communication with the registry server. +# Either http or https. 
(string value) +#registry_client_protocol=http +registry_client_protocol=http + +# The path to the key file to use in SSL connections to the registry +# server, if any. Alternately, you may set the GLANCE_CLIENT_KEY_FILE +# environment variable to a filepath of the key file (string value) +#registry_client_key_file= + +# The path to the cert file to use in SSL connections to the registry +# server, if any. Alternately, you may set the GLANCE_CLIENT_CERT_FILE +# environment variable to a filepath of the CA cert file (string +# value) +#registry_client_cert_file= + +# The path to the certifying authority cert file to use in SSL +# connections to the registry server, if any. Alternately, you may set +# the GLANCE_CLIENT_CA_FILE environment variable to a filepath of the +# CA cert file. (string value) +#registry_client_ca_file= + +# When using SSL in connections to the registry server, do not require +# validation via a certifying authority. This is the registry's +# equivalent of specifying --insecure on the command line using +# glanceclient for the API. (boolean value) +#registry_client_insecure=false + +# The period of time, in seconds, that the API server will wait for a +# registry request to complete. A value of 0 implies no timeout. +# (integer value) +#registry_client_timeout=600 + +# Whether to pass through headers containing user and tenant +# information when making requests to the registry. This allows the +# registry to use the context middleware without keystonemiddleware's +# auth_token middleware, removing calls to the keystone auth service. +# It is recommended that when using this option, secure communication +# between glance api and glance registry is ensured by means other +# than auth_token middleware. (boolean value) +#send_identity_headers=false + +# The amount of time in seconds to delay before performing a delete. +# (integer value) +#scrub_time=0 + +# The size of thread pool to be used for scrubbing images. The default +# is one, which signifies serial scrubbing. Any value above one +# indicates the max number of images that may be scrubbed in parallel. +# (integer value) +#scrub_pool_size=1 + +# Turn on/off delayed delete. (boolean value) +#delayed_delete=false + +# Role used to identify an authenticated user as administrator. +# (string value) +#admin_role=admin + +# Whether to pass through headers containing user and tenant +# information when making requests to the registry. This allows the +# registry to use the context middleware without keystonemiddleware's +# auth_token middleware, removing calls to the keystone auth service. +# It is recommended that when using this option, secure communication +# between glance api and glance registry is ensured by means other +# than auth_token middleware. (boolean value) +#send_identity_headers=false + +# +# From oslo.log +# + +# Print debugging output (set logging level to DEBUG instead of +# default INFO level). (boolean value) +#debug=False +debug=True + +# If set to false, will disable INFO logging level, making WARNING the +# default. (boolean value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#verbose=True +verbose=True + +# The name of a logging configuration file. This file is appended to +# any existing logging configuration files. For details about logging +# configuration files, see the Python logging module documentation. +# (string value) +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append= + +# DEPRECATED. 
A logging.Formatter log message format string which may +# use any of the available logging.LogRecord attributes. This option +# is deprecated. Please use logging_context_format_string and +# logging_default_format_string instead. (string value) +#log_format= + +# Format string for %%(asctime)s in log records. Default: %(default)s +# . (string value) +#log_date_format=%Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to output to. If no default is set, +# logging will go to stdout. (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file=/var/log/glance/api.log +log_file=/var/log/glance/api.log + +# (Optional) The base directory used for relative --log-file paths. +# (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir= +log_dir=/var/log/glance + +# Use syslog for logging. Existing syslog format is DEPRECATED and +# will be changed later to honor RFC5424. (boolean value) +#use_syslog=false +use_syslog=False + +# (Optional) Enables or disables syslog rfc5424 format for logging. If +# enabled, prefixes the MSG part of the syslog message with APP-NAME +# (RFC5424). The format without the APP-NAME is deprecated in Kilo, +# and will be removed in Mitaka, along with this option. (boolean +# value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#use_syslog_rfc_format=true + +# Syslog facility to receive log lines. (string value) +#syslog_log_facility=LOG_USER +syslog_log_facility=LOG_USER + +# Log output to standard error. (boolean value) +#use_stderr=False +use_stderr=True + +# Format string to use for log messages with context. (string value) +#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages without context. (string +# value) +#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Data to append to log format when level is DEBUG. (string value) +#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format. (string +# value) +#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s + +# List of logger=LEVEL pairs. (list value) +#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN + +# Enables or disables publication of error events. (boolean value) +#publish_errors=false + +# The format for an instance that is passed with the log message. +# (string value) +#instance_format="[instance: %(uuid)s] " + +# The format for an instance UUID that is passed with the log message. +# (string value) +#instance_uuid_format="[instance: %(uuid)s] " + +# Enables or disables fatal status of deprecations. (boolean value) +#fatal_deprecations=false + +# +# From oslo.messaging +# + +# Size of RPC connection pool. (integer value) +# Deprecated group/name - [DEFAULT]/rpc_conn_pool_size +#rpc_conn_pool_size=30 + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet +# interface, or IP. The "host" option should point or resolve to this +# address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver. 
(string value) +#rpc_zmq_matchmaker=local + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per topic. +# Default is unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP address. +# Must match "host" option, if running Nova. (string value) +#rpc_zmq_host=localhost + +# Seconds to wait before a cast expires (TTL). Only supported by +# impl_zmq. (integer value) +#rpc_cast_timeout=30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl=600 + +# Size of executor thread pool. (integer value) +# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size +#executor_thread_pool_size=64 + +# The Drivers(s) to handle sending notifications. Possible values are +# messaging, messagingv2, routing, log, test, noop (multi valued) +#notification_driver = +notification_driver =messaging + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group/name - [rpc_notifier2]/topics +#notification_topics=notifications + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout=60 + +# A URL representing the messaging driver to use and its full +# configuration. If not set, we fall back to the rpc_backend option +# and driver specific configuration. (string value) +#transport_url= + +# The messaging driver to use, defaults to rabbit. Other drivers +# include qpid and zmq. (string value) +#rpc_backend=rabbit + +# The default exchange under which topics are scoped. May be +# overridden by an exchange name specified in the transport_url +# option. (string value) +#control_exchange=openstack +hw_scsi_model=virtio-scsi +hw_disk_bus=scsi +hw_qemu_guest_agent=yes +os_require_quiesce=yes + +[database] + +# +# From oslo.db +# + +# The file name to use with SQLite. (string value) +# Deprecated group/name - [DEFAULT]/sqlite_db +#sqlite_db=oslo.sqlite + +# If True, SQLite uses synchronous mode. (boolean value) +# Deprecated group/name - [DEFAULT]/sqlite_synchronous +#sqlite_synchronous=true + +# The back end to use for the database. (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend=sqlalchemy + +# The SQLAlchemy connection string to use to connect to the database. +# (string value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection=mysql://glance:glance@localhost/glance +connection=mysql+pymysql://glance:qum5net@VARINET4ADDR/glance + +# The SQLAlchemy connection string to use to connect to the slave +# database. (string value) +#slave_connection= + +# The SQL mode to be used for MySQL sessions. This option, including +# the default, overrides any server-set SQL mode. To use whatever SQL +# mode is set by the server configuration, set this to no value. +# Example: mysql_sql_mode= (string value) +#mysql_sql_mode=TRADITIONAL + +# Timeout before idle SQL connections are reaped. (integer value) +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#idle_timeout=3600 +idle_timeout=3600 + +# Minimum number of SQL connections to keep open in a pool. 
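+#
+# The connection string above expects the glance database and the
+# glance/qum5net MySQL account to exist on VARINET4ADDR before the
+# schema is synced. A rough sketch of that preparation and a
+# connectivity check ($CONTROLLER_IP is illustrative):
+#   mysql -u root -p -e "CREATE DATABASE glance;"
+#   mysql -u root -p -e "GRANT ALL PRIVILEGES ON glance.* TO 'glance'@'%' IDENTIFIED BY 'qum5net';"
+#   mysql -u glance -pqum5net -h "${CONTROLLER_IP}" glance -e "SELECT 1;"
+#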
(integer +# value) +# Deprecated group/name - [DEFAULT]/sql_min_pool_size +# Deprecated group/name - [DATABASE]/sql_min_pool_size +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size= + +# Maximum number of database connection retries during startup. Set to +# -1 to specify an infinite retry count. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries=10 + +# Interval between retries of opening a SQL connection. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval=10 + +# If set, use this value for max_overflow with SQLAlchemy. (integer +# value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow= + +# Verbosity of SQL debugging information: 0=None, 100=Everything. +# (integer value) +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug=0 + +# Add Python stack traces to SQL as comment strings. (boolean value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace=false + +# If set, use this value for pool_timeout with SQLAlchemy. (integer +# value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout= + +# Enable the experimental use of database reconnect on connection +# lost. (boolean value) +#use_db_reconnect=false + +# Seconds between retries of a database transaction. (integer value) +#db_retry_interval=1 + +# If True, increases the interval between retries of a database +# operation up to db_max_retry_interval. (boolean value) +#db_inc_retry_interval=true + +# If db_inc_retry_interval is set, the maximum seconds between retries +# of a database operation. (integer value) +#db_max_retry_interval=10 + +# Maximum retries in case of connection error or deadlock error before +# error is raised. Set to -1 to specify an infinite retry count. +# (integer value) +#db_max_retries=20 + +# +# From oslo.db.concurrency +# + +# Enable the experimental use of thread pooling for all DB API calls +# (boolean value) +# Deprecated group/name - [DEFAULT]/dbapi_use_tpool +#use_tpool=false + + +[glance_store] + +# +# From glance.store +# + +# List of stores enabled (list value) +#stores=file,http +stores=rbd +default_store=rbd + +# Default scheme to use to store image data. The scheme must be +# registered by one of the stores defined by the 'stores' config +# option. (string value) +#default_store=file + +# Minimum interval seconds to execute updating dynamic storage +# capabilities based on backend status then. It's not a periodic +# routine, the update logic will be executed only when interval +# seconds elapsed and an operation of store has triggered. The feature +# will be enabled only when the option value greater then zero. +# (integer value) +#store_capabilities_update_min_interval=0 + +# +# From glance.store +# + +# Hostname or IP address of the instance to connect to, or a mongodb +# URI, or a list of hostnames / mongodb URIs. If host is an IPv6 +# literal it must be enclosed in '[' and ']' characters following the +# RFC2732 URL syntax (e.g. 
'[::1]' for localhost) (string value) +#mongodb_store_uri= + +# Database to use (string value) +#mongodb_store_db= + +# Images will be chunked into objects of this size (in megabytes). For +# best performance, this should be a power of two. (integer value) +#sheepdog_store_chunk_size=64 + +# Port of sheep daemon. (integer value) +#sheepdog_store_port=7000 + +# IP address of sheep daemon. (string value) +#sheepdog_store_address=localhost + +# RADOS images will be chunked into objects of this size (in +# megabytes). For best performance, this should be a power of two. +# (integer value) +rbd_store_chunk_size=8 + +# RADOS pool in which images are stored. (string value) +#rbd_store_pool=images +rbd_store_pool=images + +# RADOS user to authenticate as (only applicable if using Cephx. If +# , a default will be chosen based on the client. section in +# rbd_store_ceph_conf) (string value) +rbd_store_user=glance + +# Ceph configuration file path. If , librados will locate the +# default config. If using cephx authentication, this file should +# include a reference to the right keyring in a client. section +# (string value) +#rbd_store_ceph_conf=/etc/ceph/ceph.conf +rbd_store_ceph_conf=/etc/ceph/ceph.conf + +# Timeout value (in seconds) used when connecting to ceph cluster. If +# value <= 0, no timeout is set and default librados value is used. +# (integer value) +#rados_connect_timeout=0 + +# Directory to which the Filesystem backend store writes images. +# (string value) +#filesystem_store_datadir=/var/lib/glance/images/ + +# List of directories and its priorities to which the Filesystem +# backend store writes images. (multi valued) +#filesystem_store_datadirs = + +# The path to a file which contains the metadata to be returned with +# any location associated with this store. The file must contain a +# valid JSON object. The object should contain the keys 'id' and +# 'mountpoint'. The value for both keys should be 'string'. (string +# value) +#filesystem_store_metadata_file= + +# The required permission for created image file. In this way the user +# other service used, e.g. Nova, who consumes the image could be the +# exclusive member of the group that owns the files created. Assigning +# it less then or equal to zero means don't change the default +# permission of the file. This value will be decoded as an octal +# digit. (integer value) +#filesystem_store_file_perm=0 + +# If True, swiftclient won't check for a valid SSL certificate when +# authenticating. (boolean value) +#swift_store_auth_insecure=false + +# A string giving the CA certificate file to use in SSL connections +# for verifying certs. (string value) +#swift_store_cacert= + +# The region of the swift endpoint to be used for single tenant. This +# setting is only necessary if the tenant has multiple swift +# endpoints. (string value) +#swift_store_region= + +# If set, the configured endpoint will be used. If None, the storage +# url from the auth response will be used. (string value) +#swift_store_endpoint= + +# A string giving the endpoint type of the swift service to use +# (publicURL, adminURL or internalURL). This setting is only used if +# swift_store_auth_version is 2. (string value) +#swift_store_endpoint_type=publicURL + +# A string giving the service type of the swift service to use. This +# setting is only used if swift_store_auth_version is 2. 
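+#
+# The rbd store enabled above (stores=rbd, default_store=rbd,
+# rbd_store_pool=images, rbd_store_user=glance) assumes an images pool
+# and a client.glance key readable alongside rbd_store_ceph_conf. A
+# minimal sketch of that setup and a smoke test (PG count, cap strings
+# and the image file name are illustrative):
+#   ceph osd pool create images 128
+#   ceph auth get-or-create client.glance mon 'allow r' \
+#       osd 'allow class-read object_prefix rbd_children, allow rwx pool=images' \
+#       | sudo tee /etc/ceph/ceph.client.glance.keyring
+#   openstack image create --disk-format raw --container-format bare \
+#       --file cirros.raw cirros-rbd-test
+#   rbd -p images ls    # the new image's UUID should show up here
+#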
(string +# value) +#swift_store_service_type=object-store + +# Container within the account that the account should use for storing +# images in Swift when using single container mode. In multiple +# container mode, this will be the prefix for all containers. (string +# value) +#swift_store_container=glance + +# The size, in MB, that Glance will start chunking image files and do +# a large object manifest in Swift. (integer value) +#swift_store_large_object_size=5120 + +# The amount of data written to a temporary disk buffer during the +# process of chunking the image file. (integer value) +#swift_store_large_object_chunk_size=200 + +# A boolean value that determines if we create the container if it +# does not exist. (boolean value) +#swift_store_create_container_on_put=false + +# If set to True, enables multi-tenant storage mode which causes +# Glance images to be stored in tenant specific Swift accounts. +# (boolean value) +#swift_store_multi_tenant=false + +# When set to 0, a single-tenant store will only use one container to +# store all images. When set to an integer value between 1 and 32, a +# single-tenant store will use multiple containers to store images, +# and this value will determine how many containers are created.Used +# only when swift_store_multi_tenant is disabled. The total number of +# containers that will be used is equal to 16^N, so if this config +# option is set to 2, then 16^2=256 containers will be used to store +# images. (integer value) +#swift_store_multiple_containers_seed=0 + +# A list of tenants that will be granted read/write access on all +# Swift containers created by Glance in multi-tenant mode. (list +# value) +#swift_store_admin_tenants = + +# If set to False, disables SSL layer compression of https swift +# requests. Setting to False may improve performance for images which +# are already in a compressed format, eg qcow2. (boolean value) +#swift_store_ssl_compression=true + +# The number of times a Swift download will be retried before the +# request fails. (integer value) +#swift_store_retry_get_count=0 + +# The reference to the default swift account/backing store parameters +# to use for adding new images. (string value) +#default_swift_reference=ref1 + +# Version of the authentication service to use. Valid versions are 2 +# and 3 for keystone and 1 (deprecated) for swauth and rackspace. +# (deprecated - use "auth_version" in swift_store_config_file) (string +# value) +#swift_store_auth_version=2 + +# The address where the Swift authentication service is listening. +# (deprecated - use "auth_address" in swift_store_config_file) (string +# value) +#swift_store_auth_address= + +# The user to authenticate against the Swift authentication service +# (deprecated - use "user" in swift_store_config_file) (string value) +#swift_store_user= + +# Auth key for the user authenticating against the Swift +# authentication service. (deprecated - use "key" in +# swift_store_config_file) (string value) +#swift_store_key= + +# The config file that has the swift account(s)configs. (string value) +#swift_store_config_file= + +# ESX/ESXi or vCenter Server target system. The server value can be an +# IP address or a DNS name. (string value) +#vmware_server_host= + +# Username for authenticating with VMware ESX/VC server. (string +# value) +#vmware_server_username= + +# Password for authenticating with VMware ESX/VC server. (string +# value) +#vmware_server_password= + +# DEPRECATED. Inventory path to a datacenter. 
If the +# vmware_server_host specified is an ESX/ESXi, the +# vmware_datacenter_path is optional. If specified, it should be "ha- +# datacenter". This option is deprecated in favor of vmware_datastores +# and will be removed in the Liberty release. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#vmware_datacenter_path=ha-datacenter + +# DEPRECATED. Datastore associated with the datacenter. This option is +# deprecated in favor of vmware_datastores and will be removed in the +# Liberty release. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#vmware_datastore_name= + +# Number of times VMware ESX/VC server API must be retried upon +# connection related issues. (integer value) +#vmware_api_retry_count=10 + +# The interval used for polling remote tasks invoked on VMware ESX/VC +# server. (integer value) +#vmware_task_poll_interval=5 + +# The name of the directory where the glance images will be stored in +# the VMware datastore. (string value) +#vmware_store_image_dir=/openstack_glance + +# Allow to perform insecure SSL requests to ESX/VC. (boolean value) +#vmware_api_insecure=false + +# A list of datastores where the image can be stored. This option may +# be specified multiple times for specifying multiple datastores. +# Either one of vmware_datastore_name or vmware_datastores is +# required. The datastore name should be specified after its +# datacenter path, separated by ":". An optional weight may be given +# after the datastore name, separated again by ":". Thus, the required +# format becomes ::. +# When adding an image, the datastore with highest weight will be +# selected, unless there is not enough free space available in cases +# where the image size is already known. If no weight is given, it is +# assumed to be zero and the directory will be considered for +# selection last. If multiple datastores have the same weight, then +# the one with the most free space available is selected. (multi +# valued) +#vmware_datastores = + +# The host where the S3 server is listening. (string value) +#s3_store_host= + +# The S3 query token access key. (string value) +#s3_store_access_key= + +# The S3 query token secret key. (string value) +#s3_store_secret_key= + +# The S3 bucket to be used to store the Glance data. (string value) +#s3_store_bucket= + +# The local directory where uploads will be staged before they are +# transferred into S3. (string value) +#s3_store_object_buffer_dir= + +# A boolean to determine if the S3 bucket should be created on upload +# if it does not exist or if an error should be returned to the user. +# (boolean value) +#s3_store_create_bucket_on_put=false + +# The S3 calling format used to determine the bucket. Either subdomain +# or path can be used. (string value) +#s3_store_bucket_url_format=subdomain + +# What size, in MB, should S3 start chunking image files and do a +# multipart upload in S3. (integer value) +#s3_store_large_object_size=100 + +# What multipart upload part size, in MB, should S3 use when uploading +# parts. The size must be greater than or equal to 5M. (integer value) +#s3_store_large_object_chunk_size=10 + +# The number of thread pools to perform a multipart upload in S3. +# (integer value) +#s3_store_thread_pools=10 + +# Enable the use of a proxy. (boolean value) +#s3_store_enable_proxy=false + +# Address or hostname for the proxy server. 
(string value) +#s3_store_proxy_host= + +# The port to use when connecting over a proxy. (integer value) +#s3_store_proxy_port=8080 + +# The username to connect to the proxy. (string value) +#s3_store_proxy_user= + +# The password to use when connecting over a proxy. (string value) +#s3_store_proxy_password= + +# Info to match when looking for cinder in the service catalog. Format +# is : separated values of the form: +# :: (string value) +#cinder_catalog_info=volume:cinder:publicURL + +# Override service catalog lookup with template for cinder endpoint +# e.g. http://localhost:8776/v1/%(project_id)s (string value) +#cinder_endpoint_template= + +# Region name of this node (string value) +#os_region_name= +os_region_name=RegionOne + +# Location of ca certificates file to use for cinder client requests. +# (string value) +#cinder_ca_certificates_file= + +# Number of cinderclient retries on failed http calls (integer value) +#cinder_http_retries=3 + +# Allow to perform insecure SSL requests to cinder (boolean value) +#cinder_api_insecure=false + + +[image_format] + +# +# From glance.api +# + +# Supported values for the 'container_format' image attribute (list +# value) +# Deprecated group/name - [DEFAULT]/container_formats +#container_formats=ami,ari,aki,bare,ovf,ova + +# Supported values for the 'disk_format' image attribute (list value) +# Deprecated group/name - [DEFAULT]/disk_formats +#disk_formats=ami,ari,aki,vhd,vmdk,raw,qcow2,vdi,iso + + +[keystone_authtoken] + +# +# From keystonemiddleware.auth_token +# + +# Complete public Identity API endpoint. (string value) +#auth_uri= +auth_uri=http://VARINET4ADDR:5000/v2.0 + +# API version of the admin Identity API endpoint. (string value) +#auth_version= + +# Do not handle authorization requests within the middleware, but +# delegate the authorization decision to downstream WSGI components. +# (boolean value) +#delay_auth_decision=false + +# Request timeout value for communicating with Identity API server. +# (integer value) +#http_connect_timeout= + +# How many times are we trying to reconnect when communicating with +# Identity API Server. (integer value) +#http_request_max_retries=3 + +# Env key for the swift cache. (string value) +#cache= + +# Required if identity server requires client certificate (string +# value) +#certfile= + +# Required if identity server requires client certificate (string +# value) +#keyfile= + +# A PEM encoded Certificate Authority to use when verifying HTTPs +# connections. Defaults to system CAs. (string value) +#cafile= + +# Verify HTTPS connections. (boolean value) +#insecure=false + +# The region in which the identity server can be found. (string value) +#region_name= + +# Directory used to cache files related to PKI tokens. (string value) +#signing_dir= + +# Optionally specify a list of memcached server(s) to use for caching. +# If left undefined, tokens will instead be cached in-process. (list +# value) +# Deprecated group/name - [DEFAULT]/memcache_servers +#memcached_servers= + +# In order to prevent excessive effort spent validating tokens, the +# middleware caches previously-seen tokens for a configurable duration +# (in seconds). Set to -1 to disable caching completely. (integer +# value) +#token_cache_time=300 + +# Determines the frequency at which the list of revoked tokens is +# retrieved from the Identity service (in seconds). A high number of +# revocation events combined with a low cache duration may +# significantly reduce performance. 
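+#
+# auth_uri above is the public Keystone v2.0 endpoint advertised to API
+# clients, on the same VARINET4ADDR host. A quick check that the
+# endpoint answers before services are wired to it ($CONTROLLER_IP is
+# illustrative):
+#   curl -s "http://${CONTROLLER_IP}:5000/v2.0/" | python -m json.tool
+#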
(integer value) +#revocation_cache_time=10 + +# (Optional) If defined, indicate whether token data should be +# authenticated or authenticated and encrypted. Acceptable values are +# MAC or ENCRYPT. If MAC, token data is authenticated (with HMAC) in +# the cache. If ENCRYPT, token data is encrypted and authenticated in +# the cache. If the value is not one of these options or empty, +# auth_token will raise an exception on initialization. (string value) +#memcache_security_strategy= + +# (Optional, mandatory if memcache_security_strategy is defined) This +# string is used for key derivation. (string value) +#memcache_secret_key= + +# (Optional) Number of seconds memcached server is considered dead +# before it is tried again. (integer value) +#memcache_pool_dead_retry=300 + +# (Optional) Maximum total number of open connections to every +# memcached server. (integer value) +#memcache_pool_maxsize=10 + +# (Optional) Socket timeout in seconds for communicating with a +# memcached server. (integer value) +#memcache_pool_socket_timeout=3 + +# (Optional) Number of seconds a connection to memcached is held +# unused in the pool before it is closed. (integer value) +#memcache_pool_unused_timeout=60 + +# (Optional) Number of seconds that an operation will wait to get a +# memcached client connection from the pool. (integer value) +#memcache_pool_conn_get_timeout=10 + +# (Optional) Use the advanced (eventlet safe) memcached client pool. +# The advanced pool will only work under python 2.x. (boolean value) +#memcache_use_advanced_pool=false + +# (Optional) Indicate whether to set the X-Service-Catalog header. If +# False, middleware will not ask for service catalog on token +# validation and will not set the X-Service-Catalog header. (boolean +# value) +#include_service_catalog=true + +# Used to control the use and type of token binding. Can be set to: +# "disabled" to not check token binding. "permissive" (default) to +# validate binding information if the bind type is of a form known to +# the server and ignore it if not. "strict" like "permissive" but if +# the bind type is unknown the token will be rejected. "required" any +# form of token binding is needed to be allowed. Finally the name of a +# binding method that must be present in tokens. (string value) +#enforce_token_bind=permissive + +# If true, the revocation list will be checked for cached tokens. This +# requires that PKI tokens are configured on the identity server. +# (boolean value) +#check_revocations_for_cached=false + +# Hash algorithms to use for hashing PKI tokens. This may be a single +# algorithm or multiple. The algorithms are those supported by Python +# standard hashlib.new(). The hashes will be tried in the order given, +# so put the preferred one first for performance. The result of the +# first hash will be stored in the cache. This will typically be set +# to multiple values only while migrating from a less secure algorithm +# to a more secure one. Once all the old tokens are expired this +# option should be set to a single value for better performance. (list +# value) +#hash_algorithms=md5 + +# Prefix to prepend at the beginning of the path. Deprecated, use +# identity_uri. (string value) +#auth_admin_prefix = + +# Host providing the admin Identity API endpoint. Deprecated, use +# identity_uri. (string value) +#auth_host=127.0.0.1 + +# Port of the admin Identity API endpoint. Deprecated, use +# identity_uri. (integer value) +#auth_port=35357 + +# Protocol of the admin Identity API endpoint (http or https). 
+# Deprecated, use identity_uri. (string value) +#auth_protocol=http + +# Complete admin Identity API endpoint. This should specify the +# unversioned root endpoint e.g. https://localhost:35357/ (string +# value) +#identity_uri= +identity_uri=http://VARINET4ADDR:35357 + +# This option is deprecated and may be removed in a future release. +# Single shared secret with the Keystone configuration used for +# bootstrapping a Keystone installation, or otherwise bypassing the +# normal authentication process. This option should not be used, use +# `admin_user` and `admin_password` instead. (string value) +#admin_token= + +# Service username. (string value) +#admin_user= +admin_user=glance + +# Service user password. (string value) +#admin_password= +admin_password=qum5net + +# Service tenant name. (string value) +#admin_tenant_name=admin +admin_tenant_name=services + + +[matchmaker_redis] + +# +# From oslo.messaging +# + +# Host to locate redis. (string value) +#host=127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server (optional). (string value) +#password= + + +[matchmaker_ring] + +# +# From oslo.messaging +# + +# Matchmaker ring file (JSON). (string value) +# Deprecated group/name - [DEFAULT]/matchmaker_ringfile +#ringfile=/etc/oslo/matchmaker_ring.json + + +[oslo_concurrency] + +# +# From oslo.concurrency +# + +# Enables or disables inter-process locks. (boolean value) +# Deprecated group/name - [DEFAULT]/disable_process_locking +#disable_process_locking=false + +# Directory to use for lock files. For security, the specified +# directory should only be writable by the user running the processes +# that need locking. Defaults to environment variable OSLO_LOCK_PATH. +# If external locks are used, a lock path must be set. 
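+#
+# The identity_uri/admin_* values above are the service credentials the
+# auth_token middleware uses against Keystone's admin endpoint. A quick
+# way to confirm they work ($CONTROLLER_IP is illustrative; v2.0 auth as
+# configured here):
+#   openstack --os-auth-url "http://${CONTROLLER_IP}:35357/v2.0" \
+#       --os-username glance --os-password qum5net \
+#       --os-project-name services token issue
+#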
(string value) +# Deprecated group/name - [DEFAULT]/lock_path +#lock_path= + + +[oslo_messaging_amqp] + +# +# From oslo.messaging +# + +# address prefix used when sending to a specific server (string value) +# Deprecated group/name - [amqp1]/server_request_prefix +#server_request_prefix=exclusive + +# address prefix used when broadcasting to all servers (string value) +# Deprecated group/name - [amqp1]/broadcast_prefix +#broadcast_prefix=broadcast + +# address prefix when sending to any server in group (string value) +# Deprecated group/name - [amqp1]/group_request_prefix +#group_request_prefix=unicast + +# Name for the AMQP container (string value) +# Deprecated group/name - [amqp1]/container_name +#container_name= + +# Timeout for inactive connections (in seconds) (integer value) +# Deprecated group/name - [amqp1]/idle_timeout +#idle_timeout=0 + +# Debug: dump AMQP frames to stdout (boolean value) +# Deprecated group/name - [amqp1]/trace +#trace=false + +# CA certificate PEM file to verify server certificate (string value) +# Deprecated group/name - [amqp1]/ssl_ca_file +#ssl_ca_file = + +# Identifying certificate PEM file to present to clients (string +# value) +# Deprecated group/name - [amqp1]/ssl_cert_file +#ssl_cert_file = + +# Private key PEM file used to sign cert_file certificate (string +# value) +# Deprecated group/name - [amqp1]/ssl_key_file +#ssl_key_file = + +# Password for decrypting ssl_key_file (if encrypted) (string value) +# Deprecated group/name - [amqp1]/ssl_key_password +#ssl_key_password= + +# Accept clients using either SSL or plain TCP (boolean value) +# Deprecated group/name - [amqp1]/allow_insecure_clients +#allow_insecure_clients=false + + +[oslo_messaging_qpid] + +# +# From oslo.messaging +# + +# Use durable queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/amqp_durable_queues +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues=false + +# Auto-delete queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/amqp_auto_delete +#amqp_auto_delete=false + +# Send a single AMQP reply to call message. The current behaviour +# since oslo-incubator is to send two AMQP replies - first one with +# the payload, a second one to ensure the other have finish to send +# the payload. We are going to remove it in the N release, but we must +# keep backward compatible at the same time. This option provides such +# compatibility - it defaults to False in Liberty and can be turned on +# for early adopters with a new installations or for testing. Please +# note, that this option will be removed in the Mitaka release. +# (boolean value) +#send_single_reply=false + +# Qpid broker hostname. (string value) +# Deprecated group/name - [DEFAULT]/qpid_hostname +#qpid_hostname=localhost + +# Qpid broker port. (integer value) +# Deprecated group/name - [DEFAULT]/qpid_port +#qpid_port=5672 + +# Qpid HA cluster host:port pairs. (list value) +# Deprecated group/name - [DEFAULT]/qpid_hosts +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +# Deprecated group/name - [DEFAULT]/qpid_username +#qpid_username = + +# Password for Qpid connection. (string value) +# Deprecated group/name - [DEFAULT]/qpid_password +#qpid_password = + +# Space separated list of SASL mechanisms to use for auth. (string +# value) +# Deprecated group/name - [DEFAULT]/qpid_sasl_mechanisms +#qpid_sasl_mechanisms = + +# Seconds between connection keepalive heartbeats. 
(integer value) +# Deprecated group/name - [DEFAULT]/qpid_heartbeat +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +# Deprecated group/name - [DEFAULT]/qpid_protocol +#qpid_protocol=tcp + +# Whether to disable the Nagle algorithm. (boolean value) +# Deprecated group/name - [DEFAULT]/qpid_tcp_nodelay +#qpid_tcp_nodelay=true + +# The number of prefetched messages held by receiver. (integer value) +# Deprecated group/name - [DEFAULT]/qpid_receiver_capacity +#qpid_receiver_capacity=1 + +# The qpid topology version to use. Version 1 is what was originally +# used by impl_qpid. Version 2 includes some backwards-incompatible +# changes that allow broker federation to work. Users should update +# to version 2 when they are able to take everything down, as it +# requires a clean break. (integer value) +# Deprecated group/name - [DEFAULT]/qpid_topology_version +#qpid_topology_version=1 + + +[oslo_messaging_rabbit] + +# +# From oslo.messaging +# + +# Use durable queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/amqp_durable_queues +# Deprecated group/name - [DEFAULT]/rabbit_durable_queues +#amqp_durable_queues=false +amqp_durable_queues=False + +# Auto-delete queues in AMQP. (boolean value) +# Deprecated group/name - [DEFAULT]/amqp_auto_delete +#amqp_auto_delete=false + +# Send a single AMQP reply to call message. The current behaviour +# since oslo-incubator is to send two AMQP replies - first one with +# the payload, a second one to ensure the other have finish to send +# the payload. We are going to remove it in the N release, but we must +# keep backward compatible at the same time. This option provides such +# compatibility - it defaults to False in Liberty and can be turned on +# for early adopters with a new installations or for testing. Please +# note, that this option will be removed in the Mitaka release. +# (boolean value) +#send_single_reply=false + +# SSL version to use (valid only if SSL enabled). Valid values are +# TLSv1 and SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be +# available on some distributions. (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_version +#kombu_ssl_version = + +# SSL key file (valid only if SSL enabled). (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_keyfile +#kombu_ssl_keyfile = + +# SSL cert file (valid only if SSL enabled). (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_certfile +#kombu_ssl_certfile = + +# SSL certification authority file (valid only if SSL enabled). +# (string value) +# Deprecated group/name - [DEFAULT]/kombu_ssl_ca_certs +#kombu_ssl_ca_certs = + +# How long to wait before reconnecting in response to an AMQP consumer +# cancel notification. (floating point value) +# Deprecated group/name - [DEFAULT]/kombu_reconnect_delay +#kombu_reconnect_delay=1.0 + +# How long to wait before considering a reconnect attempt to have +# failed. This value should not be longer than rpc_response_timeout. +# (integer value) +#kombu_reconnect_timeout=60 + +# The RabbitMQ broker address where a single node is used. (string +# value) +# Deprecated group/name - [DEFAULT]/rabbit_host +#rabbit_host=localhost +rabbit_host=VARINET4ADDR + +# The RabbitMQ broker port where a single node is used. (integer +# value) +# Deprecated group/name - [DEFAULT]/rabbit_port +#rabbit_port=5672 +rabbit_port=5672 + +# RabbitMQ HA cluster host:port pairs. 
(list value) +# Deprecated group/name - [DEFAULT]/rabbit_hosts +#rabbit_hosts=$rabbit_host:$rabbit_port +rabbit_hosts=VARINET4ADDR:5672 + +# Connect over SSL for RabbitMQ. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_use_ssl +#rabbit_use_ssl=false +rabbit_use_ssl=False + +# The RabbitMQ userid. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_userid +#rabbit_userid=guest +rabbit_userid=guest + +# The RabbitMQ password. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_password +#rabbit_password=guest +rabbit_password=guest + +# The RabbitMQ login method. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_login_method +#rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +# Deprecated group/name - [DEFAULT]/rabbit_virtual_host +#rabbit_virtual_host=/ +rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. (integer value) +#rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to RabbitMQ. +# (integer value) +# Deprecated group/name - [DEFAULT]/rabbit_retry_backoff +#rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 +# (infinite retry count). (integer value) +# Deprecated group/name - [DEFAULT]/rabbit_max_retries +#rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this +# option, you must wipe the RabbitMQ database. (boolean value) +# Deprecated group/name - [DEFAULT]/rabbit_ha_queues +#rabbit_ha_queues=false +rabbit_ha_queues=False + +# Number of seconds after which the Rabbit broker is considered down +# if heartbeat's keep-alive fails (0 disable the heartbeat). +# EXPERIMENTAL (integer value) +#heartbeat_timeout_threshold=60 +heartbeat_timeout_threshold=0 + +# How often times during the heartbeat_timeout_threshold we check the +# heartbeat. (integer value) +#heartbeat_rate=2 +heartbeat_rate=2 + +# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake +# (boolean value) +# Deprecated group/name - [DEFAULT]/fake_rabbit +#fake_rabbit=false +rabbit_notification_exchange=glance +rabbit_notification_topic=notifications + + +[oslo_policy] + +# +# From oslo.policy +# + +# The JSON file that defines policies. (string value) +# Deprecated group/name - [DEFAULT]/policy_file +#policy_file=policy.json + +# Default rule. Enforced when a requested rule is not found. (string +# value) +# Deprecated group/name - [DEFAULT]/policy_default_rule +#policy_default_rule=default + +# Directories where policy configuration files are stored. They can be +# relative to any directory in the search path defined by the +# config_dir option, or absolute paths. The file defined by +# policy_file must exist for these directories to be searched. +# Missing or empty directories are ignored. (multi valued) +# Deprecated group/name - [DEFAULT]/policy_dirs +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#policy_dirs=policy.d + + +[paste_deploy] + +# +# From glance.api +# + +# Partial name of a pipeline in your paste configuration file with the +# service name removed. For example, if your paste section name is +# [pipeline:glance-api-keystone] use the value "keystone" (string +# value) +#flavor= +flavor=keystone + +# Name of the paste configuration file. (string value) +#config_file=/usr/share/glance/glance-api-dist-paste.ini + + +[store_type_location_strategy] + +# +# From glance.api +# + +# The store names to use to get store preference order. 
The name must +# be registered by one of the stores defined by the 'stores' config +# option. This option will be applied when you using 'store_type' +# option as image location strategy defined by the 'location_strategy' +# config option. (list value) +#store_type_preference = + + +[task] + +# +# From glance.api +# + +# Time in hours for which a task lives after, either succeeding or +# failing (integer value) +# Deprecated group/name - [DEFAULT]/task_time_to_live +#task_time_to_live=48 + +# Specifies which task executor to be used to run the task scripts. +# (string value) +#task_executor=taskflow + +# Work dir for asynchronous task operations. The directory set here +# will be used to operate over images - normally before they are +# imported in the destination store. When providing work dir, make +# sure enough space is provided for concurrent tasks to run +# efficiently without running out of space. A rough estimation can be +# done by multiplying the number of `max_workers` - or the N of +# workers running - by an average image size (e.g 500MB). The image +# size estimation should be done based on the average size in your +# deployment. Note that depending on the tasks running you may need to +# multiply this number by some factor depending on what the task does. +# For example, you may want to double the available size if image +# conversion is enabled. All this being said, remember these are just +# estimations and you should do them based on the worst case scenario +# and be prepared to act in case they were wrong. (string value) +#work_dir= + + +[taskflow_executor] + +# +# From glance.api +# + +# The mode in which the engine will run. Can be 'serial' or +# 'parallel'. (string value) +# Allowed values: serial, parallel +#engine_mode=parallel + +# The number of parallel activities executed at the same time by the +# engine. The value can be greater than one when the engine mode is +# 'parallel'. (integer value) +# Deprecated group/name - [task]/eventlet_executor_pool_size +#max_workers=10 diff --git a/qa/qa_scripts/openstack/files/kilo.template.conf b/qa/qa_scripts/openstack/files/kilo.template.conf new file mode 100644 index 00000000..35d359c8 --- /dev/null +++ b/qa/qa_scripts/openstack/files/kilo.template.conf @@ -0,0 +1,1077 @@ +[general] + +# Path to a public key to install on servers. If a usable key has not +# been installed on the remote servers, the user is prompted for a +# password and this key is installed so the password will not be +# required again. +CONFIG_SSH_KEY=/root/.ssh/id_rsa.pub + +# Default password to be used everywhere (overridden by passwords set +# for individual services or users). +CONFIG_DEFAULT_PASSWORD= + +# Specify 'y' to install MariaDB. ['y', 'n'] +CONFIG_MARIADB_INSTALL=y + +# Specify 'y' to install OpenStack Image Service (glance). ['y', 'n'] +CONFIG_GLANCE_INSTALL=y + +# Specify 'y' to install OpenStack Block Storage (cinder). ['y', 'n'] +CONFIG_CINDER_INSTALL=y + +# Specify 'y' to install OpenStack Compute (nova). ['y', 'n'] +CONFIG_NOVA_INSTALL=y + +# Specify 'y' to install OpenStack Networking (neutron); otherwise, +# Compute Networking (nova) will be used. ['y', 'n'] +CONFIG_NEUTRON_INSTALL=y + +# Specify 'y' to install OpenStack Dashboard (horizon). ['y', 'n'] +CONFIG_HORIZON_INSTALL=y + +# Specify 'y' to install OpenStack Object Storage (swift). ['y', 'n'] +CONFIG_SWIFT_INSTALL=y + +# Specify 'y' to install OpenStack Metering (ceilometer). 
['y', 'n'] +CONFIG_CEILOMETER_INSTALL=y + +# Specify 'y' to install OpenStack Data Processing (sahara). In case +# of sahara installation packstack also installs heat.['y', 'n'] +CONFIG_SAHARA_INSTALL=n + +# Specify 'y' to install OpenStack Orchestration (heat). ['y', 'n'] +CONFIG_HEAT_INSTALL=n + +# Specify 'y' to install OpenStack Database (trove) ['y', 'n'] +CONFIG_TROVE_INSTALL=n + +# Specify 'y' to install OpenStack Bare Metal Provisioning (ironic). +# ['y', 'n'] +CONFIG_IRONIC_INSTALL=n + +# Specify 'y' to install the OpenStack Client packages (command-line +# tools). An admin "rc" file will also be installed. ['y', 'n'] +CONFIG_CLIENT_INSTALL=y + +# Comma-separated list of NTP servers. Leave plain if Packstack +# should not install ntpd on instances. +CONFIG_NTP_SERVERS=clock.redhat.com + +# Specify 'y' to install Nagios to monitor OpenStack hosts. Nagios +# provides additional tools for monitoring the OpenStack environment. +# ['n'] +CONFIG_NAGIOS_INSTALL=n + +# Comma-separated list of servers to be excluded from the +# installation. This is helpful if you are running Packstack a second +# time with the same answer file and do not want Packstack to +# overwrite these server's configurations. Leave empty if you do not +# need to exclude any servers. +EXCLUDE_SERVERS= + +# Specify 'y' if you want to run OpenStack services in debug mode; +# otherwise, specify 'n'. ['y', 'n'] +CONFIG_DEBUG_MODE=y + +# Server on which to install OpenStack services specific to the +# controller role (for example, API servers or dashboard). +CONFIG_CONTROLLER_HOST=VARINET4ADDR + +# List the servers on which to install the Compute service. +CONFIG_COMPUTE_HOSTS=VARINET4ADDR + +# List of servers on which to install the network service such as +# Compute networking (nova network) or OpenStack Networking (neutron). +CONFIG_NETWORK_HOSTS=VARINET4ADDR + +# Specify 'y' if you want to use VMware vCenter as hypervisor and +# storage; otherwise, specify 'n'. ['y', 'n'] +CONFIG_VMWARE_BACKEND=n + +# Specify 'y' if you want to use unsupported parameters. This should +# be used only if you know what you are doing. Issues caused by using +# unsupported options will not be fixed before the next major release. +# ['y', 'n'] +CONFIG_UNSUPPORTED=n + +# Specify 'y' if you want to use subnet addresses (in CIDR format) +# instead of interface names in following options: +# CONFIG_NOVA_COMPUTE_PRIVIF, CONFIG_NOVA_NETWORK_PRIVIF, +# CONFIG_NOVA_NETWORK_PUBIF, CONFIG_NEUTRON_OVS_BRIDGE_IFACES, +# CONFIG_NEUTRON_LB_INTERFACE_MAPPINGS, CONFIG_NEUTRON_OVS_TUNNEL_IF. +# This is useful for cases when interface names are not same on all +# installation hosts. +CONFIG_USE_SUBNETS=n + +# IP address of the VMware vCenter server. +CONFIG_VCENTER_HOST= + +# User name for VMware vCenter server authentication. +CONFIG_VCENTER_USER= + +# Password for VMware vCenter server authentication. +CONFIG_VCENTER_PASSWORD= + +# Comma separated list of names of the VMware vCenter clusters. Note: +# if multiple clusters are specified each one is mapped to one +# compute, otherwise all computes are mapped to same cluster. +CONFIG_VCENTER_CLUSTER_NAMES= + +# (Unsupported!) Server on which to install OpenStack services +# specific to storage servers such as Image or Block Storage services. +CONFIG_STORAGE_HOST=VARINET4ADDR + +# (Unsupported!) Server on which to install OpenStack services +# specific to OpenStack Data Processing (sahara). +CONFIG_SAHARA_HOST=VARINET4ADDR + +# Specify 'y' to enable the EPEL repository (Extra Packages for +# Enterprise Linux). 
['y', 'n'] +CONFIG_USE_EPEL=n + +# Comma-separated list of URLs for any additional yum repositories, +# to use for installation. +CONFIG_REPO= + +# Specify 'y' to enable the RDO testing repository. ['y', 'n'] +CONFIG_ENABLE_RDO_TESTING=n + +# To subscribe each server with Red Hat Subscription Manager, include +# this with CONFIG_RH_PW. +CONFIG_RH_USER= + +# To subscribe each server to receive updates from a Satellite +# server, provide the URL of the Satellite server. You must also +# provide a user name (CONFIG_SATELLITE_USERNAME) and password +# (CONFIG_SATELLITE_PASSWORD) or an access key (CONFIG_SATELLITE_AKEY) +# for authentication. +CONFIG_SATELLITE_URL= + +# To subscribe each server with Red Hat Subscription Manager, include +# this with CONFIG_RH_USER. +CONFIG_RH_PW= + +# Specify 'y' to enable RHEL optional repositories. ['y', 'n'] +CONFIG_RH_OPTIONAL=y + +# HTTP proxy to use with Red Hat Subscription Manager. +CONFIG_RH_PROXY= + +# Port to use for Red Hat Subscription Manager's HTTP proxy. +CONFIG_RH_PROXY_PORT= + +# User name to use for Red Hat Subscription Manager's HTTP proxy. +CONFIG_RH_PROXY_USER= + +# Password to use for Red Hat Subscription Manager's HTTP proxy. +CONFIG_RH_PROXY_PW= + +# User name to authenticate with the RHN Satellite server; if you +# intend to use an access key for Satellite authentication, leave this +# blank. +CONFIG_SATELLITE_USER= + +# Password to authenticate with the RHN Satellite server; if you +# intend to use an access key for Satellite authentication, leave this +# blank. +CONFIG_SATELLITE_PW= + +# Access key for the Satellite server; if you intend to use a user +# name and password for Satellite authentication, leave this blank. +CONFIG_SATELLITE_AKEY= + +# Certificate path or URL of the certificate authority to verify that +# the connection with the Satellite server is secure. If you are not +# using Satellite in your deployment, leave this blank. +CONFIG_SATELLITE_CACERT= + +# Profile name that should be used as an identifier for the system in +# RHN Satellite (if required). +CONFIG_SATELLITE_PROFILE= + +# Comma-separated list of flags passed to the rhnreg_ks command. +# Valid flags are: novirtinfo, norhnsd, nopackages ['novirtinfo', +# 'norhnsd', 'nopackages'] +CONFIG_SATELLITE_FLAGS= + +# HTTP proxy to use when connecting to the RHN Satellite server (if +# required). +CONFIG_SATELLITE_PROXY= + +# User name to authenticate with the Satellite-server HTTP proxy. +CONFIG_SATELLITE_PROXY_USER= + +# User password to authenticate with the Satellite-server HTTP proxy. +CONFIG_SATELLITE_PROXY_PW= + +# Specify filepath for CA cert file. If CONFIG_SSL_CACERT_SELFSIGN is +# set to 'n' it has to be preexisting file. +CONFIG_SSL_CACERT_FILE=/etc/pki/tls/certs/selfcert.crt + +# Specify filepath for CA cert key file. If +# CONFIG_SSL_CACERT_SELFSIGN is set to 'n' it has to be preexisting +# file. +CONFIG_SSL_CACERT_KEY_FILE=/etc/pki/tls/private/selfkey.key + +# Enter the path to use to store generated SSL certificates in. +CONFIG_SSL_CERT_DIR=~/packstackca/ + +# Specify 'y' if you want Packstack to pregenerate the CA +# Certificate. +CONFIG_SSL_CACERT_SELFSIGN=y + +# Enter the selfsigned CAcert subject country. +CONFIG_SELFSIGN_CACERT_SUBJECT_C=-- + +# Enter the selfsigned CAcert subject state. +CONFIG_SELFSIGN_CACERT_SUBJECT_ST=State + +# Enter the selfsigned CAcert subject location. +CONFIG_SELFSIGN_CACERT_SUBJECT_L=City + +# Enter the selfsigned CAcert subject organization. 
+CONFIG_SELFSIGN_CACERT_SUBJECT_O=openstack + +# Enter the selfsigned CAcert subject organizational unit. +CONFIG_SELFSIGN_CACERT_SUBJECT_OU=packstack + +# Enter the selfsigned CAcert subject common name. +CONFIG_SELFSIGN_CACERT_SUBJECT_CN=VARHOSTNAME + +CONFIG_SELFSIGN_CACERT_SUBJECT_MAIL=admin@VARHOSTNAME + +# Service to be used as the AMQP broker. Allowed values are: qpid, +# rabbitmq ['qpid', 'rabbitmq'] +CONFIG_AMQP_BACKEND=rabbitmq + +# IP address of the server on which to install the AMQP service. +CONFIG_AMQP_HOST=VARINET4ADDR + +# Specify 'y' to enable SSL for the AMQP service. ['y', 'n'] +CONFIG_AMQP_ENABLE_SSL=n + +# Specify 'y' to enable authentication for the AMQP service. ['y', +# 'n'] +CONFIG_AMQP_ENABLE_AUTH=n + +# Password for the NSS certificate database of the AMQP service. +CONFIG_AMQP_NSS_CERTDB_PW=PW_PLACEHOLDER + +# User for AMQP authentication. +CONFIG_AMQP_AUTH_USER=amqp_user + +# Password for AMQP authentication. +CONFIG_AMQP_AUTH_PASSWORD=PW_PLACEHOLDER + +# IP address of the server on which to install MariaDB. If a MariaDB +# installation was not specified in CONFIG_MARIADB_INSTALL, specify +# the IP address of an existing database server (a MariaDB cluster can +# also be specified). +CONFIG_MARIADB_HOST=VARINET4ADDR + +# User name for the MariaDB administrative user. +CONFIG_MARIADB_USER=root + +# Password for the MariaDB administrative user. +CONFIG_MARIADB_PW=qum5net + +# Password to use for the Identity service (keystone) to access the +# database. +CONFIG_KEYSTONE_DB_PW=qum5net + +# Enter y if cron job for removing soft deleted DB rows should be +# created. +CONFIG_KEYSTONE_DB_PURGE_ENABLE=True + +# Default region name to use when creating tenants in the Identity +# service. +CONFIG_KEYSTONE_REGION=RegionOne + +# Token to use for the Identity service API. +CONFIG_KEYSTONE_ADMIN_TOKEN=9390caff845749c3ac74453eb4f384e2 + +# Email address for the Identity service 'admin' user. Defaults to +CONFIG_KEYSTONE_ADMIN_EMAIL=root@localhost + +# User name for the Identity service 'admin' user. Defaults to +# 'admin'. +CONFIG_KEYSTONE_ADMIN_USERNAME=admin + +# Password to use for the Identity service 'admin' user. +CONFIG_KEYSTONE_ADMIN_PW=qum5net + +# Password to use for the Identity service 'demo' user. +CONFIG_KEYSTONE_DEMO_PW=qum5net + +# Identity service API version string. ['v2.0', 'v3'] +CONFIG_KEYSTONE_API_VERSION=v2.0 + +# Identity service token format (UUID or PKI). The recommended format +# for new deployments is UUID. ['UUID', 'PKI'] +CONFIG_KEYSTONE_TOKEN_FORMAT=UUID + +# Name of service to use to run the Identity service (keystone or +# httpd). ['keystone', 'httpd'] +CONFIG_KEYSTONE_SERVICE_NAME=httpd + +# Type of Identity service backend (sql or ldap). ['sql', 'ldap'] +CONFIG_KEYSTONE_IDENTITY_BACKEND=sql + +# URL for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_URL=ldap://VARINET4ADDR + +# User DN for the Identity service LDAP backend. Used to bind to the +# LDAP server if the LDAP server does not allow anonymous +# authentication. +CONFIG_KEYSTONE_LDAP_USER_DN= + +# User DN password for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_USER_PASSWORD= + +# Base suffix for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_SUFFIX= + +# Query scope for the Identity service LDAP backend. Use 'one' for +# onelevel/singleLevel or 'sub' for subtree/wholeSubtree ('base' is +# not actually used by the Identity service and is therefore +# deprecated). 
['base', 'one', 'sub'] +CONFIG_KEYSTONE_LDAP_QUERY_SCOPE=one + +# Query page size for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_PAGE_SIZE=-1 + +# User subtree for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_USER_SUBTREE= + +# User query filter for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_USER_FILTER= + +# User object class for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_USER_OBJECTCLASS= + +# User ID attribute for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_USER_ID_ATTRIBUTE= + +# User name attribute for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_USER_NAME_ATTRIBUTE= + +# User email address attribute for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_USER_MAIL_ATTRIBUTE= + +# User-enabled attribute for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_USER_ENABLED_ATTRIBUTE= + +# Bit mask integer applied to user-enabled attribute for the Identity +# service LDAP backend. Indicate the bit that the enabled value is +# stored in if the LDAP server represents "enabled" as a bit on an +# integer rather than a boolean. A value of "0" indicates the mask is +# not used (default). If this is not set to "0", the typical value is +# "2", typically used when +# "CONFIG_KEYSTONE_LDAP_USER_ENABLED_ATTRIBUTE = userAccountControl". +CONFIG_KEYSTONE_LDAP_USER_ENABLED_MASK=-1 + +# Value of enabled attribute which indicates user is enabled for the +# Identity service LDAP backend. This should match an appropriate +# integer value if the LDAP server uses non-boolean (bitmask) values +# to indicate whether a user is enabled or disabled. If this is not +# set as 'y', the typical value is "512". This is typically used when +# "CONFIG_KEYSTONE_LDAP_USER_ENABLED_ATTRIBUTE = userAccountControl". +CONFIG_KEYSTONE_LDAP_USER_ENABLED_DEFAULT=TRUE + +# Specify 'y' if users are disabled (not enabled) in the Identity +# service LDAP backend (inverts boolean-enalbed values). Some LDAP +# servers use a boolean lock attribute where "y" means an account is +# disabled. Setting this to 'y' allows these lock attributes to be +# used. This setting will have no effect if +# "CONFIG_KEYSTONE_LDAP_USER_ENABLED_MASK" is in use. ['n', 'y'] +CONFIG_KEYSTONE_LDAP_USER_ENABLED_INVERT=n + +# Comma-separated list of attributes stripped from LDAP user entry +# upon update. +CONFIG_KEYSTONE_LDAP_USER_ATTRIBUTE_IGNORE= + +# Identity service LDAP attribute mapped to default_project_id for +# users. +CONFIG_KEYSTONE_LDAP_USER_DEFAULT_PROJECT_ID_ATTRIBUTE= + +# Specify 'y' if you want to be able to create Identity service users +# through the Identity service interface; specify 'n' if you will +# create directly in the LDAP backend. ['n', 'y'] +CONFIG_KEYSTONE_LDAP_USER_ALLOW_CREATE=n + +# Specify 'y' if you want to be able to update Identity service users +# through the Identity service interface; specify 'n' if you will +# update directly in the LDAP backend. ['n', 'y'] +CONFIG_KEYSTONE_LDAP_USER_ALLOW_UPDATE=n + +# Specify 'y' if you want to be able to delete Identity service users +# through the Identity service interface; specify 'n' if you will +# delete directly in the LDAP backend. ['n', 'y'] +CONFIG_KEYSTONE_LDAP_USER_ALLOW_DELETE=n + +# Identity service LDAP attribute mapped to password. +CONFIG_KEYSTONE_LDAP_USER_PASS_ATTRIBUTE= + +# DN of the group entry to hold enabled LDAP users when using enabled +# emulation. 
+CONFIG_KEYSTONE_LDAP_USER_ENABLED_EMULATION_DN= + +# List of additional LDAP attributes for mapping additional attribute +# mappings for users. The attribute-mapping format is +# <ldap_attr>:<user_attr>, where ldap_attr is the attribute in the +# LDAP entry and user_attr is the Identity API attribute. +CONFIG_KEYSTONE_LDAP_USER_ADDITIONAL_ATTRIBUTE_MAPPING= + +# Group subtree for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_GROUP_SUBTREE= + +# Group query filter for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_GROUP_FILTER= + +# Group object class for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_GROUP_OBJECTCLASS= + +# Group ID attribute for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_GROUP_ID_ATTRIBUTE= + +# Group name attribute for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_GROUP_NAME_ATTRIBUTE= + +# Group member attribute for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_GROUP_MEMBER_ATTRIBUTE= + +# Group description attribute for the Identity service LDAP backend. +CONFIG_KEYSTONE_LDAP_GROUP_DESC_ATTRIBUTE= + +# Comma-separated list of attributes stripped from LDAP group entry +# upon update. +CONFIG_KEYSTONE_LDAP_GROUP_ATTRIBUTE_IGNORE= + +# Specify 'y' if you want to be able to create Identity service +# groups through the Identity service interface; specify 'n' if you +# will create directly in the LDAP backend. ['n', 'y'] +CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_CREATE=n + +# Specify 'y' if you want to be able to update Identity service +# groups through the Identity service interface; specify 'n' if you +# will update directly in the LDAP backend. ['n', 'y'] +CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_UPDATE=n + +# Specify 'y' if you want to be able to delete Identity service +# groups through the Identity service interface; specify 'n' if you +# will delete directly in the LDAP backend. ['n', 'y'] +CONFIG_KEYSTONE_LDAP_GROUP_ALLOW_DELETE=n + +# List of additional LDAP attributes used for mapping additional +# attribute mappings for groups. The attribute-mapping format is +# <ldap_attr>:<group_attr>, where ldap_attr is the attribute in the +# LDAP entry and group_attr is the Identity API attribute. +CONFIG_KEYSTONE_LDAP_GROUP_ADDITIONAL_ATTRIBUTE_MAPPING= + +# Specify 'y' if the Identity service LDAP backend should use TLS. +# ['n', 'y'] +CONFIG_KEYSTONE_LDAP_USE_TLS=n + +# CA certificate directory for Identity service LDAP backend (if TLS +# is used). +CONFIG_KEYSTONE_LDAP_TLS_CACERTDIR= + +# CA certificate file for Identity service LDAP backend (if TLS is +# used). +CONFIG_KEYSTONE_LDAP_TLS_CACERTFILE= + +# Certificate-checking strictness level for Identity service LDAP +# backend; valid options are: never, allow, demand. ['never', 'allow', +# 'demand'] +CONFIG_KEYSTONE_LDAP_TLS_REQ_CERT=demand + +# Password to use for the Image service (glance) to access the +# database. +CONFIG_GLANCE_DB_PW=qum5net + +# Password to use for the Image service to authenticate with the +# Identity service. +CONFIG_GLANCE_KS_PW=qum5net + +# Storage backend for the Image service (controls how the Image +# service stores disk images). Valid options are: file or swift +# (Object Storage). The Object Storage service must be enabled to use +# it as a working backend; otherwise, Packstack falls back to 'file'. +# ['file', 'swift'] +CONFIG_GLANCE_BACKEND=file + +# Password to use for the Block Storage service (cinder) to access +# the database. +CONFIG_CINDER_DB_PW=qum5net + +# Enter y if cron job for removing soft deleted DB rows should be +# created.
+CONFIG_CINDER_DB_PURGE_ENABLE=True + +# Password to use for the Block Storage service to authenticate with +# the Identity service. +CONFIG_CINDER_KS_PW=qum5net + +# Storage backend to use for the Block Storage service; valid options +# are: lvm, gluster, nfs, vmdk, netapp. ['lvm', 'gluster', 'nfs', +# 'vmdk', 'netapp'] +CONFIG_CINDER_BACKEND=lvm + +# Specify 'y' to create the Block Storage volumes group. That is, +# Packstack creates a raw disk image in /var/lib/cinder, and mounts it +# using a loopback device. This should only be used for testing on a +# proof-of-concept installation of the Block Storage service (a file- +# backed volume group is not suitable for production usage). ['y', +# 'n'] +CONFIG_CINDER_VOLUMES_CREATE=y + +# Size of Block Storage volumes group. Actual volume size will be +# extended with 3% more space for VG metadata. Remember that the size +# of the volume group will restrict the amount of disk space that you +# can expose to Compute instances, and that the specified amount must +# be available on the device used for /var/lib/cinder. +CONFIG_CINDER_VOLUMES_SIZE=20G + +# A single or comma-separated list of Red Hat Storage (gluster) +# volume shares to mount. Example: 'ip-address:/vol-name', 'domain +# :/vol-name' +CONFIG_CINDER_GLUSTER_MOUNTS= + +# A single or comma-separated list of NFS exports to mount. Example: +# 'ip-address:/export-name' +CONFIG_CINDER_NFS_MOUNTS= + +# Administrative user account name used to access the NetApp storage +# system or proxy server. +CONFIG_CINDER_NETAPP_LOGIN= + +# Password for the NetApp administrative user account specified in +# the CONFIG_CINDER_NETAPP_LOGIN parameter. +CONFIG_CINDER_NETAPP_PASSWORD= + +# Hostname (or IP address) for the NetApp storage system or proxy +# server. +CONFIG_CINDER_NETAPP_HOSTNAME= + +# The TCP port to use for communication with the storage system or +# proxy. If not specified, Data ONTAP drivers will use 80 for HTTP and +# 443 for HTTPS; E-Series will use 8080 for HTTP and 8443 for HTTPS. +# Defaults to 80. +CONFIG_CINDER_NETAPP_SERVER_PORT=80 + +# Storage family type used on the NetApp storage system; valid +# options are ontap_7mode for using Data ONTAP operating in 7-Mode, +# ontap_cluster for using clustered Data ONTAP, or E-Series for NetApp +# E-Series. Defaults to ontap_cluster. ['ontap_7mode', +# 'ontap_cluster', 'eseries'] +CONFIG_CINDER_NETAPP_STORAGE_FAMILY=ontap_cluster + +# The transport protocol used when communicating with the NetApp +# storage system or proxy server. Valid values are http or https. +# Defaults to 'http'. ['http', 'https'] +CONFIG_CINDER_NETAPP_TRANSPORT_TYPE=http + +# Storage protocol to be used on the data path with the NetApp +# storage system; valid options are iscsi, fc, nfs. Defaults to nfs. +# ['iscsi', 'fc', 'nfs'] +CONFIG_CINDER_NETAPP_STORAGE_PROTOCOL=nfs + +# Quantity to be multiplied by the requested volume size to ensure +# enough space is available on the virtual storage server (Vserver) to +# fulfill the volume creation request. Defaults to 1.0. +CONFIG_CINDER_NETAPP_SIZE_MULTIPLIER=1.0 + +# Time period (in minutes) that is allowed to elapse after the image +# is last accessed, before it is deleted from the NFS image cache. +# When a cache-cleaning cycle begins, images in the cache that have +# not been accessed in the last M minutes, where M is the value of +# this parameter, are deleted from the cache to create free space on +# the NFS share. Defaults to 720. 
+CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES=720 + +# If the percentage of available space for an NFS share has dropped +# below the value specified by this parameter, the NFS image cache is +# cleaned. Defaults to 20. +CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_START=20 + +# When the percentage of available space on an NFS share has reached +# the percentage specified by this parameter, the driver stops +# clearing files from the NFS image cache that have not been accessed +# in the last M minutes, where M is the value of the +# CONFIG_CINDER_NETAPP_EXPIRY_THRES_MINUTES parameter. Defaults to 60. +CONFIG_CINDER_NETAPP_THRES_AVL_SIZE_PERC_STOP=60 + +# Single or comma-separated list of NetApp NFS shares for Block +# Storage to use. Format: ip-address:/export-name. Defaults to ''. +CONFIG_CINDER_NETAPP_NFS_SHARES= + +# File with the list of available NFS shares. Defaults to +# '/etc/cinder/shares.conf'. +CONFIG_CINDER_NETAPP_NFS_SHARES_CONFIG=/etc/cinder/shares.conf + +# This parameter is only utilized when the storage protocol is +# configured to use iSCSI or FC. This parameter is used to restrict +# provisioning to the specified controller volumes. Specify the value +# of this parameter to be a comma separated list of NetApp controller +# volume names to be used for provisioning. Defaults to ''. +CONFIG_CINDER_NETAPP_VOLUME_LIST= + +# The vFiler unit on which provisioning of block storage volumes will +# be done. This parameter is only used by the driver when connecting +# to an instance with a storage family of Data ONTAP operating in +# 7-Mode Only use this parameter when utilizing the MultiStore feature +# on the NetApp storage system. Defaults to ''. +CONFIG_CINDER_NETAPP_VFILER= + +# The name of the config.conf stanza for a Data ONTAP (7-mode) HA +# partner. This option is only used by the driver when connecting to +# an instance with a storage family of Data ONTAP operating in 7-Mode, +# and it is required if the storage protocol selected is FC. Defaults +# to ''. +CONFIG_CINDER_NETAPP_PARTNER_BACKEND_NAME= + +# This option specifies the virtual storage server (Vserver) name on +# the storage cluster on which provisioning of block storage volumes +# should occur. Defaults to ''. +CONFIG_CINDER_NETAPP_VSERVER= + +# Restricts provisioning to the specified controllers. Value must be +# a comma-separated list of controller hostnames or IP addresses to be +# used for provisioning. This option is only utilized when the storage +# family is configured to use E-Series. Defaults to ''. +CONFIG_CINDER_NETAPP_CONTROLLER_IPS= + +# Password for the NetApp E-Series storage array. Defaults to ''. +CONFIG_CINDER_NETAPP_SA_PASSWORD= + +# This option is used to define how the controllers in the E-Series +# storage array will work with the particular operating system on the +# hosts that are connected to it. Defaults to 'linux_dm_mp' +CONFIG_CINDER_NETAPP_ESERIES_HOST_TYPE=linux_dm_mp + +# Path to the NetApp E-Series proxy application on a proxy server. +# The value is combined with the value of the +# CONFIG_CINDER_NETAPP_TRANSPORT_TYPE, CONFIG_CINDER_NETAPP_HOSTNAME, +# and CONFIG_CINDER_NETAPP_HOSTNAME options to create the URL used by +# the driver to connect to the proxy application. Defaults to +# '/devmgr/v2'. +CONFIG_CINDER_NETAPP_WEBSERVICE_PATH=/devmgr/v2 + +# Restricts provisioning to the specified storage pools. Only dynamic +# disk pools are currently supported. The value must be a comma- +# separated list of disk pool names to be used for provisioning. +# Defaults to ''. 
+CONFIG_CINDER_NETAPP_STORAGE_POOLS= + +# Password to use for OpenStack Bare Metal Provisioning (ironic) to +# access the database. +CONFIG_IRONIC_DB_PW=PW_PLACEHOLDER + +# Password to use for OpenStack Bare Metal Provisioning to +# authenticate with the Identity service. +CONFIG_IRONIC_KS_PW=PW_PLACEHOLDER + +# Enter y if cron job for removing soft deleted DB rows should be +# created. +CONFIG_NOVA_DB_PURGE_ENABLE=True + +# Password to use for the Compute service (nova) to access the +# database. +CONFIG_NOVA_DB_PW=qum5net + +# Password to use for the Compute service to authenticate with the +# Identity service. +CONFIG_NOVA_KS_PW=qum5net + +# Overcommitment ratio for virtual to physical CPUs. Specify 1.0 to +# disable CPU overcommitment. +CONFIG_NOVA_SCHED_CPU_ALLOC_RATIO=16.0 + +# Overcommitment ratio for virtual to physical RAM. Specify 1.0 to +# disable RAM overcommitment. +CONFIG_NOVA_SCHED_RAM_ALLOC_RATIO=1.5 + +# Protocol used for instance migration. Valid options are: tcp and +# ssh. Note that by default, the Compute user is created with the +# /sbin/nologin shell so that the SSH protocol will not work. To make +# the SSH protocol work, you must configure the Compute user on +# compute hosts manually. ['tcp', 'ssh'] +CONFIG_NOVA_COMPUTE_MIGRATE_PROTOCOL=tcp + +# Manager that runs the Compute service. +CONFIG_NOVA_COMPUTE_MANAGER=nova.compute.manager.ComputeManager + +# PEM encoded certificate to be used for ssl on the https server, +# leave blank if one should be generated, this certificate should not +# require a passphrase. If CONFIG_HORIZON_SSL is set to 'n' this +# parameter is ignored. +CONFIG_VNC_SSL_CERT= + +# SSL keyfile corresponding to the certificate if one was entered. If +# CONFIG_HORIZON_SSL is set to 'n' this parameter is ignored. +CONFIG_VNC_SSL_KEY= + +# Enter the PCI passthrough array of hash in JSON style for +# controller eg. [{"vendor_id":"1234", "product_id":"5678", +# "name":"default"}, {...}] +CONFIG_NOVA_PCI_ALIAS= + +# Enter the PCI passthrough whitelist array of hash in JSON style for +# controller eg. [{"vendor_id":"1234", "product_id":"5678", +# "name':"default"}, {...}] +CONFIG_NOVA_PCI_PASSTHROUGH_WHITELIST= + +# Private interface for flat DHCP on the Compute servers. +CONFIG_NOVA_COMPUTE_PRIVIF= + +# Compute Network Manager. ['^nova\.network\.manager\.\w+Manager$'] +CONFIG_NOVA_NETWORK_MANAGER=nova.network.manager.FlatDHCPManager + +# Public interface on the Compute network server. +CONFIG_NOVA_NETWORK_PUBIF=eth0 + +# Private interface for flat DHCP on the Compute network server. +CONFIG_NOVA_NETWORK_PRIVIF= + +# IP Range for flat DHCP. ['^[\:\.\da-fA-f]+(\/\d+){0,1}$'] +CONFIG_NOVA_NETWORK_FIXEDRANGE=192.168.32.0/22 + +# IP Range for floating IP addresses. ['^[\:\.\da- +# fA-f]+(\/\d+){0,1}$'] +CONFIG_NOVA_NETWORK_FLOATRANGE=10.3.4.0/22 + +# Specify 'y' to automatically assign a floating IP to new instances. +# ['y', 'n'] +CONFIG_NOVA_NETWORK_AUTOASSIGNFLOATINGIP=n + +# First VLAN for private networks (Compute networking). +CONFIG_NOVA_NETWORK_VLAN_START=100 + +# Number of networks to support (Compute networking). +CONFIG_NOVA_NETWORK_NUMBER=1 + +# Number of addresses in each private subnet (Compute networking). +CONFIG_NOVA_NETWORK_SIZE=255 + +# Password to use for OpenStack Networking (neutron) to authenticate +# with the Identity service. +CONFIG_NEUTRON_KS_PW=qum5net + +# The password to use for OpenStack Networking to access the +# database. 
+CONFIG_NEUTRON_DB_PW=qum5net + +# The name of the Open vSwitch bridge (or empty for linuxbridge) for +# the OpenStack Networking L3 agent to use for external traffic. +# Specify 'provider' if you intend to use a provider network to handle +# external traffic. +CONFIG_NEUTRON_L3_EXT_BRIDGE=br-ex + +# Password for the OpenStack Networking metadata agent. +CONFIG_NEUTRON_METADATA_PW=qum5net + +# Specify 'y' to install OpenStack Networking's Load-Balancing- +# as-a-Service (LBaaS). ['y', 'n'] +CONFIG_LBAAS_INSTALL=n + +# Specify 'y' to install OpenStack Networking's L3 Metering agent +# ['y', 'n'] +CONFIG_NEUTRON_METERING_AGENT_INSTALL=n + +# Specify 'y' to configure OpenStack Networking's Firewall- +# as-a-Service (FWaaS). ['y', 'n'] +CONFIG_NEUTRON_FWAAS=n + +# Specify 'y' to configure OpenStack Networking's VPN-as-a-Service +# (VPNaaS). ['y', 'n'] +CONFIG_NEUTRON_VPNAAS=n + +# Comma-separated list of network-type driver entry points to be +# loaded from the neutron.ml2.type_drivers namespace. ['local', +# 'flat', 'vlan', 'gre', 'vxlan'] +CONFIG_NEUTRON_ML2_TYPE_DRIVERS=vxlan + +# Comma-separated, ordered list of network types to allocate as +# tenant networks. The 'local' value is only useful for single-box +# testing and provides no connectivity between hosts. ['local', +# 'vlan', 'gre', 'vxlan'] +CONFIG_NEUTRON_ML2_TENANT_NETWORK_TYPES=vxlan + +# Comma-separated ordered list of networking mechanism driver entry +# points to be loaded from the neutron.ml2.mechanism_drivers +# namespace. ['logger', 'test', 'linuxbridge', 'openvswitch', +# 'hyperv', 'ncs', 'arista', 'cisco_nexus', 'mlnx', 'l2population', +# 'sriovnicswitch'] +CONFIG_NEUTRON_ML2_MECHANISM_DRIVERS=openvswitch + +# Comma-separated list of physical_network names with which flat +# networks can be created. Use * to allow flat networks with arbitrary +# physical_network names. +CONFIG_NEUTRON_ML2_FLAT_NETWORKS=* + +# Comma-separated list of <physical_network>:<vlan_min>:<vlan_max> or +# <physical_network> specifying physical_network names usable for VLAN +# provider and tenant networks, as well as ranges of VLAN tags on each +# available for allocation to tenant networks. +CONFIG_NEUTRON_ML2_VLAN_RANGES= + +# Comma-separated list of <tun_min>:<tun_max> tuples enumerating +# ranges of GRE tunnel IDs that are available for tenant-network +# allocation. A tuple must be an array with tun_max +1 - tun_min > +# 1000000. +CONFIG_NEUTRON_ML2_TUNNEL_ID_RANGES= + +# Comma-separated list of addresses for VXLAN multicast group. If +# left empty, disables VXLAN from sending allocate broadcast traffic +# (disables multicast VXLAN mode). Should be a Multicast IP (v4 or v6) +# address. +CONFIG_NEUTRON_ML2_VXLAN_GROUP= + +# Comma-separated list of <vni_min>:<vni_max> tuples enumerating +# ranges of VXLAN VNI IDs that are available for tenant network +# allocation. Minimum value is 0 and maximum value is 16777215. +CONFIG_NEUTRON_ML2_VNI_RANGES=10:100 + +# Name of the L2 agent to be used with OpenStack Networking. +# ['linuxbridge', 'openvswitch'] +CONFIG_NEUTRON_L2_AGENT=openvswitch + +# Comma separated list of supported PCI vendor devices defined by +# vendor_id:product_id according to the PCI ID Repository. +CONFIG_NEUTRON_ML2_SUPPORTED_PCI_VENDOR_DEVS=['15b3:1004', '8086:10ca'] + +# Specify 'y' if the sriov agent is required +CONFIG_NEUTRON_ML2_SRIOV_AGENT_REQUIRED=n + +# Comma-separated list of interface mappings for the OpenStack +# Networking ML2 SRIOV agent. Each tuple in the list must be in the +# format <physical_network>:<interface>. Example: +# physnet1:eth1,physnet2:eth2,physnet3:eth3.
+CONFIG_NEUTRON_ML2_SRIOV_INTERFACE_MAPPINGS= + +# Comma-separated list of interface mappings for the OpenStack +# Networking linuxbridge plugin. Each tuple in the list must be in the +# format <physical_network>:<interface>. Example: +# physnet1:eth1,physnet2:eth2,physnet3:eth3. +CONFIG_NEUTRON_LB_INTERFACE_MAPPINGS= + +# Comma-separated list of bridge mappings for the OpenStack +# Networking Open vSwitch plugin. Each tuple in the list must be in +# the format <physical_network>:<bridge>. Example: physnet1:br- +# eth1,physnet2:br-eth2,physnet3:br-eth3 +CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS= + +# Comma-separated list of colon-separated Open vSwitch +# <bridge>:<interface> pairs. The interface will be added to the +# associated bridge. If you desire the bridge to be persistent a value +# must be added to this directive, also +# CONFIG_NEUTRON_OVS_BRIDGE_MAPPINGS must be set in order to create +# the proper port. This can be achieved from the command line by +# issuing the following command: packstack --allinone --os-neutron- +# ovs-bridge-mappings=ext-net:br-ex --os-neutron-ovs-bridge-interfaces +# =br-ex:eth0 +CONFIG_NEUTRON_OVS_BRIDGE_IFACES= + +# Interface for the Open vSwitch tunnel. Packstack overrides the IP +# address used for tunnels on this hypervisor to the IP found on the +# specified interface (for example, eth1). +CONFIG_NEUTRON_OVS_TUNNEL_IF= + +# VXLAN UDP port. +CONFIG_NEUTRON_OVS_VXLAN_UDP_PORT=4789 + +# Specify 'y' to set up Horizon communication over https. ['y', 'n'] +CONFIG_HORIZON_SSL=n + +# Secret key to use for Horizon Secret Encryption Key. +CONFIG_HORIZON_SECRET_KEY=e2ba54f295f84d0c8d645de8e36fcc33 + +# PEM-encoded certificate to be used for SSL connections on the https +# server. To generate a certificate, leave blank. +CONFIG_HORIZON_SSL_CERT= + +# SSL keyfile corresponding to the certificate if one was specified. +# The certificate should not require a passphrase. +CONFIG_HORIZON_SSL_KEY= + +CONFIG_HORIZON_SSL_CACERT= + +# Password to use for the Object Storage service to authenticate with +# the Identity service. +CONFIG_SWIFT_KS_PW=qum5net + +# Comma-separated list of devices to use as storage device for Object +# Storage. Each entry must take the format /path/to/dev (for example, +# specifying /dev/vdb installs /dev/vdb as the Object Storage storage +# device; Packstack does not create the filesystem, you must do this +# first). If left empty, Packstack creates a loopback device for test +# setup. +CONFIG_SWIFT_STORAGES= + +# Number of Object Storage storage zones; this number MUST be no +# larger than the number of configured storage devices. +CONFIG_SWIFT_STORAGE_ZONES=1 + +# Number of Object Storage storage replicas; this number MUST be no +# larger than the number of configured storage zones. +CONFIG_SWIFT_STORAGE_REPLICAS=1 + +# File system type for storage nodes. ['xfs', 'ext4'] +CONFIG_SWIFT_STORAGE_FSTYPE=ext4 + +# Custom seed number to use for swift_hash_path_suffix in +# /etc/swift/swift.conf. If you do not provide a value, a seed number +# is automatically generated. +CONFIG_SWIFT_HASH=54760d6b88814b53 + +# Size of the Object Storage loopback file storage device. +CONFIG_SWIFT_STORAGE_SIZE=2G + +# Password used by Orchestration service user to authenticate against +# the database. +CONFIG_HEAT_DB_PW=PW_PLACEHOLDER + +# Encryption key to use for authentication in the Orchestration +# database (16, 24, or 32 chars). +CONFIG_HEAT_AUTH_ENC_KEY=2e06ca7c4aa3400c + +# Password to use for the Orchestration service to authenticate with +# the Identity service.
+CONFIG_HEAT_KS_PW=PW_PLACEHOLDER + +# Specify 'y' to install the Orchestration CloudWatch API. ['y', 'n'] +CONFIG_HEAT_CLOUDWATCH_INSTALL=n + +# Specify 'y' to install the Orchestration CloudFormation API. ['y', +# 'n'] +CONFIG_HEAT_CFN_INSTALL=n + +# Name of the Identity domain for Orchestration. +CONFIG_HEAT_DOMAIN=heat + +# Name of the Identity domain administrative user for Orchestration. +CONFIG_HEAT_DOMAIN_ADMIN=heat_admin + +# Password for the Identity domain administrative user for +# Orchestration. +CONFIG_HEAT_DOMAIN_PASSWORD=PW_PLACEHOLDER + +# Specify 'y' to provision for demo usage and testing. ['y', 'n'] +CONFIG_PROVISION_DEMO=y + +# Specify 'y' to configure the OpenStack Integration Test Suite +# (tempest) for testing. The test suite requires OpenStack Networking +# to be installed. ['y', 'n'] +CONFIG_PROVISION_TEMPEST=n + +# CIDR network address for the floating IP subnet. +CONFIG_PROVISION_DEMO_FLOATRANGE=172.24.4.224/28 + +# The name to be assigned to the demo image in Glance (default +# "cirros"). +CONFIG_PROVISION_IMAGE_NAME=cirros + +# A URL or local file location for an image to download and provision +# in Glance (defaults to a URL for a recent "cirros" image). +CONFIG_PROVISION_IMAGE_URL=http://download.cirros-cloud.net/0.3.3/cirros-0.3.3-x86_64-disk.img + +# Format for the demo image (default "qcow2"). +CONFIG_PROVISION_IMAGE_FORMAT=qcow2 + +# User to use when connecting to instances booted from the demo +# image. +CONFIG_PROVISION_IMAGE_SSH_USER=cirros + +# Name of the Integration Test Suite provisioning user. If you do not +# provide a user name, Tempest is configured in a standalone mode. +CONFIG_PROVISION_TEMPEST_USER= + +# Password to use for the Integration Test Suite provisioning user. +CONFIG_PROVISION_TEMPEST_USER_PW=PW_PLACEHOLDER + +# CIDR network address for the floating IP subnet. +CONFIG_PROVISION_TEMPEST_FLOATRANGE=172.24.4.224/28 + +# URI of the Integration Test Suite git repository. +CONFIG_PROVISION_TEMPEST_REPO_URI=https://github.com/openstack/tempest.git + +# Revision (branch) of the Integration Test Suite git repository. +CONFIG_PROVISION_TEMPEST_REPO_REVISION=master + +# Specify 'y' to configure the Open vSwitch external bridge for an +# all-in-one deployment (the L3 external bridge acts as the gateway +# for virtual machines). ['y', 'n'] +CONFIG_PROVISION_OVS_BRIDGE=y + +# Password to use for OpenStack Data Processing (sahara) to access +# the database. +CONFIG_SAHARA_DB_PW=PW_PLACEHOLDER + +# Password to use for OpenStack Data Processing to authenticate with +# the Identity service. +CONFIG_SAHARA_KS_PW=PW_PLACEHOLDER + +# Secret key for signing Telemetry service (ceilometer) messages. +CONFIG_CEILOMETER_SECRET=d1cd21accf764049 + +# Password to use for Telemetry to authenticate with the Identity +# service. +CONFIG_CEILOMETER_KS_PW=qum5net + +# Backend driver for Telemetry's group membership coordination. +# ['redis', 'none'] +CONFIG_CEILOMETER_COORDINATION_BACKEND=redis + +# IP address of the server on which to install MongoDB. +CONFIG_MONGODB_HOST=VARINET4ADDR + +# IP address of the server on which to install the Redis master +# server. +CONFIG_REDIS_MASTER_HOST=VARINET4ADDR + +# Port on which the Redis server(s) listens. +CONFIG_REDIS_PORT=6379 + +# Specify 'y' to have Redis try to use HA. ['y', 'n'] +CONFIG_REDIS_HA=n + +# Hosts on which to install Redis slaves. +CONFIG_REDIS_SLAVE_HOSTS= + +# Hosts on which to install Redis sentinel servers. +CONFIG_REDIS_SENTINEL_HOSTS= + +# Host to configure as the Redis coordination sentinel. 
+CONFIG_REDIS_SENTINEL_CONTACT_HOST= + +# Port on which Redis sentinel servers listen. +CONFIG_REDIS_SENTINEL_PORT=26379 + +# Quorum value for Redis sentinel servers. +CONFIG_REDIS_SENTINEL_QUORUM=2 + +# Name of the master server watched by the Redis sentinel. ['[a-z]+'] +CONFIG_REDIS_MASTER_NAME=mymaster + +# Password to use for OpenStack Database-as-a-Service (trove) to +# access the database. +CONFIG_TROVE_DB_PW=PW_PLACEHOLDER + +# Password to use for OpenStack Database-as-a-Service to authenticate +# with the Identity service. +CONFIG_TROVE_KS_PW=PW_PLACEHOLDER + +# User name to use when OpenStack Database-as-a-Service connects to +# the Compute service. +CONFIG_TROVE_NOVA_USER=trove + +# Tenant to use when OpenStack Database-as-a-Service connects to the +# Compute service. +CONFIG_TROVE_NOVA_TENANT=services + +# Password to use when OpenStack Database-as-a-Service connects to +# the Compute service. +CONFIG_TROVE_NOVA_PW=PW_PLACEHOLDER + +# Password of the nagiosadmin user on the Nagios server. +CONFIG_NAGIOS_PW=PW_PLACEHOLDER diff --git a/qa/qa_scripts/openstack/files/nova.template.conf b/qa/qa_scripts/openstack/files/nova.template.conf new file mode 100644 index 00000000..c63c8648 --- /dev/null +++ b/qa/qa_scripts/openstack/files/nova.template.conf @@ -0,0 +1,3698 @@ +[DEFAULT] + +# +# From nova +# + +# Number of times to retry live-migration before failing. If == -1, try until +# out of hosts. If == 0, only try once, no retries. (integer value) +#migrate_max_retries=-1 + +# The topic console auth proxy nodes listen on (string value) +#consoleauth_topic=consoleauth + +# The driver to use for database access (string value) +#db_driver=nova.db + +# Backend to use for IPv6 generation (string value) +#ipv6_backend=rfc2462 + +# The driver for servicegroup service (valid options are: db, zk, mc) (string +# value) +#servicegroup_driver=db + +# The availability_zone to show internal services under (string value) +#internal_service_availability_zone=internal +internal_service_availability_zone=internal + +# Default compute node availability_zone (string value) +#default_availability_zone=nova +default_availability_zone=nova + +# The topic cert nodes listen on (string value) +#cert_topic=cert + +# Image ID used when starting up a cloudpipe vpn server (string value) +#vpn_image_id=0 + +# Flavor for vpn instances (string value) +#vpn_flavor=m1.tiny + +# Template for cloudpipe instance boot script (string value) +#boot_script_template=$pybasedir/nova/cloudpipe/bootscript.template + +# Network to push into openvpn config (string value) +#dmz_net=10.0.0.0 + +# Netmask to push into openvpn config (string value) +#dmz_mask=255.255.255.0 + +# Suffix to add to project name for vpn key and secgroups (string value) +#vpn_key_suffix=-vpn + +# Record sessions to FILE.[session_number] (boolean value) +#record=false + +# Become a daemon (background process) (boolean value) +#daemon=false + +# Disallow non-encrypted connections (boolean value) +#ssl_only=false + +# Source is ipv6 (boolean value) +#source_is_ipv6=false + +# SSL certificate file (string value) +#cert=self.pem + +# SSL key file (if separate from cert) (string value) +#key= + +# Run webserver on same port. Serve files from DIR. 
(string value) +#web=/usr/share/spice-html5 + +# Host on which to listen for incoming requests (string value) +#novncproxy_host=0.0.0.0 +novncproxy_host=0.0.0.0 + +# Port on which to listen for incoming requests (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#novncproxy_port=6080 +novncproxy_port=6080 + +# Host on which to listen for incoming requests (string value) +#serialproxy_host=0.0.0.0 + +# Port on which to listen for incoming requests (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#serialproxy_port=6083 + +# Host on which to listen for incoming requests (string value) +#html5proxy_host=0.0.0.0 + +# Port on which to listen for incoming requests (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#html5proxy_port=6082 + +# Driver to use for the console proxy (string value) +#console_driver=nova.console.xvp.XVPConsoleProxy + +# Stub calls to compute worker for tests (boolean value) +#stub_compute=false + +# Publicly visible name for this console host (string value) +#console_public_hostname=x86-017.build.eng.bos.redhat.com + +# The topic console proxy nodes listen on (string value) +#console_topic=console + +# XVP conf template (string value) +#console_xvp_conf_template=$pybasedir/nova/console/xvp.conf.template + +# Generated XVP conf file (string value) +#console_xvp_conf=/etc/xvp.conf + +# XVP master process pid file (string value) +#console_xvp_pid=/var/run/xvp.pid + +# XVP log file (string value) +#console_xvp_log=/var/log/xvp.log + +# Port for XVP to multiplex VNC connections on (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#console_xvp_multiplex_port=5900 + +# How many seconds before deleting tokens (integer value) +#console_token_ttl=600 + +# Filename of root CA (string value) +#ca_file=cacert.pem + +# Filename of private key (string value) +#key_file=private/cakey.pem + +# Filename of root Certificate Revocation List (string value) +#crl_file=crl.pem + +# Where we keep our keys (string value) +#keys_path=$state_path/keys + +# Where we keep our root CA (string value) +#ca_path=$state_path/CA + +# Should we use a CA for each project? (boolean value) +#use_project_ca=false + +# Subject for certificate for users, %s for project, user, timestamp (string +# value) +#user_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=%.16s-%.16s-%s + +# Subject for certificate for projects, %s for project, timestamp (string +# value) +#project_cert_subject=/C=US/ST=California/O=OpenStack/OU=NovaDev/CN=project-ca-%.16s-%s + +# Services to be added to the available pool on create (boolean value) +#enable_new_services=true + +# Template string to be used to generate instance names (string value) +#instance_name_template=instance-%08x + +# Template string to be used to generate snapshot names (string value) +#snapshot_name_template=snapshot-%s + +# When set, compute API will consider duplicate hostnames invalid within the +# specified scope, regardless of case. Should be empty, "project" or "global". 
+# (string value) +#osapi_compute_unique_server_name_scope = + +# Make exception message format errors fatal (boolean value) +#fatal_exception_format_errors=false + +# Parent directory for tempdir used for image decryption (string value) +#image_decryption_dir=/tmp + +# Hostname or IP for OpenStack to use when accessing the S3 api (string value) +#s3_host=$my_ip + +# Port used when accessing the S3 api (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#s3_port=3333 + +# Access key to use for S3 server for images (string value) +#s3_access_key=notchecked + +# Secret key to use for S3 server for images (string value) +#s3_secret_key=notchecked + +# Whether to use SSL when talking to S3 (boolean value) +#s3_use_ssl=false + +# Whether to affix the tenant id to the access key when downloading from S3 +# (boolean value) +#s3_affix_tenant=false + +# IP address of this host (string value) +#my_ip=10.16.48.92 + +# Block storage IP address of this host (string value) +#my_block_storage_ip=$my_ip + +# Name of this node. This can be an opaque identifier. It is not necessarily +# a hostname, FQDN, or IP address. However, the node name must be valid within +# an AMQP key, and if using ZeroMQ, a valid hostname, FQDN, or IP address +# (string value) +#host=x86-017.build.eng.bos.redhat.com + +# Use IPv6 (boolean value) +#use_ipv6=false +use_ipv6=False + +# If set, send compute.instance.update notifications on instance state changes. +# Valid values are None for no notifications, "vm_state" for notifications on +# VM state changes, or "vm_and_task_state" for notifications on VM and task +# state changes. (string value) +#notify_on_state_change= + +# If set, send api.fault notifications on caught exceptions in the API service. +# (boolean value) +#notify_api_faults=false +notify_api_faults=False + +# Default notification level for outgoing notifications (string value) +# Allowed values: DEBUG, INFO, WARN, ERROR, CRITICAL +#default_notification_level=INFO + +# Default publisher_id for outgoing notifications (string value) +#default_publisher_id= + +# DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE NETWORK. If True in +# multi_host mode, all compute hosts share the same dhcp address. The same IP +# address used for DHCP will be added on each nova-network node which is only +# visible to the vms on the same host. (boolean value) +#share_dhcp_address=false + +# DEPRECATED: THIS VALUE SHOULD BE SET WHEN CREATING THE NETWORK. MTU setting +# for network interface. (integer value) +#network_device_mtu= + +# Path to S3 buckets (string value) +#buckets_path=$state_path/buckets + +# IP address for S3 API to listen (string value) +#s3_listen=0.0.0.0 + +# Port for S3 API to listen (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#s3_listen_port=3333 + +# Directory where the nova python module is installed (string value) +#pybasedir=/builddir/build/BUILD/nova-12.0.2 + +# Directory where nova binaries are installed (string value) +#bindir=/usr/local/bin + +# Top-level directory for maintaining nova's state (string value) +#state_path=/var/lib/nova +state_path=/var/lib/nova + +# An alias for a PCI passthrough device requirement. This allows users to +# specify the alias in the extra_spec for a flavor, without needing to repeat +# all the PCI property requirements. For example: pci_alias = { "name": +# "QuickAssist", "product_id": "0443", "vendor_id": "8086", +# "device_type": "ACCEL" } defines an alias for the Intel QuickAssist card. 
+# (multi valued) (multi valued) +#pci_alias = + +# White list of PCI devices available to VMs. For example: +# pci_passthrough_whitelist = [{"vendor_id": "8086", "product_id": "0443"}] +# (multi valued) +#pci_passthrough_whitelist = + +# Number of instances allowed per project (integer value) +#quota_instances=10 + +# Number of instance cores allowed per project (integer value) +#quota_cores=20 + +# Megabytes of instance RAM allowed per project (integer value) +#quota_ram=51200 + +# Number of floating IPs allowed per project (integer value) +#quota_floating_ips=10 + +# Number of fixed IPs allowed per project (this should be at least the number +# of instances allowed) (integer value) +#quota_fixed_ips=-1 + +# Number of metadata items allowed per instance (integer value) +#quota_metadata_items=128 + +# Number of injected files allowed (integer value) +#quota_injected_files=5 + +# Number of bytes allowed per injected file (integer value) +#quota_injected_file_content_bytes=10240 + +# Length of injected file path (integer value) +#quota_injected_file_path_length=255 + +# Number of security groups per project (integer value) +#quota_security_groups=10 + +# Number of security rules per security group (integer value) +#quota_security_group_rules=20 + +# Number of key pairs per user (integer value) +#quota_key_pairs=100 + +# Number of server groups per project (integer value) +#quota_server_groups=10 + +# Number of servers per server group (integer value) +#quota_server_group_members=10 + +# Number of seconds until a reservation expires (integer value) +#reservation_expire=86400 + +# Count of reservations until usage is refreshed. This defaults to 0(off) to +# avoid additional load but it is useful to turn on to help keep quota usage up +# to date and reduce the impact of out of sync usage issues. (integer value) +#until_refresh=0 + +# Number of seconds between subsequent usage refreshes. This defaults to 0(off) +# to avoid additional load but it is useful to turn on to help keep quota usage +# up to date and reduce the impact of out of sync usage issues. Note that +# quotas are not updated on a periodic task, they will update on a new +# reservation if max_age has passed since the last reservation (integer value) +#max_age=0 + +# Default driver to use for quota checks (string value) +#quota_driver=nova.quota.DbQuotaDriver + +# Seconds between nodes reporting state to datastore (integer value) +#report_interval=10 +report_interval=10 + +# Enable periodic tasks (boolean value) +#periodic_enable=true + +# Range of seconds to randomly delay when starting the periodic task scheduler +# to reduce stampeding. (Disable by setting to 0) (integer value) +#periodic_fuzzy_delay=60 + +# A list of APIs to enable by default (list value) +#enabled_apis=ec2,osapi_compute,metadata +enabled_apis=ec2,osapi_compute,metadata + +# A list of APIs with enabled SSL (list value) +#enabled_ssl_apis = + +# The IP address on which the EC2 API will listen. (string value) +#ec2_listen=0.0.0.0 +ec2_listen=0.0.0.0 + +# The port on which the EC2 API will listen. (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#ec2_listen_port=8773 +ec2_listen_port=8773 + +# Number of workers for EC2 API service. The default will be equal to the +# number of CPUs available. (integer value) +#ec2_workers= +ec2_workers=12 + +# The IP address on which the OpenStack API will listen. (string value) +#osapi_compute_listen=0.0.0.0 +osapi_compute_listen=0.0.0.0 + +# The port on which the OpenStack API will listen. 
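The quota and listener options above are plain key=value pairs, and every *_port option is documented with a 1..65535 range. A rough stand-alone sketch using Python's configparser (Nova itself uses oslo.config; the snippet only re-types a few values from this file to show the layout and the documented port bounds):

    import configparser

    # A tiny slice of the [DEFAULT] options above, re-typed for illustration.
    sample = ("[DEFAULT]\n"
              "enabled_apis = ec2,osapi_compute,metadata\n"
              "ec2_listen = 0.0.0.0\n"
              "ec2_listen_port = 8773\n")

    cfg = configparser.ConfigParser()
    cfg.read_string(sample)

    port = cfg.getint("DEFAULT", "ec2_listen_port")
    assert 1 <= port <= 65535, "port options are documented as 1..65535"
    print(cfg.get("DEFAULT", "enabled_apis").split(","))
    # -> ['ec2', 'osapi_compute', 'metadata']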
(integer value) +# Minimum value: 1 +# Maximum value: 65535 +#osapi_compute_listen_port=8774 +osapi_compute_listen_port=8774 + +# Number of workers for OpenStack API service. The default will be the number +# of CPUs available. (integer value) +#osapi_compute_workers= +osapi_compute_workers=12 + +# OpenStack metadata service manager (string value) +#metadata_manager=nova.api.manager.MetadataManager + +# The IP address on which the metadata API will listen. (string value) +#metadata_listen=0.0.0.0 +metadata_listen=0.0.0.0 + +# The port on which the metadata API will listen. (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#metadata_listen_port=8775 +metadata_listen_port=8775 + +# Number of workers for metadata service. The default will be the number of +# CPUs available. (integer value) +#metadata_workers= +metadata_workers=12 + +# Full class name for the Manager for compute (string value) +#compute_manager=nova.compute.manager.ComputeManager +compute_manager=nova.compute.manager.ComputeManager + +# Full class name for the Manager for console proxy (string value) +#console_manager=nova.console.manager.ConsoleProxyManager + +# Manager for console auth (string value) +#consoleauth_manager=nova.consoleauth.manager.ConsoleAuthManager + +# Full class name for the Manager for cert (string value) +#cert_manager=nova.cert.manager.CertManager + +# Full class name for the Manager for network (string value) +#network_manager=nova.network.manager.FlatDHCPManager + +# Full class name for the Manager for scheduler (string value) +#scheduler_manager=nova.scheduler.manager.SchedulerManager + +# Maximum time since last check-in for up service (integer value) +#service_down_time=60 +service_down_time=60 + +# Whether to log monkey patching (boolean value) +#monkey_patch=false + +# List of modules/decorators to monkey patch (list value) +#monkey_patch_modules=nova.api.ec2.cloud:nova.notifications.notify_decorator,nova.compute.api:nova.notifications.notify_decorator + +# Length of generated instance admin passwords (integer value) +#password_length=12 + +# Time period to generate instance usages for. Time period must be hour, day, +# month or year (string value) +#instance_usage_audit_period=month + +# Start and use a daemon that can run the commands that need to be run with +# root privileges. This option is usually enabled on nodes that run nova +# compute processes (boolean value) +#use_rootwrap_daemon=false + +# Path to the rootwrap configuration file to use for running commands as root +# (string value) +#rootwrap_config=/etc/nova/rootwrap.conf +rootwrap_config=/etc/nova/rootwrap.conf + +# Explicitly specify the temporary working directory (string value) +#tempdir= + +# Port that the XCP VNC proxy should bind to (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#xvpvncproxy_port=6081 + +# Address that the XCP VNC proxy should bind to (string value) +#xvpvncproxy_host=0.0.0.0 + +# The full class name of the volume API class to use (string value) +#volume_api_class=nova.volume.cinder.API +volume_api_class=nova.volume.cinder.API + +# File name for the paste.deploy config for nova-api (string value) +#api_paste_config=api-paste.ini +api_paste_config=api-paste.ini + +# A python format string that is used as the template to generate log lines. +# The following values can be formatted into it: client_ip, date_time, +# request_line, status_code, body_length, wall_seconds. 
(string value) +#wsgi_log_format=%(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f + +# The HTTP header used to determine the scheme for the original request, even +# if it was removed by an SSL terminating proxy. Typical value is +# "HTTP_X_FORWARDED_PROTO". (string value) +#secure_proxy_ssl_header= + +# CA certificate file to use to verify connecting clients (string value) +#ssl_ca_file= + +# SSL certificate of API server (string value) +#ssl_cert_file= + +# SSL private key of API server (string value) +#ssl_key_file= + +# Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not +# supported on OS X. (integer value) +#tcp_keepidle=600 + +# Size of the pool of greenthreads used by wsgi (integer value) +#wsgi_default_pool_size=1000 + +# Maximum line size of message headers to be accepted. max_header_line may need +# to be increased when using large tokens (typically those generated by the +# Keystone v3 API with big service catalogs). (integer value) +#max_header_line=16384 + +# If False, closes the client socket connection explicitly. (boolean value) +#wsgi_keep_alive=true + +# Timeout for client connections' socket operations. If an incoming connection +# is idle for this number of seconds it will be closed. A value of '0' means +# wait forever. (integer value) +#client_socket_timeout=900 + +# +# From nova.api +# + +# File to load JSON formatted vendor data from (string value) +#vendordata_jsonfile_path= + +# Permit instance snapshot operations. (boolean value) +#allow_instance_snapshots=true + +# Whether to use per-user rate limiting for the api. This option is only used +# by v2 api. Rate limiting is removed from v2.1 api. (boolean value) +#api_rate_limit=false + +# +# The strategy to use for auth: keystone or noauth2. noauth2 is designed for +# testing only, as it does no actual credential checking. noauth2 provides +# administrative credentials only if 'admin' is specified as the username. +# (string value) +#auth_strategy=keystone +auth_strategy=keystone + +# Treat X-Forwarded-For as the canonical remote address. Only enable this if +# you have a sanitizing proxy. (boolean value) +#use_forwarded_for=false +use_forwarded_for=False + +# The IP address of the EC2 API server (string value) +#ec2_host=$my_ip + +# The internal IP address of the EC2 API server (string value) +#ec2_dmz_host=$my_ip + +# The port of the EC2 API server (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#ec2_port=8773 + +# The protocol to use when connecting to the EC2 API server (string value) +# Allowed values: http, https +#ec2_scheme=http + +# The path prefix used to call the ec2 API server (string value) +#ec2_path=/ + +# List of region=fqdn pairs separated by commas (list value) +#region_list = + +# Number of failed auths before lockout. (integer value) +#lockout_attempts=5 + +# Number of minutes to lockout if triggered. (integer value) +#lockout_minutes=15 + +# Number of minutes for lockout window. (integer value) +#lockout_window=15 + +# URL to get token from ec2 request. (string value) +#keystone_ec2_url=http://localhost:5000/v2.0/ec2tokens + +# Return the IP address as private dns hostname in describe instances (boolean +# value) +#ec2_private_dns_show_ip=false + +# Validate security group names according to EC2 specification (boolean value) +#ec2_strict_validation=true + +# Time in seconds before ec2 timestamp expires (integer value) +#ec2_timestamp_expiry=300 + +# Disable SSL certificate verification. 
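The wsgi_log_format default above is an ordinary Python %-style template over the fields listed in its comment. Filling it with made-up values shows the access-log line it produces (illustration only):

    # Fill the default wsgi_log_format from above with sample values.
    wsgi_log_format = ('%(client_ip)s "%(request_line)s" status: %(status_code)s '
                       'len: %(body_length)s time: %(wall_seconds).7f')

    sample = {
        "client_ip": "192.0.2.10",
        "request_line": "GET /v2/servers HTTP/1.1",
        "status_code": 200,
        "body_length": 1536,
        "wall_seconds": 0.0123456,
    }
    print(wsgi_log_format % sample)
    # -> 192.0.2.10 "GET /v2/servers HTTP/1.1" status: 200 len: 1536 time: 0.0123456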
(boolean value) +#keystone_ec2_insecure=false + +# List of metadata versions to skip placing into the config drive (string +# value) +#config_drive_skip_versions=1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01 + +# Driver to use for vendor data (string value) +#vendordata_driver=nova.api.metadata.vendordata_json.JsonFileVendorData + +# Time in seconds to cache metadata; 0 to disable metadata caching entirely +# (not recommended). Increasingthis should improve response times of the +# metadata API when under heavy load. Higher values may increase memoryusage +# and result in longer times for host metadata changes to take effect. (integer +# value) +#metadata_cache_expiration=15 + +# The maximum number of items returned in a single response from a collection +# resource (integer value) +#osapi_max_limit=1000 + +# Base URL that will be presented to users in links to the OpenStack Compute +# API (string value) +#osapi_compute_link_prefix= + +# Base URL that will be presented to users in links to glance resources (string +# value) +#osapi_glance_link_prefix= + +# DEPRECATED: Specify list of extensions to load when using +# osapi_compute_extension option with +# nova.api.openstack.compute.legacy_v2.contrib.select_extensions This option +# will be removed in the near future. After that point you have to run all of +# the API. (list value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#osapi_compute_ext_list = + +# Full path to fping. (string value) +#fping_path=/usr/sbin/fping +fping_path=/usr/sbin/fping + +# Enables or disables quota checking for tenant networks (boolean value) +#enable_network_quota=false + +# Control for checking for default networks (string value) +#use_neutron_default_nets=False + +# Default tenant id when creating neutron networks (string value) +#neutron_default_tenant_id=default + +# Number of private networks allowed per project (integer value) +#quota_networks=3 + +# osapi compute extension to load. This option will be removed in the near +# future. After that point you have to run all of the API. (multi valued) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#osapi_compute_extension=nova.api.openstack.compute.legacy_v2.contrib.standard_extensions + +# List of instance states that should hide network info (list value) +#osapi_hide_server_address_states=building + +# Enables returning of the instance password by the relevant server API calls +# such as create, rebuild or rescue, If the hypervisor does not support +# password injection then the password returned will not be correct (boolean +# value) +#enable_instance_password=true + +# +# From nova.compute +# + +# Allow destination machine to match source for resize. Useful when testing in +# single-host environments. (boolean value) +#allow_resize_to_same_host=false +allow_resize_to_same_host=False + +# Availability zone to use when user doesn't specify one (string value) +#default_schedule_zone= + +# These are image properties which a snapshot should not inherit from an +# instance (list value) +#non_inheritable_image_properties=cache_in_nova,bittorrent + +# Kernel image that indicates not to use a kernel, but to use a raw disk image +# instead (string value) +#null_kernel=nokernel + +# When creating multiple instances with a single request using the os-multiple- +# create API extension, this template will be used to build the display name +# for each instance. 
The benefit is that the instances end up with different +# hostnames. To restore legacy behavior of every instance having the same name, +# set this option to "%(name)s". Valid keys for the template are: name, uuid, +# count. (string value) +#multi_instance_display_name_template=%(name)s-%(count)d + +# Maximum number of devices that will result in a local image being created on +# the hypervisor node. A negative number means unlimited. Setting +# max_local_block_devices to 0 means that any request that attempts to create a +# local disk will fail. This option is meant to limit the number of local discs +# (so root local disc that is the result of --image being used, and any other +# ephemeral and swap disks). 0 does not mean that images will be automatically +# converted to volumes and boot instances from volumes - it just means that all +# requests that attempt to create a local disk will fail. (integer value) +#max_local_block_devices=3 + +# Default flavor to use for the EC2 API only. The Nova API does not support a +# default flavor. (string value) +#default_flavor=m1.small + +# Console proxy host to use to connect to instances on this host. (string +# value) +#console_host=x86-017.build.eng.bos.redhat.com + +# Name of network to use to set access IPs for instances (string value) +#default_access_ip_network_name= + +# Whether to batch up the application of IPTables rules during a host restart +# and apply all at the end of the init phase (boolean value) +#defer_iptables_apply=false + +# Where instances are stored on disk (string value) +#instances_path=$state_path/instances + +# Generate periodic compute.instance.exists notifications (boolean value) +#instance_usage_audit=false + +# Number of 1 second retries needed in live_migration (integer value) +#live_migration_retry_count=30 + +# Whether to start guests that were running before the host rebooted (boolean +# value) +#resume_guests_state_on_host_boot=false + +# Number of times to retry network allocation on failures (integer value) +#network_allocate_retries=0 + +# Maximum number of instance builds to run concurrently (integer value) +#max_concurrent_builds=10 + +# Maximum number of live migrations to run concurrently. This limit is enforced +# to avoid outbound live migrations overwhelming the host/network and causing +# failures. It is not recommended that you change this unless you are very sure +# that doing so is safe and stable in your environment. (integer value) +#max_concurrent_live_migrations=1 + +# Number of times to retry block device allocation on failures (integer value) +#block_device_allocate_retries=60 + +# The number of times to attempt to reap an instance's files. (integer value) +#maximum_instance_delete_attempts=5 + +# Interval to pull network bandwidth usage info. Not supported on all +# hypervisors. Set to -1 to disable. Setting this to 0 will run at the default +# rate. (integer value) +#bandwidth_poll_interval=600 + +# Interval to sync power states between the database and the hypervisor. Set to +# -1 to disable. Setting this to 0 will run at the default rate. 
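Several of the interval options above and below share one convention: a negative value disables the task, 0 means run at the built-in default rate, and a positive value is the period in seconds. A small illustrative helper for that convention (the function name and fallback value are made up; only the convention itself comes from the comments in this file):

    # Interpret the "-1 disables, 0 means default rate" convention documented above.
    def effective_interval(configured, builtin_default):
        if configured < 0:
            return None                # task disabled
        if configured == 0:
            return builtin_default     # run at the built-in default rate
        return configured              # explicit period in seconds

    print(effective_interval(-1, 600))    # -> None (disabled)
    print(effective_interval(0, 600))     # -> 600
    print(effective_interval(3600, 600))  # -> 3600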
(integer +# value) +#sync_power_state_interval=600 + +# Number of seconds between instance network information cache updates (integer +# value) +#heal_instance_info_cache_interval=60 +heal_instance_info_cache_interval=60 + +# Interval in seconds for reclaiming deleted instances (integer value) +#reclaim_instance_interval=0 + +# Interval in seconds for gathering volume usages (integer value) +#volume_usage_poll_interval=0 + +# Interval in seconds for polling shelved instances to offload. Set to -1 to +# disable.Setting this to 0 will run at the default rate. (integer value) +#shelved_poll_interval=3600 + +# Time in seconds before a shelved instance is eligible for removing from a +# host. -1 never offload, 0 offload immediately when shelved (integer value) +#shelved_offload_time=0 + +# Interval in seconds for retrying failed instance file deletes. Set to -1 to +# disable. Setting this to 0 will run at the default rate. (integer value) +#instance_delete_interval=300 + +# Waiting time interval (seconds) between block device allocation retries on +# failures (integer value) +#block_device_allocate_retries_interval=3 + +# Waiting time interval (seconds) between sending the scheduler a list of +# current instance UUIDs to verify that its view of instances is in sync with +# nova. If the CONF option `scheduler_tracks_instance_changes` is False, +# changing this option will have no effect. (integer value) +#scheduler_instance_sync_interval=120 + +# Interval in seconds for updating compute resources. A number less than 0 +# means to disable the task completely. Leaving this at the default of 0 will +# cause this to run at the default periodic interval. Setting it to any +# positive value will cause it to run at approximately that number of seconds. +# (integer value) +#update_resources_interval=0 + +# Action to take if a running deleted instance is detected.Set to 'noop' to +# take no action. (string value) +# Allowed values: noop, log, shutdown, reap +#running_deleted_instance_action=reap + +# Number of seconds to wait between runs of the cleanup task. (integer value) +#running_deleted_instance_poll_interval=1800 + +# Number of seconds after being deleted when a running instance should be +# considered eligible for cleanup. (integer value) +#running_deleted_instance_timeout=0 + +# Automatically hard reboot an instance if it has been stuck in a rebooting +# state longer than N seconds. Set to 0 to disable. (integer value) +#reboot_timeout=0 + +# Amount of time in seconds an instance can be in BUILD before going into ERROR +# status. Set to 0 to disable. (integer value) +#instance_build_timeout=0 + +# Automatically unrescue an instance after N seconds. Set to 0 to disable. +# (integer value) +#rescue_timeout=0 + +# Automatically confirm resizes after N seconds. Set to 0 to disable. (integer +# value) +#resize_confirm_window=0 + +# Total amount of time to wait in seconds for an instance to perform a clean +# shutdown. (integer value) +#shutdown_timeout=60 + +# Monitor classes available to the compute which may be specified more than +# once. This option is DEPRECATED and no longer used. Use setuptools entry +# points to list available monitor plugins. (multi valued) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#compute_available_monitors = + +# A list of monitors that can be used for getting compute metrics. You can use +# the alias/name from the setuptools entry points for nova.compute.monitors.* +# namespaces. If no namespace is supplied, the "cpu." 
namespace is assumed for +# backwards-compatibility. An example value that would enable both the CPU and +# NUMA memory bandwidth monitors that used the virt driver variant: +# ["cpu.virt_driver", "numa_mem_bw.virt_driver"] (list value) +#compute_monitors = + +# Amount of disk in MB to reserve for the host (integer value) +#reserved_host_disk_mb=0 + +# Amount of memory in MB to reserve for the host (integer value) +#reserved_host_memory_mb=512 +reserved_host_memory_mb=512 + +# Class that will manage stats for the local compute host (string value) +#compute_stats_class=nova.compute.stats.Stats + +# The names of the extra resources to track. (list value) +#compute_resources=vcpu + +# Virtual CPU to physical CPU allocation ratio which affects all CPU filters. +# This configuration specifies a global ratio for CoreFilter. For +# AggregateCoreFilter, it will fall back to this configuration value if no per- +# aggregate setting found. NOTE: This can be set per-compute, or if set to 0.0, +# the value set on the scheduler node(s) will be used and defaulted to 16.0 +# (floating point value) +#cpu_allocation_ratio=0.0 +cpu_allocation_ratio=16.0 + +# Virtual ram to physical ram allocation ratio which affects all ram filters. +# This configuration specifies a global ratio for RamFilter. For +# AggregateRamFilter, it will fall back to this configuration value if no per- +# aggregate setting found. NOTE: This can be set per-compute, or if set to 0.0, +# the value set on the scheduler node(s) will be used and defaulted to 1.5 +# (floating point value) +#ram_allocation_ratio=0.0 +ram_allocation_ratio=1.5 + +# The topic compute nodes listen on (string value) +#compute_topic=compute + +# +# From nova.network +# + +# The full class name of the network API class to use (string value) +#network_api_class=nova.network.api.API +network_api_class=nova.network.neutronv2.api.API + +# Driver to use for network creation (string value) +#network_driver=nova.network.linux_net + +# Default pool for floating IPs (string value) +#default_floating_pool=nova +default_floating_pool=public + +# Autoassigning floating IP to VM (boolean value) +#auto_assign_floating_ip=false + +# Full class name for the DNS Manager for floating IPs (string value) +#floating_ip_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver + +# Full class name for the DNS Manager for instance IPs (string value) +#instance_dns_manager=nova.network.noop_dns_driver.NoopDNSDriver + +# Full class name for the DNS Zone for instance IPs (string value) +#instance_dns_domain = + +# URL for LDAP server which will store DNS entries (string value) +#ldap_dns_url=ldap://ldap.example.com:389 + +# User for LDAP DNS (string value) +#ldap_dns_user=uid=admin,ou=people,dc=example,dc=org + +# Password for LDAP DNS (string value) +#ldap_dns_password=password + +# Hostmaster for LDAP DNS driver Statement of Authority (string value) +#ldap_dns_soa_hostmaster=hostmaster@example.org + +# DNS Servers for LDAP DNS driver (multi valued) +#ldap_dns_servers=dns.example.org + +# Base DN for DNS entries in LDAP (string value) +#ldap_dns_base_dn=ou=hosts,dc=example,dc=org + +# Refresh interval (in seconds) for LDAP DNS driver Statement of Authority +# (string value) +#ldap_dns_soa_refresh=1800 + +# Retry interval (in seconds) for LDAP DNS driver Statement of Authority +# (string value) +#ldap_dns_soa_retry=3600 + +# Expiry interval (in seconds) for LDAP DNS driver Statement of Authority +# (string value) +#ldap_dns_soa_expiry=86400 + +# Minimum interval (in seconds) for LDAP DNS driver 
Statement of Authority +# (string value) +#ldap_dns_soa_minimum=7200 + +# Location of flagfiles for dhcpbridge (multi valued) +#dhcpbridge_flagfile=/etc/nova/nova.conf + +# Location to keep network config files (string value) +#networks_path=$state_path/networks + +# Interface for public IP addresses (string value) +#public_interface=eth0 + +# Location of nova-dhcpbridge (string value) +#dhcpbridge=/usr/bin/nova-dhcpbridge + +# Public IP of network host (string value) +#routing_source_ip=$my_ip + +# Lifetime of a DHCP lease in seconds (integer value) +#dhcp_lease_time=86400 + +# If set, uses specific DNS server for dnsmasq. Can be specified multiple +# times. (multi valued) +#dns_server = + +# If set, uses the dns1 and dns2 from the network ref. as dns servers. (boolean +# value) +#use_network_dns_servers=false + +# A list of dmz ranges that should be accepted (list value) +#dmz_cidr = + +# Traffic to this range will always be snatted to the fallback ip, even if it +# would normally be bridged out of the node. Can be specified multiple times. +# (multi valued) +#force_snat_range = +force_snat_range =0.0.0.0/0 + +# Override the default dnsmasq settings with this file (string value) +#dnsmasq_config_file = + +# Driver used to create ethernet devices. (string value) +#linuxnet_interface_driver=nova.network.linux_net.LinuxBridgeInterfaceDriver + +# Name of Open vSwitch bridge used with linuxnet (string value) +#linuxnet_ovs_integration_bridge=br-int + +# Send gratuitous ARPs for HA setup (boolean value) +#send_arp_for_ha=false + +# Send this many gratuitous ARPs for HA setup (integer value) +#send_arp_for_ha_count=3 + +# Use single default gateway. Only first nic of vm will get default gateway +# from dhcp server (boolean value) +#use_single_default_gateway=false + +# An interface that bridges can forward to. If this is set to all then all +# traffic will be forwarded. Can be specified multiple times. (multi valued) +#forward_bridge_interface=all + +# The IP address for the metadata API server (string value) +#metadata_host=$my_ip +metadata_host=VARINET4ADDR + +# The port for the metadata API port (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#metadata_port=8775 + +# Regular expression to match the iptables rule that should always be on the +# top. (string value) +#iptables_top_regex = + +# Regular expression to match the iptables rule that should always be on the +# bottom. (string value) +#iptables_bottom_regex = + +# The table that iptables to jump to when a packet is to be dropped. (string +# value) +#iptables_drop_action=DROP + +# Amount of time, in seconds, that ovs_vsctl should wait for a response from +# the database. 0 is to wait forever. (integer value) +#ovs_vsctl_timeout=120 + +# If passed, use fake network devices and addresses (boolean value) +#fake_network=false + +# Number of times to retry ebtables commands on failure. (integer value) +#ebtables_exec_attempts=3 + +# Number of seconds to wait between ebtables retries. 
(floating point value) +#ebtables_retry_interval=1.0 + +# Bridge for simple network instances (string value) +#flat_network_bridge= + +# DNS server for simple network (string value) +#flat_network_dns=8.8.4.4 + +# Whether to attempt to inject network setup into guest (boolean value) +#flat_injected=false + +# FlatDhcp will bridge into this interface if set (string value) +#flat_interface= + +# First VLAN for private networks (integer value) +# Minimum value: 1 +# Maximum value: 4094 +#vlan_start=100 + +# VLANs will bridge into this interface if set (string value) +#vlan_interface= + +# Number of networks to support (integer value) +#num_networks=1 + +# Public IP for the cloudpipe VPN servers (string value) +#vpn_ip=$my_ip + +# First Vpn port for private networks (integer value) +#vpn_start=1000 + +# Number of addresses in each private subnet (integer value) +#network_size=256 + +# Fixed IPv6 address block (string value) +#fixed_range_v6=fd00::/48 + +# Default IPv4 gateway (string value) +#gateway= + +# Default IPv6 gateway (string value) +#gateway_v6= + +# Number of addresses reserved for vpn clients (integer value) +#cnt_vpn_clients=0 + +# Seconds after which a deallocated IP is disassociated (integer value) +#fixed_ip_disassociate_timeout=600 + +# Number of attempts to create unique mac address (integer value) +#create_unique_mac_address_attempts=5 + +# If True, skip using the queue and make local calls (boolean value) +#fake_call=false + +# If True, unused gateway devices (VLAN and bridge) are deleted in VLAN network +# mode with multi hosted networks (boolean value) +#teardown_unused_network_gateway=false + +# If True, send a dhcp release on instance termination (boolean value) +#force_dhcp_release=True + +# If True, when a DNS entry must be updated, it sends a fanout cast to all +# network hosts to update their DNS entries in multi host mode (boolean value) +#update_dns_entries=false + +# Number of seconds to wait between runs of updates to DNS entries. (integer +# value) +#dns_update_periodic_interval=-1 + +# Domain to use for building the hostnames (string value) +#dhcp_domain=novalocal +dhcp_domain=novalocal + +# Indicates underlying L3 management library (string value) +#l3_lib=nova.network.l3.LinuxNetL3 + +# The topic network nodes listen on (string value) +#network_topic=network + +# Default value for multi_host in networks. Also, if set, some rpc network +# calls will be sent directly to host. (boolean value) +#multi_host=false + +# The full class name of the security API class (string value) +#security_group_api=nova +security_group_api=neutron + +# +# From nova.openstack.common.memorycache +# + +# Memcached servers or None for in process cache. (list value) +#memcached_servers= + +# +# From nova.openstack.common.policy +# + +# The JSON file that defines policies. (string value) +#policy_file=policy.json + +# Default rule. Enforced when a requested rule is not found. (string value) +#policy_default_rule=default + +# Directories where policy configuration files are stored. They can be relative +# to any directory in the search path defined by the config_dir option, or +# absolute paths. The file defined by policy_file must exist for these +# directories to be searched. Missing or empty directories are ignored. 
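network_size above is a count of addresses per private subnet and fixed_range_v6 is a CIDR block; both are easy to sanity-check with the standard ipaddress module (the IPv4 base network below is a made-up example, and how nova-network carves subnets from these values is not shown here):

    import ipaddress
    import math

    # 256 addresses per subnet corresponds to a /24 (32 - log2(256) = 24).
    network_size = 256
    prefix_len = 32 - int(math.log2(network_size))
    net = ipaddress.ip_network("10.0.0.0/%d" % prefix_len)
    print(prefix_len, net.num_addresses)                   # -> 24 256

    # The fixed_range_v6 default parses as a ULA (private) block.
    print(ipaddress.ip_network("fd00::/48").is_private)    # -> True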
(multi +# valued) +#policy_dirs=policy.d + +# +# From nova.scheduler +# + +# Virtual disk to physical disk allocation ratio (floating point value) +#disk_allocation_ratio=1.0 + +# Tells filters to ignore hosts that have this many or more instances currently +# in build, resize, snapshot, migrate, rescue or unshelve task states (integer +# value) +#max_io_ops_per_host=8 + +# Ignore hosts that have too many instances (integer value) +#max_instances_per_host=50 + +# Absolute path to scheduler configuration JSON file. (string value) +#scheduler_json_config_location = + +# The scheduler host manager class to use (string value) +#scheduler_host_manager=nova.scheduler.host_manager.HostManager + +# New instances will be scheduled on a host chosen randomly from a subset of +# the N best hosts. This property defines the subset size that a host is chosen +# from. A value of 1 chooses the first host returned by the weighing functions. +# This value must be at least 1. Any value less than 1 will be ignored, and 1 +# will be used instead (integer value) +#scheduler_host_subset_size=1 + +# Force the filter to consider only keys matching the given namespace. (string +# value) +#aggregate_image_properties_isolation_namespace= + +# The separator used between the namespace and keys (string value) +#aggregate_image_properties_isolation_separator=. + +# Images to run on isolated host (list value) +#isolated_images = + +# Host reserved for specific images (list value) +#isolated_hosts = + +# Whether to force isolated hosts to run only isolated images (boolean value) +#restrict_isolated_hosts_to_isolated_images=true + +# Filter classes available to the scheduler which may be specified more than +# once. An entry of "nova.scheduler.filters.all_filters" maps to all filters +# included with nova. (multi valued) +#scheduler_available_filters=nova.scheduler.filters.all_filters + +# Which filter class names to use for filtering hosts when not specified in the +# request. (list value) +#scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,DiskFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter +scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,CoreFilter + +# Which weight class names to use for weighing hosts (list value) +#scheduler_weight_classes=nova.scheduler.weights.all_weighers + +# Determines if the Scheduler tracks changes to instances to help with its +# filtering decisions. (boolean value) +#scheduler_tracks_instance_changes=true + +# Which filter class names to use for filtering baremetal hosts when not +# specified in the request. (list value) +#baremetal_scheduler_default_filters=RetryFilter,AvailabilityZoneFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ExactRamFilter,ExactDiskFilter,ExactCoreFilter + +# Flag to decide whether to use baremetal_scheduler_default_filters or not. +# (boolean value) +#scheduler_use_baremetal_filters=false + +# Default driver to use for the scheduler (string value) +#scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler +scheduler_driver=nova.scheduler.filter_scheduler.FilterScheduler + +# How often (in seconds) to run periodic tasks in the scheduler driver of your +# choice. Please note this is likely to interact with the value of +# service_down_time, but exactly how they interact will depend on your choice +# of scheduler driver. 
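scheduler_default_filters above is set to include RamFilter and CoreFilter, which apply the overcommit ratios configured earlier in this file (cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5) together with reserved_host_memory_mb=512. A rough sketch of that capacity arithmetic with made-up host figures; Nova's real filters work from per-host state tracked by the resource tracker, so this is only a simplification:

    # Simplified overcommit arithmetic behind CoreFilter / RamFilter.
    cpu_allocation_ratio = 16.0        # from the compute options above
    ram_allocation_ratio = 1.5
    reserved_host_memory_mb = 512

    host_pcpus = 16                    # example host
    host_ram_mb = 65536

    schedulable_vcpus = host_pcpus * cpu_allocation_ratio
    schedulable_ram_mb = (host_ram_mb - reserved_host_memory_mb) * ram_allocation_ratio
    print(schedulable_vcpus, schedulable_ram_mb)   # -> 256.0 97536.0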
(integer value) +#scheduler_driver_task_period=60 + +# The topic scheduler nodes listen on (string value) +#scheduler_topic=scheduler + +# Maximum number of attempts to schedule an instance (integer value) +#scheduler_max_attempts=3 + +# Multiplier used for weighing host io ops. Negative numbers mean a preference +# to choose light workload compute hosts. (floating point value) +#io_ops_weight_multiplier=-1.0 + +# Multiplier used for weighing ram. Negative numbers mean to stack vs spread. +# (floating point value) +#ram_weight_multiplier=1.0 + +# +# From nova.virt +# + +# Config drive format. (string value) +# Allowed values: iso9660, vfat +#config_drive_format=iso9660 + +# Set to "always" to force injection to take place on a config drive. NOTE: The +# "always" will be deprecated in the Liberty release cycle. (string value) +# Allowed values: always, True, False +#force_config_drive= + +# Name and optionally path of the tool used for ISO image creation (string +# value) +#mkisofs_cmd=genisoimage + +# Name of the mkfs commands for ephemeral device. The format is = (multi valued) +#virt_mkfs = + +# Attempt to resize the filesystem by accessing the image over a block device. +# This is done by the host and may not be necessary if the image contains a +# recent version of cloud-init. Possible mechanisms require the nbd driver (for +# qcow and raw), or loop (for raw). (boolean value) +#resize_fs_using_block_device=false + +# Amount of time, in seconds, to wait for NBD device start up. (integer value) +#timeout_nbd=10 + +# Driver to use for controlling virtualization. Options include: +# libvirt.LibvirtDriver, xenapi.XenAPIDriver, fake.FakeDriver, +# ironic.IronicDriver, vmwareapi.VMwareVCDriver, hyperv.HyperVDriver (string +# value) +#compute_driver=libvirt.LibvirtDriver +compute_driver=libvirt.LibvirtDriver + +# The default format an ephemeral_volume will be formatted with on creation. +# (string value) +#default_ephemeral_format= + +# VM image preallocation mode: "none" => no storage provisioning is done up +# front, "space" => storage is fully allocated at instance start (string value) +# Allowed values: none, space +#preallocate_images=none + +# Whether to use cow images (boolean value) +#use_cow_images=true + +# Fail instance boot if vif plugging fails (boolean value) +#vif_plugging_is_fatal=true +vif_plugging_is_fatal=True + +# Number of seconds to wait for neutron vif plugging events to arrive before +# continuing or failing (see vif_plugging_is_fatal). If this is set to zero and +# vif_plugging_is_fatal is False, events should not be expected to arrive at +# all. (integer value) +#vif_plugging_timeout=300 +vif_plugging_timeout=300 + +# Firewall driver (defaults to hypervisor specific iptables driver) (string +# value) +#firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver +firewall_driver=nova.virt.firewall.NoopFirewallDriver + +# Whether to allow network traffic from same network (boolean value) +#allow_same_net_traffic=true + +# Defines which pcpus that instance vcpus can use. For example, "4-12,^8,15" +# (string value) +#vcpu_pin_set= + +# Number of seconds to wait between runs of the image cache manager. Set to -1 +# to disable. Setting this to 0 will run at the default rate. (integer value) +#image_cache_manager_interval=2400 + +# Where cached images are stored under $instances_path. This is NOT the full +# path - just a folder name. 
For per-compute-host cached images, set to +# _base_$my_ip (string value) +#image_cache_subdirectory_name=_base + +# Should unused base images be removed? (boolean value) +#remove_unused_base_images=true + +# Unused unresized base images younger than this will not be removed (integer +# value) +#remove_unused_original_minimum_age_seconds=86400 + +# Force backing images to raw format (boolean value) +#force_raw_images=true +force_raw_images=True + +# Template file for injected network (string value) +#injected_network_template=/usr/share/nova/interfaces.template + +# +# From oslo.log +# + +# Print debugging output (set logging level to DEBUG instead of default INFO +# level). (boolean value) +#debug=false +debug=True + +# If set to false, will disable INFO logging level, making WARNING the default. +# (boolean value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#verbose=true +verbose=True + +# The name of a logging configuration file. This file is appended to any +# existing logging configuration files. For details about logging configuration +# files, see the Python logging module documentation. (string value) +# Deprecated group;name - DEFAULT;log_config +#log_config_append= + +# DEPRECATED. A logging.Formatter log message format string which may use any +# of the available logging.LogRecord attributes. This option is deprecated. +# Please use logging_context_format_string and logging_default_format_string +# instead. (string value) +#log_format= + +# Format string for %%(asctime)s in log records. Default: %(default)s . (string +# value) +#log_date_format=%Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to output to. If no default is set, logging will +# go to stdout. (string value) +# Deprecated group;name - DEFAULT;logfile +#log_file= + +# (Optional) The base directory used for relative --log-file paths. (string +# value) +# Deprecated group;name - DEFAULT;logdir +#log_dir=/var/log/nova +log_dir=/var/log/nova + +# Use syslog for logging. Existing syslog format is DEPRECATED and will be +# changed later to honor RFC5424. (boolean value) +#use_syslog=false +use_syslog=False + +# (Optional) Enables or disables syslog rfc5424 format for logging. If enabled, +# prefixes the MSG part of the syslog message with APP-NAME (RFC5424). The +# format without the APP-NAME is deprecated in Kilo, and will be removed in +# Mitaka, along with this option. (boolean value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#use_syslog_rfc_format=true + +# Syslog facility to receive log lines. (string value) +#syslog_log_facility=LOG_USER +syslog_log_facility=LOG_USER + +# Log output to standard error. (boolean value) +#use_stderr=False +use_stderr=True + +# Format string to use for log messages with context. (string value) +#logging_context_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages without context. (string value) +#logging_default_format_string=%(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Data to append to log format when level is DEBUG. (string value) +#logging_debug_format_suffix=%(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format. (string value) +#logging_exception_prefix=%(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s + +# List of logger=LEVEL pairs. 
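default_log_levels, whose default value follows just below, is a comma-separated list of logger=LEVEL pairs. A minimal sketch of what such pairs mean, applied with the standard library logging module (oslo.log does the real work inside Nova):

    import logging

    # A few of the logger=LEVEL pairs from the default value below.
    pairs = "amqp=WARN,boto=WARN,sqlalchemy=WARN,iso8601=WARN"

    for pair in pairs.split(","):
        name, _, level = pair.partition("=")
        logging.getLogger(name).setLevel(getattr(logging, level))

    print(logging.getLogger("sqlalchemy").getEffectiveLevel())  # -> 30 (WARNING)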
(list value) +#default_log_levels=amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN + +# Enables or disables publication of error events. (boolean value) +#publish_errors=false + +# The format for an instance that is passed with the log message. (string +# value) +#instance_format="[instance: %(uuid)s] " + +# The format for an instance UUID that is passed with the log message. (string +# value) +#instance_uuid_format="[instance: %(uuid)s] " + +# Enables or disables fatal status of deprecations. (boolean value) +#fatal_deprecations=false + +# +# From oslo.messaging +# + +# Size of RPC connection pool. (integer value) +# Deprecated group;name - DEFAULT;rpc_conn_pool_size +#rpc_conn_pool_size=30 + +# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP. +# The "host" option should point or resolve to this address. (string value) +#rpc_zmq_bind_address=* + +# MatchMaker driver. (string value) +#rpc_zmq_matchmaker=local + +# ZeroMQ receiver listening port. (integer value) +#rpc_zmq_port=9501 + +# Number of ZeroMQ contexts, defaults to 1. (integer value) +#rpc_zmq_contexts=1 + +# Maximum number of ingress messages to locally buffer per topic. Default is +# unlimited. (integer value) +#rpc_zmq_topic_backlog= + +# Directory for holding IPC sockets. (string value) +#rpc_zmq_ipc_dir=/var/run/openstack + +# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match +# "host" option, if running Nova. (string value) +#rpc_zmq_host=localhost + +# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. +# (integer value) +#rpc_cast_timeout=30 + +# Heartbeat frequency. (integer value) +#matchmaker_heartbeat_freq=300 + +# Heartbeat time-to-live. (integer value) +#matchmaker_heartbeat_ttl=600 + +# Size of executor thread pool. (integer value) +# Deprecated group;name - DEFAULT;rpc_thread_pool_size +#executor_thread_pool_size=64 + +# The Drivers(s) to handle sending notifications. Possible values are +# messaging, messagingv2, routing, log, test, noop (multi valued) +#notification_driver = +notification_driver =nova.openstack.common.notifier.rabbit_notifier,ceilometer.compute.nova_notifier + +# AMQP topic used for OpenStack notifications. (list value) +# Deprecated group;name - [rpc_notifier2]/topics +#notification_topics=notifications +notification_topics=notifications + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout=60 + +# A URL representing the messaging driver to use and its full configuration. If +# not set, we fall back to the rpc_backend option and driver specific +# configuration. (string value) +#transport_url= + +# The messaging driver to use, defaults to rabbit. Other drivers include qpid +# and zmq. (string value) +#rpc_backend=rabbit +rpc_backend=rabbit + +# The default exchange under which topics are scoped. May be overridden by an +# exchange name specified in the transport_url option. (string value) +#control_exchange=openstack + +# +# From oslo.service.periodic_task +# + +# Some periodic tasks can be run in a separate process. Should we run them +# here? (boolean value) +#run_external_periodic_tasks=true + +# +# From oslo.service.service +# + +# Enable eventlet backdoor. 
Acceptable values are 0, , and +# :, where 0 results in listening on a random tcp port number; +# results in listening on the specified port number (and not enabling +# backdoor if that port is in use); and : results in listening on +# the smallest unused port number within the specified range of port numbers. +# The chosen port is displayed in the service's log file. (string value) +#backdoor_port= + +# Enables or disables logging values of all registered options when starting a +# service (at DEBUG level). (boolean value) +#log_options=true +sql_connection=mysql+pymysql://nova:qum5net@VARINET4ADDR/nova +image_service=nova.image.glance.GlanceImageService +lock_path=/var/lib/nova/tmp +osapi_volume_listen=0.0.0.0 +vncserver_proxyclient_address=VARHOSTNAME.ceph.redhat.com +vnc_keymap=en-us +vnc_enabled=True +vncserver_listen=0.0.0.0 +novncproxy_base_url=http://VARINET4ADDR:6080/vnc_auto.html + +rbd_user = cinder +rbd_secret_uuid = RBDSECRET + +[api_database] + +# +# From nova +# + +# The SQLAlchemy connection string to use to connect to the Nova API database. +# (string value) +#connection=mysql://nova:nova@localhost/nova + +# If True, SQLite uses synchronous mode. (boolean value) +#sqlite_synchronous=true + +# The SQLAlchemy connection string to use to connect to the slave database. +# (string value) +#slave_connection= + +# The SQL mode to be used for MySQL sessions. This option, including the +# default, overrides any server-set SQL mode. To use whatever SQL mode is set +# by the server configuration, set this to no value. Example: mysql_sql_mode= +# (string value) +#mysql_sql_mode=TRADITIONAL + +# Timeout before idle SQL connections are reaped. (integer value) +#idle_timeout=3600 + +# Maximum number of SQL connections to keep open in a pool. (integer value) +#max_pool_size= + +# Maximum number of database connection retries during startup. Set to -1 to +# specify an infinite retry count. (integer value) +#max_retries=-1 + +# Interval between retries of opening a SQL connection. (integer value) +#retry_interval=10 + +# If set, use this value for max_overflow with SQLAlchemy. (integer value) +#max_overflow= + +# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer +# value) +#connection_debug=0 + +# Add Python stack traces to SQL as comment strings. (boolean value) +#connection_trace=false + +# If set, use this value for pool_timeout with SQLAlchemy. (integer value) +#pool_timeout= + + +[barbican] + +# +# From nova +# + +# Info to match when looking for barbican in the service catalog. Format is: +# separated values of the form: :: +# (string value) +#catalog_info=key-manager:barbican:public + +# Override service catalog lookup with template for barbican endpoint e.g. +# http://localhost:9311/v1/%(project_id)s (string value) +#endpoint_template= + +# Region name of this node (string value) +#os_region_name= + + +[cells] + +# +# From nova.cells +# + +# Enable cell functionality (boolean value) +#enable=false + +# The topic cells nodes listen on (string value) +#topic=cells + +# Manager for cells (string value) +#manager=nova.cells.manager.CellsManager + +# Name of this cell (string value) +#name=nova + +# Key/Multi-value list with the capabilities of the cell (list value) +#capabilities=hypervisor=xenserver;kvm,os=linux;windows + +# Seconds to wait for response from a call to a cell. (integer value) +#call_timeout=60 + +# Percentage of cell capacity to hold in reserve. 
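The uncommented settings above use tokens such as VARINET4ADDR, VARHOSTNAME, and RBDSECRET. These read as placeholders meant to be substituted before the file is used rather than literal values, although the tooling that performs the substitution is not part of this excerpt. Assuming plain token replacement, a sketch:

    # Assumption: VARINET4ADDR / VARHOSTNAME / RBDSECRET are literal tokens
    # replaced before deployment; the replacement values below are examples.
    substitutions = {
        "VARINET4ADDR": "192.0.2.21",
        "VARHOSTNAME": "compute-0",
        "RBDSECRET": "457eb676-33da-42ec-9a8c-9293d545c337",
    }

    line = "novncproxy_base_url=http://VARINET4ADDR:6080/vnc_auto.html"
    for token, value in substitutions.items():
        line = line.replace(token, value)
    print(line)   # -> novncproxy_base_url=http://192.0.2.21:6080/vnc_auto.html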
Affects both memory and disk +# utilization (floating point value) +#reserve_percent=10.0 + +# Type of cell (string value) +# Allowed values: api, compute +#cell_type=compute + +# Number of seconds after which a lack of capability and capacity updates +# signals the child cell is to be treated as a mute. (integer value) +#mute_child_interval=300 + +# Seconds between bandwidth updates for cells. (integer value) +#bandwidth_update_interval=600 + +# Cells communication driver to use (string value) +#driver=nova.cells.rpc_driver.CellsRPCDriver + +# Number of seconds after an instance was updated or deleted to continue to +# update cells (integer value) +#instance_updated_at_threshold=3600 + +# Number of instances to update per periodic task run (integer value) +#instance_update_num_instances=1 + +# Maximum number of hops for cells routing. (integer value) +#max_hop_count=10 + +# Cells scheduler to use (string value) +#scheduler=nova.cells.scheduler.CellsScheduler + +# Base queue name to use when communicating between cells. Various topics by +# message type will be appended to this. (string value) +#rpc_driver_queue_base=cells.intercell + +# Filter classes the cells scheduler should use. An entry of +# "nova.cells.filters.all_filters" maps to all cells filters included with +# nova. (list value) +#scheduler_filter_classes=nova.cells.filters.all_filters + +# Weigher classes the cells scheduler should use. An entry of +# "nova.cells.weights.all_weighers" maps to all cell weighers included with +# nova. (list value) +#scheduler_weight_classes=nova.cells.weights.all_weighers + +# How many retries when no cells are available. (integer value) +#scheduler_retries=10 + +# How often to retry in seconds when no cells are available. (integer value) +#scheduler_retry_delay=2 + +# Interval, in seconds, for getting fresh cell information from the database. +# (integer value) +#db_check_interval=60 + +# Configuration file from which to read cells configuration. If given, +# overrides reading cells from the database. (string value) +#cells_config= + +# Multiplier used to weigh mute children. (The value should be negative.) +# (floating point value) +#mute_weight_multiplier=-10000.0 + +# Multiplier used for weighing ram. Negative numbers mean to stack vs spread. +# (floating point value) +#ram_weight_multiplier=10.0 + +# Multiplier used to weigh offset weigher. (floating point value) +#offset_weight_multiplier=1.0 + + +[cinder] + +# +# From nova +# + +# Info to match when looking for cinder in the service catalog. Format is: +# separated values of the form: :: +# (string value) +#catalog_info=volumev2:cinderv2:publicURL +catalog_info=volumev2:cinderv2:publicURL + +# Override service catalog lookup with template for cinder endpoint e.g. +# http://localhost:8776/v1/%(project_id)s (string value) +#endpoint_template= + +# Region name of this node (string value) +#os_region_name= + +# Number of cinderclient retries on failed http calls (integer value) +#http_retries=3 + +# Allow attach between instance and volume in different availability zones. +# (boolean value) +#cross_az_attach=true + + +[conductor] + +# +# From nova +# + +# Perform nova-conductor operations locally (boolean value) +#use_local=false +use_local=False + +# The topic on which conductor nodes listen (string value) +#topic=conductor + +# Full class name for the Manager for conductor (string value) +#manager=nova.conductor.manager.ConductorManager + +# Number of workers for OpenStack Conductor service. 
The default will be the +# number of CPUs available. (integer value) +#workers= + + +[cors] + +# +# From oslo.middleware +# + +# Indicate whether this resource may be shared with the domain received in the +# requests "origin" header. (string value) +#allowed_origin= + +# Indicate that the actual request can include user credentials (boolean value) +#allow_credentials=true + +# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple +# Headers. (list value) +#expose_headers=Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma + +# Maximum cache age of CORS preflight requests. (integer value) +#max_age=3600 + +# Indicate which methods can be used during the actual request. (list value) +#allow_methods=GET,POST,PUT,DELETE,OPTIONS + +# Indicate which header field names may be used during the actual request. +# (list value) +#allow_headers=Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma + + +[cors.subdomain] + +# +# From oslo.middleware +# + +# Indicate whether this resource may be shared with the domain received in the +# requests "origin" header. (string value) +#allowed_origin= + +# Indicate that the actual request can include user credentials (boolean value) +#allow_credentials=true + +# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple +# Headers. (list value) +#expose_headers=Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma + +# Maximum cache age of CORS preflight requests. (integer value) +#max_age=3600 + +# Indicate which methods can be used during the actual request. (list value) +#allow_methods=GET,POST,PUT,DELETE,OPTIONS + +# Indicate which header field names may be used during the actual request. +# (list value) +#allow_headers=Content-Type,Cache-Control,Content-Language,Expires,Last-Modified,Pragma + + +[database] + +# +# From nova +# + +# The file name to use with SQLite. (string value) +# Deprecated group;name - DEFAULT;sqlite_db +#sqlite_db=oslo.sqlite + +# If True, SQLite uses synchronous mode. (boolean value) +# Deprecated group;name - DEFAULT;sqlite_synchronous +#sqlite_synchronous=true + +# The back end to use for the database. (string value) +# Deprecated group;name - DEFAULT;db_backend +#backend=sqlalchemy + +# The SQLAlchemy connection string to use to connect to the database. (string +# value) +# Deprecated group;name - DEFAULT;sql_connection +# Deprecated group;name - [DATABASE]/sql_connection +# Deprecated group;name - [sql]/connection +#connection= + +# The SQLAlchemy connection string to use to connect to the slave database. +# (string value) +#slave_connection= + +# The SQL mode to be used for MySQL sessions. This option, including the +# default, overrides any server-set SQL mode. To use whatever SQL mode is set +# by the server configuration, set this to no value. Example: mysql_sql_mode= +# (string value) +#mysql_sql_mode=TRADITIONAL + +# Timeout before idle SQL connections are reaped. (integer value) +# Deprecated group;name - DEFAULT;sql_idle_timeout +# Deprecated group;name - [DATABASE]/sql_idle_timeout +# Deprecated group;name - [sql]/idle_timeout +#idle_timeout=3600 + +# Minimum number of SQL connections to keep open in a pool. (integer value) +# Deprecated group;name - DEFAULT;sql_min_pool_size +# Deprecated group;name - [DATABASE]/sql_min_pool_size +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool. 
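The connection options in the [database] and [api_database] sections take SQLAlchemy URLs of the same shape as the sql_connection value set earlier in this file (dialect+driver://user:password@host/database). A minimal sketch of pulling such a URL apart with the standard library; the credentials and host below are placeholders:

    from urllib.parse import urlsplit

    url = urlsplit("mysql+pymysql://nova:secret@db.example.com/nova")
    print(url.scheme)             # -> mysql+pymysql  (dialect+driver)
    print(url.username)           # -> nova
    print(url.hostname)           # -> db.example.com
    print(url.path.lstrip("/"))   # -> nova  (database name)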
(integer value) +# Deprecated group;name - DEFAULT;sql_max_pool_size +# Deprecated group;name - [DATABASE]/sql_max_pool_size +#max_pool_size= + +# Maximum number of database connection retries during startup. Set to -1 to +# specify an infinite retry count. (integer value) +# Deprecated group;name - DEFAULT;sql_max_retries +# Deprecated group;name - [DATABASE]/sql_max_retries +#max_retries=10 + +# Interval between retries of opening a SQL connection. (integer value) +# Deprecated group;name - DEFAULT;sql_retry_interval +# Deprecated group;name - [DATABASE]/reconnect_interval +#retry_interval=10 + +# If set, use this value for max_overflow with SQLAlchemy. (integer value) +# Deprecated group;name - DEFAULT;sql_max_overflow +# Deprecated group;name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow= + +# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer +# value) +# Deprecated group;name - DEFAULT;sql_connection_debug +#connection_debug=0 + +# Add Python stack traces to SQL as comment strings. (boolean value) +# Deprecated group;name - DEFAULT;sql_connection_trace +#connection_trace=false + +# If set, use this value for pool_timeout with SQLAlchemy. (integer value) +# Deprecated group;name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout= + +# Enable the experimental use of database reconnect on connection lost. +# (boolean value) +#use_db_reconnect=false + +# Seconds between retries of a database transaction. (integer value) +#db_retry_interval=1 + +# If True, increases the interval between retries of a database operation up to +# db_max_retry_interval. (boolean value) +#db_inc_retry_interval=true + +# If db_inc_retry_interval is set, the maximum seconds between retries of a +# database operation. (integer value) +#db_max_retry_interval=10 + +# Maximum retries in case of connection error or deadlock error before error is +# raised. Set to -1 to specify an infinite retry count. (integer value) +#db_max_retries=20 + +# +# From oslo.db +# + +# The file name to use with SQLite. (string value) +# Deprecated group;name - DEFAULT;sqlite_db +#sqlite_db=oslo.sqlite + +# If True, SQLite uses synchronous mode. (boolean value) +# Deprecated group;name - DEFAULT;sqlite_synchronous +#sqlite_synchronous=true + +# The back end to use for the database. (string value) +# Deprecated group;name - DEFAULT;db_backend +#backend=sqlalchemy + +# The SQLAlchemy connection string to use to connect to the database. (string +# value) +# Deprecated group;name - DEFAULT;sql_connection +# Deprecated group;name - [DATABASE]/sql_connection +# Deprecated group;name - [sql]/connection +#connection= + +# The SQLAlchemy connection string to use to connect to the slave database. +# (string value) +#slave_connection= + +# The SQL mode to be used for MySQL sessions. This option, including the +# default, overrides any server-set SQL mode. To use whatever SQL mode is set +# by the server configuration, set this to no value. Example: mysql_sql_mode= +# (string value) +#mysql_sql_mode=TRADITIONAL + +# Timeout before idle SQL connections are reaped. (integer value) +# Deprecated group;name - DEFAULT;sql_idle_timeout +# Deprecated group;name - [DATABASE]/sql_idle_timeout +# Deprecated group;name - [sql]/idle_timeout +#idle_timeout=3600 + +# Minimum number of SQL connections to keep open in a pool. (integer value) +# Deprecated group;name - DEFAULT;sql_min_pool_size +# Deprecated group;name - [DATABASE]/sql_min_pool_size +#min_pool_size=1 + +# Maximum number of SQL connections to keep open in a pool. 
(integer value) +# Deprecated group;name - DEFAULT;sql_max_pool_size +# Deprecated group;name - [DATABASE]/sql_max_pool_size +#max_pool_size= + +# Maximum number of database connection retries during startup. Set to -1 to +# specify an infinite retry count. (integer value) +# Deprecated group;name - DEFAULT;sql_max_retries +# Deprecated group;name - [DATABASE]/sql_max_retries +#max_retries=10 + +# Interval between retries of opening a SQL connection. (integer value) +# Deprecated group;name - DEFAULT;sql_retry_interval +# Deprecated group;name - [DATABASE]/reconnect_interval +#retry_interval=10 + +# If set, use this value for max_overflow with SQLAlchemy. (integer value) +# Deprecated group;name - DEFAULT;sql_max_overflow +# Deprecated group;name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow= + +# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer +# value) +# Deprecated group;name - DEFAULT;sql_connection_debug +#connection_debug=0 + +# Add Python stack traces to SQL as comment strings. (boolean value) +# Deprecated group;name - DEFAULT;sql_connection_trace +#connection_trace=false + +# If set, use this value for pool_timeout with SQLAlchemy. (integer value) +# Deprecated group;name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout= + +# Enable the experimental use of database reconnect on connection lost. +# (boolean value) +#use_db_reconnect=false + +# Seconds between retries of a database transaction. (integer value) +#db_retry_interval=1 + +# If True, increases the interval between retries of a database operation up to +# db_max_retry_interval. (boolean value) +#db_inc_retry_interval=true + +# If db_inc_retry_interval is set, the maximum seconds between retries of a +# database operation. (integer value) +#db_max_retry_interval=10 + +# Maximum retries in case of connection error or deadlock error before error is +# raised. Set to -1 to specify an infinite retry count. (integer value) +#db_max_retries=20 + + +[ephemeral_storage_encryption] + +# +# From nova.compute +# + +# Whether to encrypt ephemeral storage (boolean value) +#enabled=false + +# The cipher and mode to be used to encrypt ephemeral storage. Which ciphers +# are available ciphers depends on kernel support. See /proc/crypto for the +# list of available options. (string value) +#cipher=aes-xts-plain64 + +# The bit length of the encryption key to be used to encrypt ephemeral storage +# (in XTS mode only half of the bits are used for encryption key) (integer +# value) +#key_size=512 + + +[glance] + +# +# From nova +# + +# Default glance hostname or IP address (string value) +#host=$my_ip + +# Default glance port (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#port=9292 + +# Default protocol to use when connecting to glance. Set to https for SSL. +# (string value) +# Allowed values: http, https +#protocol=http + +# A list of the glance api servers available to nova. Prefix with https:// for +# ssl-based glance api servers. ([hostname|ip]:port) (list value) +#api_servers= +api_servers=VARINET4ADDR:9292 + +# Allow to perform insecure SSL (https) requests to glance (boolean value) +#api_insecure=false + +# Number of retries when uploading / downloading an image to / from glance. +# (integer value) +#num_retries=0 + +# A list of url scheme that can be downloaded directly via the direct_url. +# Currently supported schemes: [file]. 
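(Editorial note: api_servers is the one [glance] option this template actually sets, pointing nova at the image API on whatever address replaces VARINET4ADDR via the fix_conf_file.sh helper added later in this patch. A hedged reachability check once the substitution has happened; the address below is only a stand-in for the substituted value.)

GLANCE_HOST=192.0.2.10        # placeholder for the substituted VARINET4ADDR
if curl -sf "http://${GLANCE_HOST}:9292/versions" >/dev/null; then
    echo "glance API reachable on ${GLANCE_HOST}:9292"
else
    echo "glance API not reachable on ${GLANCE_HOST}:9292" >&2
fi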
(list value) +#allowed_direct_url_schemes = + + +[guestfs] + +# +# From nova.virt +# + +# Enable guestfs debug (boolean value) +#debug=false + + +[hyperv] + +# +# From nova.virt +# + +# The name of a Windows share name mapped to the "instances_path" dir and used +# by the resize feature to copy files to the target host. If left blank, an +# administrative share will be used, looking for the same "instances_path" used +# locally (string value) +#instances_path_share = + +# Force V1 WMI utility classes (boolean value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#force_hyperv_utils_v1=false + +# Force V1 volume utility class (boolean value) +#force_volumeutils_v1=false + +# External virtual switch Name, if not provided, the first external virtual +# switch is used (string value) +#vswitch_name= + +# Required for live migration among hosts with different CPU features (boolean +# value) +#limit_cpu_features=false + +# Sets the admin password in the config drive image (boolean value) +#config_drive_inject_password=false + +# Path of qemu-img command which is used to convert between different image +# types (string value) +#qemu_img_cmd=qemu-img.exe + +# Attaches the Config Drive image as a cdrom drive instead of a disk drive +# (boolean value) +#config_drive_cdrom=false + +# Enables metrics collections for an instance by using Hyper-V's metric APIs. +# Collected data can by retrieved by other apps and services, e.g.: Ceilometer. +# Requires Hyper-V / Windows Server 2012 and above (boolean value) +#enable_instance_metrics_collection=false + +# Enables dynamic memory allocation (ballooning) when set to a value greater +# than 1. The value expresses the ratio between the total RAM assigned to an +# instance and its startup RAM amount. For example a ratio of 2.0 for an +# instance with 1024MB of RAM implies 512MB of RAM allocated at startup +# (floating point value) +#dynamic_memory_ratio=1.0 + +# Number of seconds to wait for instance to shut down after soft reboot request +# is made. We fall back to hard reboot if instance does not shutdown within +# this window. (integer value) +#wait_soft_reboot_seconds=60 + +# The number of times to retry to attach a volume (integer value) +#volume_attach_retry_count=10 + +# Interval between volume attachment attempts, in seconds (integer value) +#volume_attach_retry_interval=5 + +# The number of times to retry checking for a disk mounted via iSCSI. (integer +# value) +#mounted_disk_query_retry_count=10 + +# Interval between checks for a mounted iSCSI disk, in seconds. (integer value) +#mounted_disk_query_retry_interval=5 + + +[image_file_url] + +# +# From nova +# + +# List of file systems that are configured in this file in the +# image_file_url: sections (list value) +#filesystems = + + +[ironic] + +# +# From nova.virt +# + +# Version of Ironic API service endpoint. (integer value) +#api_version=1 + +# URL for Ironic API endpoint. (string value) +#api_endpoint= + +# Ironic keystone admin name (string value) +#admin_username= + +# Ironic keystone admin password. (string value) +#admin_password= + +# Ironic keystone auth token.DEPRECATED: use admin_username, admin_password, +# and admin_tenant_name instead (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#admin_auth_token= + +# Keystone public API endpoint. (string value) +#admin_url= + +# Log level override for ironicclient. 
Set this in order to override the global +# "default_log_levels", "verbose", and "debug" settings. DEPRECATED: use +# standard logging configuration. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#client_log_level= + +# Ironic keystone tenant name. (string value) +#admin_tenant_name= + +# How many retries when a request does conflict. If <= 0, only try once, no +# retries. (integer value) +#api_max_retries=60 + +# How often to retry in seconds when a request does conflict (integer value) +#api_retry_interval=2 + + +[keymgr] + +# +# From nova +# + +# Fixed key returned by key manager, specified in hex (string value) +#fixed_key= + +# The full class name of the key manager API class (string value) +#api_class=nova.keymgr.conf_key_mgr.ConfKeyManager + + +[keystone_authtoken] + +# +# From keystonemiddleware.auth_token +# + +# Complete public Identity API endpoint. (string value) +#auth_uri= +auth_uri=http://VARINET4ADDR:5000/v2.0 + +# API version of the admin Identity API endpoint. (string value) +#auth_version= + +# Do not handle authorization requests within the middleware, but delegate the +# authorization decision to downstream WSGI components. (boolean value) +#delay_auth_decision=false + +# Request timeout value for communicating with Identity API server. (integer +# value) +#http_connect_timeout= + +# How many times are we trying to reconnect when communicating with Identity +# API Server. (integer value) +#http_request_max_retries=3 + +# Env key for the swift cache. (string value) +#cache= + +# Required if identity server requires client certificate (string value) +#certfile= + +# Required if identity server requires client certificate (string value) +#keyfile= + +# A PEM encoded Certificate Authority to use when verifying HTTPs connections. +# Defaults to system CAs. (string value) +#cafile= + +# Verify HTTPS connections. (boolean value) +#insecure=false + +# The region in which the identity server can be found. (string value) +#region_name= + +# Directory used to cache files related to PKI tokens. (string value) +#signing_dir= + +# Optionally specify a list of memcached server(s) to use for caching. If left +# undefined, tokens will instead be cached in-process. (list value) +# Deprecated group;name - DEFAULT;memcache_servers +#memcached_servers= + +# In order to prevent excessive effort spent validating tokens, the middleware +# caches previously-seen tokens for a configurable duration (in seconds). Set +# to -1 to disable caching completely. (integer value) +#token_cache_time=300 + +# Determines the frequency at which the list of revoked tokens is retrieved +# from the Identity service (in seconds). A high number of revocation events +# combined with a low cache duration may significantly reduce performance. +# (integer value) +#revocation_cache_time=10 + +# (Optional) If defined, indicate whether token data should be authenticated or +# authenticated and encrypted. Acceptable values are MAC or ENCRYPT. If MAC, +# token data is authenticated (with HMAC) in the cache. If ENCRYPT, token data +# is encrypted and authenticated in the cache. If the value is not one of these +# options or empty, auth_token will raise an exception on initialization. +# (string value) +#memcache_security_strategy= + +# (Optional, mandatory if memcache_security_strategy is defined) This string is +# used for key derivation. 
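(Editorial note: the comments above point out that validated tokens are cached in-process unless memcached_servers is set. A sketch of pointing the token cache at a memcached instance instead; the endpoint, cache lifetime and config path are illustrative assumptions, not values from this template.)

NOVA_CONF=/etc/nova/nova.conf            # assumed install path for the rendered template
MEMCACHED=192.0.2.10:11211               # hypothetical memcached endpoint
sudo sed -i "s|^#memcached_servers=.*|memcached_servers=${MEMCACHED}|" "${NOVA_CONF}"
sudo sed -i "s|^#token_cache_time=.*|token_cache_time=300|" "${NOVA_CONF}"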
(string value) +#memcache_secret_key= + +# (Optional) Number of seconds memcached server is considered dead before it is +# tried again. (integer value) +#memcache_pool_dead_retry=300 + +# (Optional) Maximum total number of open connections to every memcached +# server. (integer value) +#memcache_pool_maxsize=10 + +# (Optional) Socket timeout in seconds for communicating with a memcached +# server. (integer value) +#memcache_pool_socket_timeout=3 + +# (Optional) Number of seconds a connection to memcached is held unused in the +# pool before it is closed. (integer value) +#memcache_pool_unused_timeout=60 + +# (Optional) Number of seconds that an operation will wait to get a memcached +# client connection from the pool. (integer value) +#memcache_pool_conn_get_timeout=10 + +# (Optional) Use the advanced (eventlet safe) memcached client pool. The +# advanced pool will only work under python 2.x. (boolean value) +#memcache_use_advanced_pool=false + +# (Optional) Indicate whether to set the X-Service-Catalog header. If False, +# middleware will not ask for service catalog on token validation and will not +# set the X-Service-Catalog header. (boolean value) +#include_service_catalog=true + +# Used to control the use and type of token binding. Can be set to: "disabled" +# to not check token binding. "permissive" (default) to validate binding +# information if the bind type is of a form known to the server and ignore it +# if not. "strict" like "permissive" but if the bind type is unknown the token +# will be rejected. "required" any form of token binding is needed to be +# allowed. Finally the name of a binding method that must be present in tokens. +# (string value) +#enforce_token_bind=permissive + +# If true, the revocation list will be checked for cached tokens. This requires +# that PKI tokens are configured on the identity server. (boolean value) +#check_revocations_for_cached=false + +# Hash algorithms to use for hashing PKI tokens. This may be a single algorithm +# or multiple. The algorithms are those supported by Python standard +# hashlib.new(). The hashes will be tried in the order given, so put the +# preferred one first for performance. The result of the first hash will be +# stored in the cache. This will typically be set to multiple values only while +# migrating from a less secure algorithm to a more secure one. Once all the old +# tokens are expired this option should be set to a single value for better +# performance. (list value) +#hash_algorithms=md5 + +# Prefix to prepend at the beginning of the path. Deprecated, use identity_uri. +# (string value) +#auth_admin_prefix = + +# Host providing the admin Identity API endpoint. Deprecated, use identity_uri. +# (string value) +#auth_host=127.0.0.1 + +# Port of the admin Identity API endpoint. Deprecated, use identity_uri. +# (integer value) +#auth_port=35357 + +# Protocol of the admin Identity API endpoint (http or https). Deprecated, use +# identity_uri. (string value) +#auth_protocol=http + +# Complete admin Identity API endpoint. This should specify the unversioned +# root endpoint e.g. https://localhost:35357/ (string value) +#identity_uri= +identity_uri=http://VARINET4ADDR:35357 + +# This option is deprecated and may be removed in a future release. Single +# shared secret with the Keystone configuration used for bootstrapping a +# Keystone installation, or otherwise bypassing the normal authentication +# process. This option should not be used, use `admin_user` and +# `admin_password` instead. 
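(Editorial note: with auth_uri and identity_uri both templated to the VARINET4ADDR host, a quick smoke test is to request a token with the service credentials that are filled in just below. A hedged sketch, assuming python-openstackclient is installed on the node running the check; the address is a stand-in for the substituted value.)

KEYSTONE=192.0.2.10            # placeholder for the substituted VARINET4ADDR
openstack --os-auth-url "http://${KEYSTONE}:5000/v2.0" \
          --os-identity-api-version 2 \
          --os-username nova --os-password qum5net \
          --os-project-name services \
          token issue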
(string value) +#admin_token= + +# Service username. (string value) +#admin_user= +admin_user=nova + +# Service user password. (string value) +#admin_password= +admin_password=qum5net + +# Service tenant name. (string value) +#admin_tenant_name=admin +admin_tenant_name=services + + +[libvirt] + +# +# From nova.virt +# + +# Rescue ami image. This will not be used if an image id is provided by the +# user. (string value) +#rescue_image_id= + +# Rescue aki image (string value) +#rescue_kernel_id= + +# Rescue ari image (string value) +#rescue_ramdisk_id= + +# Libvirt domain type (string value) +# Allowed values: kvm, lxc, qemu, uml, xen, parallels +#virt_type=kvm +virt_type=kvm + +# Override the default libvirt URI (which is dependent on virt_type) (string +# value) +#connection_uri = + +# Inject the admin password at boot time, without an agent. (boolean value) +#inject_password=false +inject_password=False + +# Inject the ssh public key at boot time (boolean value) +#inject_key=false +inject_key=False + +# The partition to inject to : -2 => disable, -1 => inspect (libguestfs only), +# 0 => not partitioned, >0 => partition number (integer value) +#inject_partition=-2 +inject_partition=-2 + +# Sync virtual and real mouse cursors in Windows VMs (boolean value) +#use_usb_tablet=true + +# Migration target URI (any included "%s" is replaced with the migration target +# hostname) (string value) +#live_migration_uri=qemu+tcp://%s/system +live_migration_uri=qemu+tcp://nova@%s/system + +# Migration flags to be set for live migration (string value) +#live_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED +live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_PERSIST_DEST, VIR_MIGRATE_TUNNELLED" + +# Migration flags to be set for block migration (string value) +#block_migration_flag=VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, VIR_MIGRATE_LIVE, VIR_MIGRATE_TUNNELLED, VIR_MIGRATE_NON_SHARED_INC + +# Maximum bandwidth(in MiB/s) to be used during migration. If set to 0, will +# choose a suitable default. Some hypervisors do not support this feature and +# will return an error if bandwidth is not 0. Please refer to the libvirt +# documentation for further details (integer value) +#live_migration_bandwidth=0 + +# Maximum permitted downtime, in milliseconds, for live migration switchover. +# Will be rounded up to a minimum of 100ms. Use a large value if guest liveness +# is unimportant. (integer value) +#live_migration_downtime=500 + +# Number of incremental steps to reach max downtime value. Will be rounded up +# to a minimum of 3 steps (integer value) +#live_migration_downtime_steps=10 + +# Time to wait, in seconds, between each step increase of the migration +# downtime. Minimum delay is 10 seconds. Value is per GiB of guest RAM + disk +# to be transferred, with lower bound of a minimum of 2 GiB per device (integer +# value) +#live_migration_downtime_delay=75 + +# Time to wait, in seconds, for migration to successfully complete transferring +# data before aborting the operation. Value is per GiB of guest RAM + disk to +# be transferred, with lower bound of a minimum of 2 GiB. Should usually be +# larger than downtime delay * downtime steps. Set to 0 to disable timeouts. +# (integer value) +#live_migration_completion_timeout=800 + +# Time to wait, in seconds, for migration to make forward progress in +# transferring data before aborting the operation. Set to 0 to disable +# timeouts. 
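(Editorial note: live_migration_uri is overridden above to qemu+tcp://nova@%s/system, so each compute node must run libvirtd listening on TCP and be reachable from its peers. A minimal connectivity check from one compute node; the peer hostname is a placeholder.)

PEER=compute-02.example.com     # hypothetical peer compute node
# If this prints the peer's hostname, the migration URI template resolves and connects.
virsh -c "qemu+tcp://nova@${PEER}/system" hostname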
(integer value) +#live_migration_progress_timeout=150 + +# Snapshot image format. Defaults to same as source image (string value) +# Allowed values: raw, qcow2, vmdk, vdi +#snapshot_image_format= + +# Override the default disk prefix for the devices attached to a server, which +# is dependent on virt_type. (valid options are: sd, xvd, uvd, vd) (string +# value) +#disk_prefix= + +# Number of seconds to wait for instance to shut down after soft reboot request +# is made. We fall back to hard reboot if instance does not shutdown within +# this window. (integer value) +#wait_soft_reboot_seconds=120 + +# Set to "host-model" to clone the host CPU feature flags; to "host- +# passthrough" to use the host CPU model exactly; to "custom" to use a named +# CPU model; to "none" to not set any CPU model. If virt_type="kvm|qemu", it +# will default to "host-model", otherwise it will default to "none" (string +# value) +# Allowed values: host-model, host-passthrough, custom, none +#cpu_mode= +cpu_mode=host-model + +# Set to a named libvirt CPU model (see names listed in +# /usr/share/libvirt/cpu_map.xml). Only has effect if cpu_mode="custom" and +# virt_type="kvm|qemu" (string value) +#cpu_model= + +# Location where libvirt driver will store snapshots before uploading them to +# image service (string value) +#snapshots_directory=$instances_path/snapshots + +# Location where the Xen hvmloader is kept (string value) +#xen_hvmloader_path=/usr/lib/xen/boot/hvmloader + +# Specific cachemodes to use for different disk types e.g: +# file=directsync,block=none (list value) +#disk_cachemodes = +disk_cachemodes="network=writeback" + +# A path to a device that will be used as source of entropy on the host. +# Permitted options are: /dev/random or /dev/hwrng (string value) +#rng_dev_path= + +# For qemu or KVM guests, set this option to specify a default machine type per +# host architecture. You can find a list of supported machine types in your +# environment by checking the output of the "virsh capabilities"command. The +# format of the value for this config option is host-arch=machine-type. For +# example: x86_64=machinetype1,armv7l=machinetype2 (list value) +#hw_machine_type= + +# The data source used to the populate the host "serial" UUID exposed to guest +# in the virtual BIOS. (string value) +# Allowed values: none, os, hardware, auto +#sysinfo_serial=auto + +# A number of seconds to memory usage statistics period. Zero or negative value +# mean to disable memory usage statistics. (integer value) +#mem_stats_period_seconds=10 + +# List of uid targets and ranges.Syntax is guest-uid:host-uid:countMaximum of 5 +# allowed. (list value) +#uid_maps = + +# List of guid targets and ranges.Syntax is guest-gid:host-gid:countMaximum of +# 5 allowed. (list value) +#gid_maps = + +# In a realtime host context vCPUs for guest will run in that scheduling +# priority. Priority depends on the host kernel (usually 1-99) (integer value) +#realtime_scheduler_priority=1 + +# VM Images format. If default is specified, then use_cow_images flag is used +# instead of this one. (string value) +# Allowed values: raw, qcow2, lvm, rbd, ploop, default +#images_type=default +images_type=rbd + +# LVM Volume Group that is used for VM images, when you specify +# images_type=lvm. (string value) +#images_volume_group= + +# Create sparse logical volumes (with virtualsize) if this flag is set to True. 
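(Editorial note: disk_cachemodes="network=writeback" above applies to network-backed disks, which with images_type=rbd means the RADOS-backed ephemeral disks. A hedged spot-check that a running guest actually picked up the cache mode; the libvirt domain name is a placeholder.)

INSTANCE=instance-00000001      # hypothetical libvirt domain name on the compute node
# Expect a <driver ... cache='writeback'/> element next to the rbd-backed <source>.
virsh dumpxml "${INSTANCE}" | grep -B2 -A2 "protocol='rbd'"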
+# (boolean value) +#sparse_logical_volumes=false + +# The RADOS pool in which rbd volumes are stored (string value) +#images_rbd_pool=rbd +images_rbd_pool=vms + +# Path to the ceph configuration file to use (string value) +#images_rbd_ceph_conf = +images_rbd_ceph_conf = /etc/ceph/ceph.conf +rbd_user = cinder +rbd_secret_uuid = RBDSECRET + +# Discard option for nova managed disks. Need Libvirt(1.0.6) Qemu1.5 (raw +# format) Qemu1.6(qcow2 format) (string value) +# Allowed values: ignore, unmap +#hw_disk_discard= +hw_disk_discard=unmap + +# Allows image information files to be stored in non-standard locations (string +# value) +#image_info_filename_pattern=$instances_path/$image_cache_subdirectory_name/%(image)s.info + +# DEPRECATED: Should unused kernel images be removed? This is only safe to +# enable if all compute nodes have been updated to support this option (running +# Grizzly or newer level compute). This will be the default behavior in the +# 13.0.0 release. (boolean value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#remove_unused_kernels=true + +# Unused resized base images younger than this will not be removed (integer +# value) +#remove_unused_resized_minimum_age_seconds=3600 + +# Write a checksum for files in _base to disk (boolean value) +#checksum_base_images=false + +# How frequently to checksum base images (integer value) +#checksum_interval_seconds=3600 + +# Method used to wipe old volumes. (string value) +# Allowed values: none, zero, shred +#volume_clear=zero + +# Size in MiB to wipe at start of old volumes. 0 => all (integer value) +#volume_clear_size=0 + +# Compress snapshot images when possible. This currently applies exclusively to +# qcow2 images (boolean value) +#snapshot_compression=false + +# Use virtio for bridge interfaces with KVM/QEMU (boolean value) +#use_virtio_for_bridges=true + +# Protocols listed here will be accessed directly from QEMU. Currently +# supported protocols: [gluster] (list value) +#qemu_allowed_storage_drivers = +vif_driver=nova.virt.libvirt.vif.LibvirtGenericVIFDriver + + +[matchmaker_redis] + +# +# From oslo.messaging +# + +# Host to locate redis. (string value) +#host=127.0.0.1 + +# Use this port to connect to redis host. (integer value) +#port=6379 + +# Password for Redis server (optional). (string value) +#password= + + +[matchmaker_ring] + +# +# From oslo.messaging +# + +# Matchmaker ring file (JSON). (string value) +# Deprecated group;name - DEFAULT;matchmaker_ringfile +#ringfile=/etc/oslo/matchmaker_ring.json + + +[metrics] + +# +# From nova.scheduler +# + +# Multiplier used for weighing metrics. (floating point value) +#weight_multiplier=1.0 + +# How the metrics are going to be weighed. This should be in the form of +# "=, =, ...", where is one of the +# metrics to be weighed, and is the corresponding ratio. So for +# "name1=1.0, name2=-1.0" The final weight would be name1.value * 1.0 + +# name2.value * -1.0. (list value) +#weight_setting = + +# How to treat the unavailable metrics. When a metric is NOT available for a +# host, if it is set to be True, it would raise an exception, so it is +# recommended to use the scheduler filter MetricFilter to filter out those +# hosts. If it is set to be False, the unavailable metric would be treated as a +# negative factor in weighing process, the returned value would be set by the +# option weight_of_unavailable. 
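(Editorial note: rbd_user, rbd_secret_uuid and images_rbd_pool above tie nova's libvirt driver to a Ceph client key; RBDSECRET is another placeholder substituted by the helper script later in this patch. The sketch below shows the matching libvirt secret and pool a compute node would need. The UUID, pool pg count and keyring handling are assumptions for illustration; the scripts in this patch drive the equivalent wiring themselves.)

SECRET_UUID=457eb676-33da-42ec-9a8c-9293d545c337   # placeholder for the substituted RBDSECRET
sudo ceph osd pool create vms 64                   # only if the pool named in images_rbd_pool is missing
cat > secret.xml <<EOF
<secret ephemeral='no' private='no'>
  <uuid>${SECRET_UUID}</uuid>
  <usage type='ceph'>
    <name>client.cinder secret</name>
  </usage>
</secret>
EOF
sudo virsh secret-define --file secret.xml
sudo virsh secret-set-value --secret "${SECRET_UUID}" \
    --base64 "$(sudo ceph auth get-key client.cinder)"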
(boolean value) +#required=true + +# The final weight value to be returned if required is set to False and any one +# of the metrics set by weight_setting is unavailable. (floating point value) +#weight_of_unavailable=-10000.0 + + +[neutron] + +# +# From nova.api +# + +# Set flag to indicate Neutron will proxy metadata requests and resolve +# instance ids. (boolean value) +#service_metadata_proxy=false +service_metadata_proxy=True + +# Shared secret to validate proxies Neutron metadata requests (string value) +#metadata_proxy_shared_secret = +metadata_proxy_shared_secret =qum5net + +# +# From nova.network +# + +# URL for connecting to neutron (string value) +#url=http://127.0.0.1:9696 +url=http://VARINET4ADDR:9696 + +# User id for connecting to neutron in admin context. DEPRECATED: specify an +# auth_plugin and appropriate credentials instead. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#admin_user_id= + +# Username for connecting to neutron in admin context DEPRECATED: specify an +# auth_plugin and appropriate credentials instead. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#admin_username= +admin_username=neutron + +# Password for connecting to neutron in admin context DEPRECATED: specify an +# auth_plugin and appropriate credentials instead. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#admin_password= +admin_password=qum5net + +# Tenant id for connecting to neutron in admin context DEPRECATED: specify an +# auth_plugin and appropriate credentials instead. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#admin_tenant_id= + +# Tenant name for connecting to neutron in admin context. This option will be +# ignored if neutron_admin_tenant_id is set. Note that with Keystone V3 tenant +# names are only unique within a domain. DEPRECATED: specify an auth_plugin and +# appropriate credentials instead. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#admin_tenant_name= +admin_tenant_name=services + +# Region name for connecting to neutron in admin context (string value) +#region_name= +region_name=RegionOne + +# Authorization URL for connecting to neutron in admin context. DEPRECATED: +# specify an auth_plugin and appropriate credentials instead. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#admin_auth_url=http://localhost:5000/v2.0 +admin_auth_url=http://VARINET4ADDR:5000/v2.0 + +# Authorization strategy for connecting to neutron in admin context. +# DEPRECATED: specify an auth_plugin and appropriate credentials instead. If an +# auth_plugin is specified strategy will be ignored. (string value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#auth_strategy=keystone +auth_strategy=keystone + +# Name of Integration Bridge used by Open vSwitch (string value) +#ovs_bridge=br-int +ovs_bridge=br-int + +# Number of seconds before querying neutron for extensions (integer value) +#extension_sync_interval=600 +extension_sync_interval=600 + +# +# From nova.network.neutronv2 +# + +# Authentication URL (string value) +#auth_url= + +# Name of the plugin to load (string value) +#auth_plugin= + +# PEM encoded Certificate Authority to use when verifying HTTPs connections. 
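(Editorial note: service_metadata_proxy and metadata_proxy_shared_secret above only work if neutron's metadata agent carries the same secret. A hedged sketch of the matching change on the network node; the config path and service name are the usual RDO defaults and should be verified on the actual install.)

META_CONF=/etc/neutron/metadata_agent.ini      # usual default path; confirm on your install
sudo sed -i 's|^#\?metadata_proxy_shared_secret.*|metadata_proxy_shared_secret = qum5net|' "${META_CONF}"
sudo systemctl restart neutron-metadata-agent  # unit name may differ by distribution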
+# (string value) +# Deprecated group;name - [neutron]/ca_certificates_file +#cafile= + +# PEM encoded client certificate cert file (string value) +#certfile= + +# Domain ID to scope to (string value) +#domain_id= + +# Domain name to scope to (string value) +#domain_name= + +# Verify HTTPS connections. (boolean value) +# Deprecated group;name - [neutron]/api_insecure +#insecure=false + +# PEM encoded client certificate key file (string value) +#keyfile= + +# User's password (string value) +#password= + +# Domain ID containing project (string value) +#project_domain_id= + +# Domain name containing project (string value) +#project_domain_name= + +# Project ID to scope to (string value) +#project_id= + +# Project name to scope to (string value) +#project_name= + +# Tenant ID to scope to (string value) +#tenant_id= + +# Tenant name to scope to (string value) +#tenant_name= + +# Timeout value for http requests (integer value) +# Deprecated group;name - [neutron]/url_timeout +#timeout= +timeout=30 + +# Trust ID (string value) +#trust_id= + +# User's domain id (string value) +#user_domain_id= + +# User's domain name (string value) +#user_domain_name= + +# User id (string value) +#user_id= + +# Username (string value) +# Deprecated group;name - DEFAULT;username +#username= +default_tenant_id=default + + +[osapi_v21] + +# +# From nova.api +# + +# DEPRECATED: Whether the V2.1 API is enabled or not. This option will be +# removed in the near future. (boolean value) +# Deprecated group;name - [osapi_v21]/enabled +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#enabled=true + +# DEPRECATED: A list of v2.1 API extensions to never load. Specify the +# extension aliases here. This option will be removed in the near future. After +# that point you have to run all of the API. (list value) +# Deprecated group;name - [osapi_v21]/extensions_blacklist +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#extensions_blacklist = + +# DEPRECATED: If the list is not empty then a v2.1 API extension will only be +# loaded if it exists in this list. Specify the extension aliases here. This +# option will be removed in the near future. After that point you have to run +# all of the API. (list value) +# Deprecated group;name - [osapi_v21]/extensions_whitelist +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#extensions_whitelist = + + +[oslo_concurrency] + +# +# From oslo.concurrency +# + +# Enables or disables inter-process locks. (boolean value) +# Deprecated group;name - DEFAULT;disable_process_locking +#disable_process_locking=false + +# Directory to use for lock files. For security, the specified directory +# should only be writable by the user running the processes that need locking. +# Defaults to environment variable OSLO_LOCK_PATH. If external locks are used, +# a lock path must be set. 
(string value) +# Deprecated group;name - DEFAULT;lock_path +#lock_path=/var/lib/nova/tmp + + +[oslo_messaging_amqp] + +# +# From oslo.messaging +# + +# address prefix used when sending to a specific server (string value) +# Deprecated group;name - [amqp1]/server_request_prefix +#server_request_prefix=exclusive + +# address prefix used when broadcasting to all servers (string value) +# Deprecated group;name - [amqp1]/broadcast_prefix +#broadcast_prefix=broadcast + +# address prefix when sending to any server in group (string value) +# Deprecated group;name - [amqp1]/group_request_prefix +#group_request_prefix=unicast + +# Name for the AMQP container (string value) +# Deprecated group;name - [amqp1]/container_name +#container_name= + +# Timeout for inactive connections (in seconds) (integer value) +# Deprecated group;name - [amqp1]/idle_timeout +#idle_timeout=0 + +# Debug: dump AMQP frames to stdout (boolean value) +# Deprecated group;name - [amqp1]/trace +#trace=false + +# CA certificate PEM file to verify server certificate (string value) +# Deprecated group;name - [amqp1]/ssl_ca_file +#ssl_ca_file = + +# Identifying certificate PEM file to present to clients (string value) +# Deprecated group;name - [amqp1]/ssl_cert_file +#ssl_cert_file = + +# Private key PEM file used to sign cert_file certificate (string value) +# Deprecated group;name - [amqp1]/ssl_key_file +#ssl_key_file = + +# Password for decrypting ssl_key_file (if encrypted) (string value) +# Deprecated group;name - [amqp1]/ssl_key_password +#ssl_key_password= + +# Accept clients using either SSL or plain TCP (boolean value) +# Deprecated group;name - [amqp1]/allow_insecure_clients +#allow_insecure_clients=false + + +[oslo_messaging_qpid] + +# +# From oslo.messaging +# + +# Use durable queues in AMQP. (boolean value) +# Deprecated group;name - DEFAULT;amqp_durable_queues +# Deprecated group;name - DEFAULT;rabbit_durable_queues +#amqp_durable_queues=false + +# Auto-delete queues in AMQP. (boolean value) +# Deprecated group;name - DEFAULT;amqp_auto_delete +#amqp_auto_delete=false + +# Send a single AMQP reply to call message. The current behaviour since oslo- +# incubator is to send two AMQP replies - first one with the payload, a second +# one to ensure the other have finish to send the payload. We are going to +# remove it in the N release, but we must keep backward compatible at the same +# time. This option provides such compatibility - it defaults to False in +# Liberty and can be turned on for early adopters with a new installations or +# for testing. Please note, that this option will be removed in the Mitaka +# release. (boolean value) +#send_single_reply=false + +# Qpid broker hostname. (string value) +# Deprecated group;name - DEFAULT;qpid_hostname +#qpid_hostname=localhost + +# Qpid broker port. (integer value) +# Deprecated group;name - DEFAULT;qpid_port +#qpid_port=5672 + +# Qpid HA cluster host:port pairs. (list value) +# Deprecated group;name - DEFAULT;qpid_hosts +#qpid_hosts=$qpid_hostname:$qpid_port + +# Username for Qpid connection. (string value) +# Deprecated group;name - DEFAULT;qpid_username +#qpid_username = + +# Password for Qpid connection. (string value) +# Deprecated group;name - DEFAULT;qpid_password +#qpid_password = + +# Space separated list of SASL mechanisms to use for auth. (string value) +# Deprecated group;name - DEFAULT;qpid_sasl_mechanisms +#qpid_sasl_mechanisms = + +# Seconds between connection keepalive heartbeats. 
(integer value) +# Deprecated group;name - DEFAULT;qpid_heartbeat +#qpid_heartbeat=60 + +# Transport to use, either 'tcp' or 'ssl'. (string value) +# Deprecated group;name - DEFAULT;qpid_protocol +#qpid_protocol=tcp + +# Whether to disable the Nagle algorithm. (boolean value) +# Deprecated group;name - DEFAULT;qpid_tcp_nodelay +#qpid_tcp_nodelay=true + +# The number of prefetched messages held by receiver. (integer value) +# Deprecated group;name - DEFAULT;qpid_receiver_capacity +#qpid_receiver_capacity=1 + +# The qpid topology version to use. Version 1 is what was originally used by +# impl_qpid. Version 2 includes some backwards-incompatible changes that allow +# broker federation to work. Users should update to version 2 when they are +# able to take everything down, as it requires a clean break. (integer value) +# Deprecated group;name - DEFAULT;qpid_topology_version +#qpid_topology_version=1 + + +[oslo_messaging_rabbit] + +# +# From oslo.messaging +# + +# Use durable queues in AMQP. (boolean value) +# Deprecated group;name - DEFAULT;amqp_durable_queues +# Deprecated group;name - DEFAULT;rabbit_durable_queues +#amqp_durable_queues=false +amqp_durable_queues=False + +# Auto-delete queues in AMQP. (boolean value) +# Deprecated group;name - DEFAULT;amqp_auto_delete +#amqp_auto_delete=false + +# Send a single AMQP reply to call message. The current behaviour since oslo- +# incubator is to send two AMQP replies - first one with the payload, a second +# one to ensure the other have finish to send the payload. We are going to +# remove it in the N release, but we must keep backward compatible at the same +# time. This option provides such compatibility - it defaults to False in +# Liberty and can be turned on for early adopters with a new installations or +# for testing. Please note, that this option will be removed in the Mitaka +# release. (boolean value) +#send_single_reply=false + +# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and +# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some +# distributions. (string value) +# Deprecated group;name - DEFAULT;kombu_ssl_version +#kombu_ssl_version = + +# SSL key file (valid only if SSL enabled). (string value) +# Deprecated group;name - DEFAULT;kombu_ssl_keyfile +#kombu_ssl_keyfile = + +# SSL cert file (valid only if SSL enabled). (string value) +# Deprecated group;name - DEFAULT;kombu_ssl_certfile +#kombu_ssl_certfile = + +# SSL certification authority file (valid only if SSL enabled). (string value) +# Deprecated group;name - DEFAULT;kombu_ssl_ca_certs +#kombu_ssl_ca_certs = + +# How long to wait before reconnecting in response to an AMQP consumer cancel +# notification. (floating point value) +# Deprecated group;name - DEFAULT;kombu_reconnect_delay +#kombu_reconnect_delay=1.0 +kombu_reconnect_delay=1.0 + +# How long to wait before considering a reconnect attempt to have failed. This +# value should not be longer than rpc_response_timeout. (integer value) +#kombu_reconnect_timeout=60 + +# Determines how the next RabbitMQ node is chosen in case the one we are +# currently connected to becomes unavailable. Takes effect only if more than +# one RabbitMQ node is provided in config. (string value) +# Allowed values: round-robin, shuffle +#kombu_failover_strategy=round-robin + +# The RabbitMQ broker address where a single node is used. (string value) +# Deprecated group;name - DEFAULT;rabbit_host +#rabbit_host=localhost +rabbit_host=VARINET4ADDR + +# The RabbitMQ broker port where a single node is used. 
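(Editorial note: rabbit_host is templated to VARINET4ADDR above, so messaging breaks silently if that address is wrong or filtered. A minimal reachability check from a compute node once the substitution has happened; the address is a stand-in.)

RABBIT_HOST=192.0.2.10          # placeholder for the substituted VARINET4ADDR
if timeout 5 bash -c "cat < /dev/null > /dev/tcp/${RABBIT_HOST}/5672"; then
    echo "AMQP port open on ${RABBIT_HOST}:5672"
else
    echo "cannot reach RabbitMQ on ${RABBIT_HOST}:5672" >&2
fi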
(integer value) +# Deprecated group;name - DEFAULT;rabbit_port +#rabbit_port=5672 +rabbit_port=5672 + +# RabbitMQ HA cluster host:port pairs. (list value) +# Deprecated group;name - DEFAULT;rabbit_hosts +#rabbit_hosts=$rabbit_host:$rabbit_port +rabbit_hosts=VARINET4ADDR:5672 + +# Connect over SSL for RabbitMQ. (boolean value) +# Deprecated group;name - DEFAULT;rabbit_use_ssl +#rabbit_use_ssl=false +rabbit_use_ssl=False + +# The RabbitMQ userid. (string value) +# Deprecated group;name - DEFAULT;rabbit_userid +#rabbit_userid=guest +rabbit_userid=guest + +# The RabbitMQ password. (string value) +# Deprecated group;name - DEFAULT;rabbit_password +#rabbit_password=guest +rabbit_password=guest + +# The RabbitMQ login method. (string value) +# Deprecated group;name - DEFAULT;rabbit_login_method +#rabbit_login_method=AMQPLAIN + +# The RabbitMQ virtual host. (string value) +# Deprecated group;name - DEFAULT;rabbit_virtual_host +#rabbit_virtual_host=/ +rabbit_virtual_host=/ + +# How frequently to retry connecting with RabbitMQ. (integer value) +#rabbit_retry_interval=1 + +# How long to backoff for between retries when connecting to RabbitMQ. (integer +# value) +# Deprecated group;name - DEFAULT;rabbit_retry_backoff +#rabbit_retry_backoff=2 + +# Maximum number of RabbitMQ connection retries. Default is 0 (infinite retry +# count). (integer value) +# Deprecated group;name - DEFAULT;rabbit_max_retries +#rabbit_max_retries=0 + +# Use HA queues in RabbitMQ (x-ha-policy: all). If you change this option, you +# must wipe the RabbitMQ database. (boolean value) +# Deprecated group;name - DEFAULT;rabbit_ha_queues +#rabbit_ha_queues=false +rabbit_ha_queues=False + +# Specifies the number of messages to prefetch. Setting to zero allows +# unlimited messages. (integer value) +#rabbit_qos_prefetch_count=0 + +# Number of seconds after which the Rabbit broker is considered down if +# heartbeat's keep-alive fails (0 disable the heartbeat). EXPERIMENTAL (integer +# value) +#heartbeat_timeout_threshold=60 +heartbeat_timeout_threshold=0 + +# How often times during the heartbeat_timeout_threshold we check the +# heartbeat. (integer value) +#heartbeat_rate=2 +heartbeat_rate=2 + +# Deprecated, use rpc_backend=kombu+memory or rpc_backend=fake (boolean value) +# Deprecated group;name - DEFAULT;fake_rabbit +#fake_rabbit=false + + +[oslo_middleware] + +# +# From oslo.middleware +# + +# The maximum body size for each request, in bytes. (integer value) +# Deprecated group;name - DEFAULT;osapi_max_request_body_size +# Deprecated group;name - DEFAULT;max_request_body_size +#max_request_body_size=114688 + +# +# From oslo.middleware +# + +# The HTTP Header that will be used to determine what the original request +# protocol scheme was, even if it was hidden by an SSL termination proxy. 
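(Editorial note: rabbit_ha_queues stays False in this template, and the comment above warns that flipping it on an existing deployment also means wiping the broker's queue state. If it were enabled, the broker side would additionally need an HA policy; a hedged sketch, run on the RabbitMQ host, with an arbitrary policy name.)

# Mirror every non-amq.* queue across the cluster; only relevant if rabbit_ha_queues=True.
sudo rabbitmqctl set_policy ha-all '^(?!amq\.).*' '{"ha-mode":"all"}'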
+# (string value) +#secure_proxy_ssl_header=X-Forwarded-Proto + + +[rdp] + +# +# From nova +# + +# Location of RDP html5 console proxy, in the form "http://127.0.0.1:6083/" +# (string value) +#html5_proxy_base_url=http://127.0.0.1:6083/ + +# Enable RDP related features (boolean value) +#enabled=false + + +[serial_console] + +# +# From nova +# + +# Host on which to listen for incoming requests (string value) +#serialproxy_host=0.0.0.0 + +# Port on which to listen for incoming requests (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#serialproxy_port=6083 + +# Enable serial console related features (boolean value) +#enabled=false + +# Range of TCP ports to use for serial ports on compute hosts (string value) +#port_range=10000:20000 + +# Location of serial console proxy. (string value) +#base_url=ws://127.0.0.1:6083/ + +# IP address on which instance serial console should listen (string value) +#listen=127.0.0.1 + +# The address to which proxy clients (like nova-serialproxy) should connect +# (string value) +#proxyclient_address=127.0.0.1 + + +[spice] + +# +# From nova +# + +# Host on which to listen for incoming requests (string value) +#html5proxy_host=0.0.0.0 + +# Port on which to listen for incoming requests (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#html5proxy_port=6082 + +# Location of spice HTML5 console proxy, in the form +# "http://127.0.0.1:6082/spice_auto.html" (string value) +#html5proxy_base_url=http://127.0.0.1:6082/spice_auto.html + +# IP address on which instance spice server should listen (string value) +#server_listen=127.0.0.1 + +# The address to which proxy clients (like nova-spicehtml5proxy) should connect +# (string value) +#server_proxyclient_address=127.0.0.1 + +# Enable spice related features (boolean value) +#enabled=false + +# Enable spice guest agent support (boolean value) +#agent_enabled=true + +# Keymap for spice (string value) +#keymap=en-us + + +[ssl] + +# +# From oslo.service.sslutils +# + +# CA certificate file to use to verify connecting clients. (string value) +#ca_file= + +# Certificate file to use when starting the server securely. (string value) +#cert_file= + +# Private key file to use when starting the server securely. 
(string value) +#key_file= + + +[trusted_computing] + +# +# From nova.scheduler +# + +# Attestation server HTTP (string value) +#attestation_server= + +# Attestation server Cert file for Identity verification (string value) +#attestation_server_ca_file= + +# Attestation server port (string value) +#attestation_port=8443 + +# Attestation web API URL (string value) +#attestation_api_url=/OpenAttestationWebServices/V1.0 + +# Attestation authorization blob - must change (string value) +#attestation_auth_blob= + +# Attestation status cache valid period length (integer value) +#attestation_auth_timeout=60 + +# Disable SSL cert verification for Attestation service (boolean value) +#attestation_insecure_ssl=false + + +[upgrade_levels] + +# +# From nova +# + +# Set a version cap for messages sent to the base api in any service (string +# value) +#baseapi= + +# Set a version cap for messages sent to cert services (string value) +#cert= + +# Set a version cap for messages sent to conductor services (string value) +#conductor= + +# Set a version cap for messages sent to console services (string value) +#console= + +# Set a version cap for messages sent to consoleauth services (string value) +#consoleauth= + +# +# From nova.cells +# + +# Set a version cap for messages sent between cells services (string value) +#intercell= + +# Set a version cap for messages sent to local cells services (string value) +#cells= + +# +# From nova.compute +# + +# Set a version cap for messages sent to compute services. If you plan to do a +# live upgrade from an old version to a newer version, you should set this +# option to the old version before beginning the live upgrade procedure. Only +# upgrading to the next version is supported, so you cannot skip a release for +# the live upgrade procedure. (string value) +#compute= + +# +# From nova.network +# + +# Set a version cap for messages sent to network services (string value) +#network= + +# +# From nova.scheduler +# + +# Set a version cap for messages sent to scheduler services (string value) +#scheduler= + + +[vmware] + +# +# From nova.virt +# + +# The maximum number of ObjectContent data objects that should be returned in a +# single result. A positive value will cause the operation to suspend the +# retrieval when the count of objects reaches the specified maximum. The server +# may still limit the count to something less than the configured value. Any +# remaining objects may be retrieved with additional requests. (integer value) +#maximum_objects=100 + +# The PBM status. (boolean value) +#pbm_enabled=false + +# PBM service WSDL file location URL. e.g. +# file:///opt/SDK/spbm/wsdl/pbmService.wsdl Not setting this will disable +# storage policy based placement of instances. (string value) +#pbm_wsdl_location= + +# The PBM default policy. If pbm_wsdl_location is set and there is no defined +# storage policy for the specific request then this policy will be used. +# (string value) +#pbm_default_policy= + +# Hostname or IP address for connection to VMware vCenter host. (string value) +#host_ip= + +# Port for connection to VMware vCenter host. (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#host_port=443 + +# Username for connection to VMware vCenter host. (string value) +#host_username= + +# Password for connection to VMware vCenter host. (string value) +#host_password= + +# Specify a CA bundle file to use in verifying the vCenter server certificate. +# (string value) +#ca_file= + +# If true, the vCenter server certificate is not verified. 
If false, then the +# default CA truststore is used for verification. This option is ignored if +# "ca_file" is set. (boolean value) +#insecure=false + +# Name of a VMware Cluster ComputeResource. (string value) +#cluster_name= + +# Regex to match the name of a datastore. (string value) +#datastore_regex= + +# The interval used for polling of remote tasks. (floating point value) +#task_poll_interval=0.5 + +# The number of times we retry on failures, e.g., socket error, etc. (integer +# value) +#api_retry_count=10 + +# VNC starting port (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#vnc_port=5900 + +# Total number of VNC ports (integer value) +#vnc_port_total=10000 + +# Whether to use linked clone (boolean value) +#use_linked_clone=true + +# Optional VIM Service WSDL Location e.g http:///vimService.wsdl. +# Optional over-ride to default location for bug work-arounds (string value) +#wsdl_location= + +# Physical ethernet adapter name for vlan networking (string value) +#vlan_interface=vmnic0 + +# Name of Integration Bridge (string value) +#integration_bridge=br-int + +# Set this value if affected by an increased network latency causing repeated +# characters when typing in a remote console. (integer value) +#console_delay_seconds= + +# Identifies the remote system that serial port traffic will be sent to. If +# this is not set, no serial ports will be added to the created VMs. (string +# value) +#serial_port_service_uri= + +# Identifies a proxy service that provides network access to the +# serial_port_service_uri. This option is ignored if serial_port_service_uri is +# not specified. (string value) +#serial_port_proxy_uri= + +# The prefix for where cached images are stored. This is NOT the full path - +# just a folder prefix. This should only be used when a datastore cache should +# be shared between compute nodes. Note: this should only be used when the +# compute nodes have a shared file system. (string value) +#cache_prefix= + + +[vnc] + +# +# From nova +# + +# Location of VNC console proxy, in the form +# "http://127.0.0.1:6080/vnc_auto.html" (string value) +# Deprecated group;name - DEFAULT;novncproxy_base_url +#novncproxy_base_url=http://127.0.0.1:6080/vnc_auto.html + +# Location of nova xvp VNC console proxy, in the form +# "http://127.0.0.1:6081/console" (string value) +# Deprecated group;name - DEFAULT;xvpvncproxy_base_url +#xvpvncproxy_base_url=http://127.0.0.1:6081/console + +# IP address on which instance vncservers should listen (string value) +# Deprecated group;name - DEFAULT;vncserver_listen +#vncserver_listen=127.0.0.1 + +# The address to which proxy clients (like nova-xvpvncproxy) should connect +# (string value) +# Deprecated group;name - DEFAULT;vncserver_proxyclient_address +#vncserver_proxyclient_address=127.0.0.1 + +# Enable VNC related features (boolean value) +# Deprecated group;name - DEFAULT;vnc_enabled +#enabled=true + +# Keymap for VNC (string value) +# Deprecated group;name - DEFAULT;vnc_keymap +#keymap=en-us + + +[workarounds] + +# +# From nova +# + +# This option allows a fallback to sudo for performance reasons. For example +# see https://bugs.launchpad.net/nova/+bug/1415106 (boolean value) +#disable_rootwrap=false + +# When using libvirt 1.2.2 live snapshots fail intermittently under load. This +# config option provides a mechanism to enable live snapshot while this is +# resolved. 
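(Editorial note: the [vnc] defaults above all point at 127.0.0.1, which only works on an all-in-one host. A hedged sketch of the three options a multi-node compute host would normally override; the install path and addresses are placeholders, not values from this template.)

NOVA_CONF=/etc/nova/nova.conf              # assumed install path for the rendered template
CONTROLLER=192.0.2.10                      # hypothetical controller / noVNC proxy address
COMPUTE_IP=192.0.2.21                      # hypothetical compute-node management address
sudo sed -i \
    -e "s|^#novncproxy_base_url=.*|novncproxy_base_url=http://${CONTROLLER}:6080/vnc_auto.html|" \
    -e "s|^#vncserver_listen=.*|vncserver_listen=${COMPUTE_IP}|" \
    -e "s|^#vncserver_proxyclient_address=.*|vncserver_proxyclient_address=${COMPUTE_IP}|" \
    "${NOVA_CONF}"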
See https://bugs.launchpad.net/nova/+bug/1334398 (boolean value) +#disable_libvirt_livesnapshot=true + +# DEPRECATED: Whether to destroy instances on startup when we suspect they have +# previously been evacuated. This can result in data loss if undesired. See +# https://launchpad.net/bugs/1419785 (boolean value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +#destroy_after_evacuate=true + +# Whether or not to handle events raised from the compute driver's 'emit_event' +# method. These are lifecycle events raised from compute drivers that implement +# the method. An example of a lifecycle event is an instance starting or +# stopping. If the instance is going through task state changes due to an API +# operation, like resize, the events are ignored. However, this is an advanced +# feature which allows the hypervisor to signal to the compute service that an +# unexpected state change has occurred in an instance and the instance can be +# shutdown automatically - which can inherently race in reboot operations or +# when the compute service or host is rebooted, either planned or due to an +# unexpected outage. Care should be taken when using this and +# sync_power_state_interval is negative since then if any instances are out of +# sync between the hypervisor and the Nova database they will have to be +# synchronized manually. See https://bugs.launchpad.net/bugs/1444630 (boolean +# value) +#handle_virt_lifecycle_events=true + + +[xenserver] + +# +# From nova.virt +# + +# Name of Integration Bridge used by Open vSwitch (string value) +#ovs_integration_bridge=xapi1 + +# Number of seconds to wait for agent reply (integer value) +#agent_timeout=30 + +# Number of seconds to wait for agent to be fully operational (integer value) +#agent_version_timeout=300 + +# Number of seconds to wait for agent reply to resetnetwork request (integer +# value) +#agent_resetnetwork_timeout=60 + +# Specifies the path in which the XenAPI guest agent should be located. If the +# agent is present, network configuration is not injected into the image. Used +# if compute_driver=xenapi.XenAPIDriver and flat_injected=True (string value) +#agent_path=usr/sbin/xe-update-networking + +# Disables the use of the XenAPI agent in any image regardless of what image +# properties are present. (boolean value) +#disable_agent=false + +# Determines if the XenAPI agent should be used when the image used does not +# contain a hint to declare if the agent is present or not. The hint is a +# glance property "xenapi_use_agent" that has the value "True" or "False". Note +# that waiting for the agent when it is not present will significantly increase +# server boot times. (boolean value) +#use_agent_default=false + +# Timeout in seconds for XenAPI login. (integer value) +#login_timeout=10 + +# Maximum number of concurrent XenAPI connections. Used only if +# compute_driver=xenapi.XenAPIDriver (integer value) +#connection_concurrent=5 + +# URL for connection to XenServer/Xen Cloud Platform. A special value of +# unix://local can be used to connect to the local unix socket. Required if +# compute_driver=xenapi.XenAPIDriver (string value) +#connection_url= + +# Username for connection to XenServer/Xen Cloud Platform. Used only if +# compute_driver=xenapi.XenAPIDriver (string value) +#connection_username=root + +# Password for connection to XenServer/Xen Cloud Platform. 
Used only if +# compute_driver=xenapi.XenAPIDriver (string value) +#connection_password= + +# The interval used for polling of coalescing vhds. Used only if +# compute_driver=xenapi.XenAPIDriver (floating point value) +#vhd_coalesce_poll_interval=5.0 + +# Ensure compute service is running on host XenAPI connects to. (boolean value) +#check_host=true + +# Max number of times to poll for VHD to coalesce. Used only if +# compute_driver=xenapi.XenAPIDriver (integer value) +#vhd_coalesce_max_attempts=20 + +# Base path to the storage repository (string value) +#sr_base_path=/var/run/sr-mount + +# The iSCSI Target Host (string value) +#target_host= + +# The iSCSI Target Port, default is port 3260 (string value) +#target_port=3260 + +# IQN Prefix (string value) +#iqn_prefix=iqn.2010-10.org.openstack + +# Used to enable the remapping of VBD dev (Works around an issue in Ubuntu +# Maverick) (boolean value) +#remap_vbd_dev=false + +# Specify prefix to remap VBD dev to (ex. /dev/xvdb -> /dev/sdb) (string value) +#remap_vbd_dev_prefix=sd + +# Base URL for torrent files; must contain a slash character (see RFC 1808, +# step 6) (string value) +#torrent_base_url= + +# Probability that peer will become a seeder. (1.0 = 100%) (floating point +# value) +#torrent_seed_chance=1.0 + +# Number of seconds after downloading an image via BitTorrent that it should be +# seeded for other peers. (integer value) +#torrent_seed_duration=3600 + +# Cached torrent files not accessed within this number of seconds can be reaped +# (integer value) +#torrent_max_last_accessed=86400 + +# Beginning of port range to listen on (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#torrent_listen_port_start=6881 + +# End of port range to listen on (integer value) +# Minimum value: 1 +# Maximum value: 65535 +#torrent_listen_port_end=6891 + +# Number of seconds a download can remain at the same progress percentage w/o +# being considered a stall (integer value) +#torrent_download_stall_cutoff=600 + +# Maximum number of seeder processes to run concurrently within a given dom0. +# (-1 = no limit) (integer value) +#torrent_max_seeder_processes_per_host=1 + +# To use for hosts with different CPUs (boolean value) +#use_join_force=true + +# Cache glance images locally. `all` will cache all images, `some` will only +# cache images that have the image_property `cache_in_nova=True`, and `none` +# turns off caching entirely (string value) +# Allowed values: all, some, none +#cache_images=all + +# Compression level for images, e.g., 9 for gzip -9. Range is 1-9, 9 being most +# compressed but most CPU intensive on dom0. (integer value) +# Minimum value: 1 +# Maximum value: 9 +#image_compression_level= + +# Default OS type (string value) +#default_os_type=linux + +# Time to wait for a block device to be created (integer value) +#block_device_creation_timeout=10 + +# Maximum size in bytes of kernel or ramdisk images (integer value) +#max_kernel_ramdisk_size=16777216 + +# Filter for finding the SR to be used to install guest instances on. To use +# the Local Storage in default XenServer/XCP installations set this flag to +# other-config:i18n-key=local-storage. To select an SR with a different +# matching criteria, you could set it to other-config:my_favorite_sr=true. On +# the other hand, to fall back on the Default SR, as displayed by XenCenter, +# set this flag to: default-sr:true (string value) +#sr_matching_filter=default-sr:true + +# Whether to use sparse_copy for copying data on a resize down (False will use +# standard dd). 
This speeds up resizes down considerably since large runs of +# zeros won't have to be rsynced (boolean value) +#sparse_copy=true + +# Maximum number of retries to unplug VBD. if <=0, should try once and no retry +# (integer value) +#num_vbd_unplug_retries=10 + +# Whether or not to download images via Bit Torrent. (string value) +# Allowed values: all, some, none +#torrent_images=none + +# Name of network to use for booting iPXE ISOs (string value) +#ipxe_network_name= + +# URL to the iPXE boot menu (string value) +#ipxe_boot_menu_url= + +# Name and optionally path of the tool used for ISO image creation (string +# value) +#ipxe_mkisofs_cmd=mkisofs + +# Number of seconds to wait for instance to go to running state (integer value) +#running_timeout=60 + +# The XenAPI VIF driver using XenServer Network APIs. (string value) +#vif_driver=nova.virt.xenapi.vif.XenAPIBridgeDriver + +# Dom0 plugin driver used to handle image uploads. (string value) +#image_upload_handler=nova.virt.xenapi.image.glance.GlanceStore + +# Number of seconds to wait for an SR to settle if the VDI does not exist when +# first introduced (integer value) +#introduce_vdi_retry_wait=20 + + +[zookeeper] + +# +# From nova +# + +# The ZooKeeper addresses for servicegroup service in the format of +# host1:port,host2:port,host3:port (string value) +#address= + +# The recv_timeout parameter for the zk session (integer value) +#recv_timeout=4000 + +# The prefix used in ZooKeeper to store ephemeral nodes (string value) +#sg_prefix=/servicegroups + +# Number of seconds to wait until retrying to join the session (integer value) +#sg_retry_interval=5 + +[osapi_v3] +enabled=False diff --git a/qa/qa_scripts/openstack/fix_conf_file.sh b/qa/qa_scripts/openstack/fix_conf_file.sh new file mode 100755 index 00000000..8ccd2724 --- /dev/null +++ b/qa/qa_scripts/openstack/fix_conf_file.sh @@ -0,0 +1,28 @@ +source ./copy_func.sh +# +# Take a templated file, modify a local copy, and write it to the +# remote site. +# +# Usage: fix_conf_file [] +# -- site where we want this modified file stored. +# -- name of the remote file. +# -- directory where the file will be stored +# -- (optional) rbd_secret used by libvirt +# +function fix_conf_file() { + if [[ $# < 3 ]]; then + echo 'fix_conf_file: Too few parameters' + exit 1 + fi + openstack_node_local=${1} + cp files/${2}.template.conf ${2}.conf + hostname=`ssh $openstack_node_local hostname` + inet4addr=`ssh $openstack_node_local hostname -i` + sed -i s/VARHOSTNAME/$hostname/g ${2}.conf + sed -i s/VARINET4ADDR/$inet4addr/g ${2}.conf + if [[ $# == 4 ]]; then + sed -i s/RBDSECRET/${4}/g ${2}.conf + fi + copy_file ${2}.conf $openstack_node_local ${3} 0644 "root:root" + rm ${2}.conf +} diff --git a/qa/qa_scripts/openstack/image_create.sh b/qa/qa_scripts/openstack/image_create.sh new file mode 100755 index 00000000..ee7f61f3 --- /dev/null +++ b/qa/qa_scripts/openstack/image_create.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +# +# Set up a vm on packstack. Use the iso in RHEL_ISO (defaults to home dir) +# +set -fv +source ./copy_func.sh +source ./fix_conf_file.sh +openstack_node=${1} +ceph_node=${2} + +RHEL_ISO=${RHEL_ISO:-~/rhel-server-7.2-x86_64-boot.iso} +copy_file ${RHEL_ISO} $openstack_node . +copy_file execs/run_openstack.sh $openstack_node . 
0755 +filler=`date +%s` +ssh $openstack_node ./run_openstack.sh "${openstack_node}X${filler}" rhel-server-7.2-x86_64-boot.iso +ssh $ceph_node sudo ceph df diff --git a/qa/qa_scripts/openstack/openstack.sh b/qa/qa_scripts/openstack/openstack.sh new file mode 100755 index 00000000..1c1e6c00 --- /dev/null +++ b/qa/qa_scripts/openstack/openstack.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# +# Install Openstack. +# Usage: openstack +# +# This script installs Openstack on one node, and connects it to a ceph +# cluster on another set of nodes. It is intended to run from a third +# node. +# +# Assumes a single node Openstack cluster and a single monitor ceph +# cluster. +# +# The execs directory contains scripts to be run on remote sites. +# The files directory contains files to be copied to remote sites. +# + +set -fv +source ./copy_func.sh +source ./fix_conf_file.sh +openstack_node=${1} +ceph_node=${2} +./packstack.sh $openstack_node $ceph_node +echo 'done running packstack' +sleep 60 +./connectceph.sh $openstack_node $ceph_node +echo 'done connecting' +sleep 60 +./image_create.sh $openstack_node $ceph_node diff --git a/qa/qa_scripts/openstack/packstack.sh b/qa/qa_scripts/openstack/packstack.sh new file mode 100755 index 00000000..3f891f98 --- /dev/null +++ b/qa/qa_scripts/openstack/packstack.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +# +# Install openstack by running packstack. +# +# Implements the operations in: +# https://docs.google.com/document/d/1us18KR3LuLyINgGk2rmI-SVj9UksCE7y4C2D_68Aa8o/edit?ts=56a78fcb +# +# The directory named files contains a template for the kilo.conf file used by packstack. +# +set -fv +source ./copy_func.sh +source ./fix_conf_file.sh +openstack_node=${1} +ceph_node=${2} + +copy_file execs/openstack-preinstall.sh $openstack_node . 0777 +fix_conf_file $openstack_node kilo . +ssh $openstack_node sudo ./openstack-preinstall.sh +sleep 240 +ssh $openstack_node sudo packstack --answer-file kilo.conf diff --git a/qa/rbd/common.sh b/qa/rbd/common.sh new file mode 100644 index 00000000..232cf45a --- /dev/null +++ b/qa/rbd/common.sh @@ -0,0 +1,103 @@ +#!/usr/bin/env bash + +die() { + echo "$*" + exit 1 +} + +cleanup() { + rm -rf $TDIR + TDIR="" +} + +set_variables() { + # defaults + [ -z "$bindir" ] && bindir=$PWD # location of init-ceph + if [ -z "$conf" ]; then + conf="$basedir/ceph.conf" + [ -e $conf ] || conf="/etc/ceph/ceph.conf" + fi + [ -e $conf ] || die "conf file not found" + + CCONF="ceph-conf -c $conf" + + [ -z "$mnt" ] && mnt="/c" + if [ -z "$monhost" ]; then + $CCONF -t mon -i 0 'mon addr' > $TDIR/cconf_mon + if [ $? -ne 0 ]; then + $CCONF -t mon.a -i 0 'mon addr' > $TDIR/cconf_mon + [ $? -ne 0 ] && die "can't figure out \$monhost" + fi + read monhost < $TDIR/cconf_mon + fi + + [ -z "$imgsize" ] && imgsize=1024 + [ -z "$user" ] && user=admin + [ -z "$keyring" ] && keyring="`$CCONF keyring`" + [ -z "$secret" ] && secret="`ceph-authtool $keyring -n client.$user -p`" + + monip="`echo $monhost | sed 's/:/ /g' | awk '{print $1}'`" + monport="`echo $monhost | sed 's/:/ /g' | awk '{print $2}'`" + + [ -z "$monip" ] && die "bad mon address" + + [ -z "$monport" ] && monport=6789 + + set -e + + mydir=`hostname`_`echo $0 | sed 's/\//_/g'` + + img_name=test.`hostname`.$$ +} + +rbd_load() { + modprobe rbd +} + +rbd_create_image() { + id=$1 + rbd create $img_name.$id --size=$imgsize +} + +rbd_add() { + id=$1 + echo "$monip:$monport name=$user,secret=$secret rbd $img_name.$id" \ + > /sys/bus/rbd/add + + pushd /sys/bus/rbd/devices &> /dev/null + [ $? 
-eq 0 ] || die "failed to cd" + devid="" + rm -f "$TDIR/rbd_devs" + for f in *; do echo $f >> "$TDIR/rbd_devs"; done + sort -nr "$TDIR/rbd_devs" > "$TDIR/rev_rbd_devs" + while read f < "$TDIR/rev_rbd_devs"; do + read d_img_name < "$f/name" + if [ "x$d_img_name" == "x$img_name.$id" ]; then + devid=$f + break + fi + done + popd &> /dev/null + + [ "x$devid" == "x" ] && die "failed to find $img_name.$id" + + export rbd$id=$devid + while [ ! -e /dev/rbd$devid ]; do sleep 1; done +} + +rbd_test_init() { + rbd_load +} + +rbd_remove() { + echo $1 > /sys/bus/rbd/remove +} + +rbd_rm_image() { + id=$1 + rbd rm $imgname.$id +} + +TDIR=`mktemp -d` +trap cleanup INT TERM EXIT +set_variables diff --git a/qa/rbd/krbd_blkroset.t b/qa/rbd/krbd_blkroset.t new file mode 100644 index 00000000..bbbd26aa --- /dev/null +++ b/qa/rbd/krbd_blkroset.t @@ -0,0 +1,364 @@ + +Setup +===== + + $ RO_KEY=$(ceph auth get-or-create-key client.ro mon 'profile rbd' mgr 'profile rbd' osd 'profile rbd-read-only') + $ rbd create --size 10 img + $ rbd snap create img@snap + $ rbd snap protect img@snap + $ rbd clone img@snap cloneimg + $ rbd create --size 1 imgpart + $ DEV=$(sudo rbd map imgpart) + $ cat </dev/null 2>&1 + > unit: sectors + > /dev/rbd0p1 : start= 512, size= 512, Id=83 + > /dev/rbd0p2 : start= 1024, size= 512, Id=83 + > EOF + $ sudo rbd unmap $DEV + $ rbd snap create imgpart@snap + + +Image HEAD +========== + +R/W, unpartitioned: + + $ DEV=$(sudo rbd map img) + $ blockdev --getro $DEV + 0 + $ dd if=/dev/urandom of=$DEV bs=1k seek=1 count=1 status=none + $ blkdiscard $DEV + $ blockdev --setro $DEV + .*BLKROSET: Permission denied (re) + [1] + $ sudo blockdev --setro $DEV + $ blockdev --getro $DEV + 1 + $ dd if=/dev/urandom of=$DEV bs=1k seek=1 count=1 status=none + dd: error writing '/dev/rbd?': Operation not permitted (glob) + [1] + $ blkdiscard $DEV + blkdiscard: /dev/rbd?: BLKDISCARD ioctl failed: Operation not permitted (glob) + [1] + $ blockdev --setrw $DEV + .*BLKROSET: Permission denied (re) + [1] + $ sudo blockdev --setrw $DEV + $ blockdev --getro $DEV + 0 + $ dd if=/dev/urandom of=$DEV bs=1k seek=1 count=1 status=none + $ blkdiscard $DEV + $ sudo rbd unmap $DEV + +R/W, partitioned: + + $ DEV=$(sudo rbd map imgpart) + $ udevadm settle + $ blockdev --getro ${DEV}p1 + 0 + $ blockdev --getro ${DEV}p2 + 0 + $ dd if=/dev/urandom of=${DEV}p1 bs=1k seek=1 count=1 status=none + $ blkdiscard ${DEV}p1 + $ dd if=/dev/urandom of=${DEV}p2 bs=1k seek=1 count=1 status=none + $ blkdiscard ${DEV}p2 + $ blockdev --setro ${DEV}p1 + .*BLKROSET: Permission denied (re) + [1] + $ sudo blockdev --setro ${DEV}p1 + $ blockdev --getro ${DEV}p1 + 1 + $ blockdev --getro ${DEV}p2 + 0 + $ dd if=/dev/urandom of=${DEV}p1 bs=1k seek=1 count=1 status=none + dd: error writing '/dev/rbd?p1': Operation not permitted (glob) + [1] + $ blkdiscard ${DEV}p1 + blkdiscard: /dev/rbd?p1: BLKDISCARD ioctl failed: Operation not permitted (glob) + [1] + $ dd if=/dev/urandom of=${DEV}p2 bs=1k seek=1 count=1 status=none + $ blkdiscard ${DEV}p2 + $ blockdev --setrw ${DEV}p1 + .*BLKROSET: Permission denied (re) + [1] + $ sudo blockdev --setrw ${DEV}p1 + $ blockdev --getro ${DEV}p1 + 0 + $ blockdev --getro ${DEV}p2 + 0 + $ dd if=/dev/urandom of=${DEV}p1 bs=1k seek=1 count=1 status=none + $ blkdiscard ${DEV}p1 + $ dd if=/dev/urandom of=${DEV}p2 bs=1k seek=1 count=1 status=none + $ blkdiscard ${DEV}p2 + $ sudo rbd unmap $DEV + + $ DEV=$(sudo rbd map imgpart) + $ udevadm settle + $ blockdev --getro ${DEV}p1 + 0 + $ blockdev --getro ${DEV}p2 + 0 + $ dd if=/dev/urandom 
of=${DEV}p1 bs=1k seek=1 count=1 status=none + $ blkdiscard ${DEV}p1 + $ dd if=/dev/urandom of=${DEV}p2 bs=1k seek=1 count=1 status=none + $ blkdiscard ${DEV}p2 + $ blockdev --setro ${DEV}p2 + .*BLKROSET: Permission denied (re) + [1] + $ sudo blockdev --setro ${DEV}p2 + $ blockdev --getro ${DEV}p1 + 0 + $ blockdev --getro ${DEV}p2 + 1 + $ dd if=/dev/urandom of=${DEV}p1 bs=1k seek=1 count=1 status=none + $ blkdiscard ${DEV}p1 + $ dd if=/dev/urandom of=${DEV}p2 bs=1k seek=1 count=1 status=none + dd: error writing '/dev/rbd?p2': Operation not permitted (glob) + [1] + $ blkdiscard ${DEV}p2 + blkdiscard: /dev/rbd?p2: BLKDISCARD ioctl failed: Operation not permitted (glob) + [1] + $ blockdev --setrw ${DEV}p2 + .*BLKROSET: Permission denied (re) + [1] + $ sudo blockdev --setrw ${DEV}p2 + $ blockdev --getro ${DEV}p1 + 0 + $ blockdev --getro ${DEV}p2 + 0 + $ dd if=/dev/urandom of=${DEV}p1 bs=1k seek=1 count=1 status=none + $ blkdiscard ${DEV}p1 + $ dd if=/dev/urandom of=${DEV}p2 bs=1k seek=1 count=1 status=none + $ blkdiscard ${DEV}p2 + $ sudo rbd unmap $DEV + +R/O, unpartitioned: + + $ DEV=$(sudo rbd map --read-only img) + $ blockdev --getro $DEV + 1 + $ dd if=/dev/urandom of=$DEV bs=1k seek=1 count=1 status=none + dd: error writing '/dev/rbd?': Operation not permitted (glob) + [1] + $ blkdiscard $DEV + blkdiscard: /dev/rbd?: BLKDISCARD ioctl failed: Operation not permitted (glob) + [1] + $ blockdev --setrw $DEV + .*BLKROSET: Permission denied (re) + [1] + $ sudo blockdev --setrw $DEV + .*BLKROSET: Read-only file system (re) + [1] + $ blockdev --getro $DEV + 1 + $ dd if=/dev/urandom of=$DEV bs=1k seek=1 count=1 status=none + dd: error writing '/dev/rbd?': Operation not permitted (glob) + [1] + $ blkdiscard $DEV + blkdiscard: /dev/rbd?: BLKDISCARD ioctl failed: Operation not permitted (glob) + [1] + $ sudo rbd unmap $DEV + +R/O, partitioned: + + $ DEV=$(sudo rbd map --read-only imgpart) + $ udevadm settle + $ blockdev --getro ${DEV}p1 + 1 + $ blockdev --getro ${DEV}p2 + 1 + $ dd if=/dev/urandom of=${DEV}p1 bs=1k seek=1 count=1 status=none + dd: error writing '/dev/rbd?p1': Operation not permitted (glob) + [1] + $ blkdiscard ${DEV}p1 + blkdiscard: /dev/rbd?p1: BLKDISCARD ioctl failed: Operation not permitted (glob) + [1] + $ dd if=/dev/urandom of=${DEV}p2 bs=1k seek=1 count=1 status=none + dd: error writing '/dev/rbd?p2': Operation not permitted (glob) + [1] + $ blkdiscard ${DEV}p2 + blkdiscard: /dev/rbd?p2: BLKDISCARD ioctl failed: Operation not permitted (glob) + [1] + $ blockdev --setrw ${DEV}p1 + .*BLKROSET: Permission denied (re) + [1] + $ sudo blockdev --setrw ${DEV}p1 + .*BLKROSET: Read-only file system (re) + [1] + $ blockdev --setrw ${DEV}p2 + .*BLKROSET: Permission denied (re) + [1] + $ sudo blockdev --setrw ${DEV}p2 + .*BLKROSET: Read-only file system (re) + [1] + $ blockdev --getro ${DEV}p1 + 1 + $ blockdev --getro ${DEV}p2 + 1 + $ dd if=/dev/urandom of=${DEV}p1 bs=1k seek=1 count=1 status=none + dd: error writing '/dev/rbd?p1': Operation not permitted (glob) + [1] + $ blkdiscard ${DEV}p1 + blkdiscard: /dev/rbd?p1: BLKDISCARD ioctl failed: Operation not permitted (glob) + [1] + $ dd if=/dev/urandom of=${DEV}p2 bs=1k seek=1 count=1 status=none + dd: error writing '/dev/rbd?p2': Operation not permitted (glob) + [1] + $ blkdiscard ${DEV}p2 + blkdiscard: /dev/rbd?p2: BLKDISCARD ioctl failed: Operation not permitted (glob) + [1] + $ sudo rbd unmap $DEV + + +Image snapshot +============== + +Unpartitioned: + + $ DEV=$(sudo rbd map img@snap) + $ blockdev --getro $DEV + 1 + $ dd if=/dev/urandom 
of=$DEV bs=1k seek=1 count=1 status=none + dd: error writing '/dev/rbd?': Operation not permitted (glob) + [1] + $ blkdiscard $DEV + blkdiscard: /dev/rbd?: BLKDISCARD ioctl failed: Operation not permitted (glob) + [1] + $ blockdev --setrw $DEV + .*BLKROSET: Permission denied (re) + [1] + $ sudo blockdev --setrw $DEV + .*BLKROSET: Read-only file system (re) + [1] + $ blockdev --getro $DEV + 1 + $ dd if=/dev/urandom of=$DEV bs=1k seek=1 count=1 status=none + dd: error writing '/dev/rbd?': Operation not permitted (glob) + [1] + $ blkdiscard $DEV + blkdiscard: /dev/rbd?: BLKDISCARD ioctl failed: Operation not permitted (glob) + [1] + $ sudo rbd unmap $DEV + +Partitioned: + + $ DEV=$(sudo rbd map imgpart@snap) + $ udevadm settle + $ blockdev --getro ${DEV}p1 + 1 + $ blockdev --getro ${DEV}p2 + 1 + $ dd if=/dev/urandom of=${DEV}p1 bs=1k seek=1 count=1 status=none + dd: error writing '/dev/rbd?p1': Operation not permitted (glob) + [1] + $ blkdiscard ${DEV}p1 + blkdiscard: /dev/rbd?p1: BLKDISCARD ioctl failed: Operation not permitted (glob) + [1] + $ dd if=/dev/urandom of=${DEV}p2 bs=1k seek=1 count=1 status=none + dd: error writing '/dev/rbd?p2': Operation not permitted (glob) + [1] + $ blkdiscard ${DEV}p2 + blkdiscard: /dev/rbd?p2: BLKDISCARD ioctl failed: Operation not permitted (glob) + [1] + $ blockdev --setrw ${DEV}p1 + .*BLKROSET: Permission denied (re) + [1] + $ sudo blockdev --setrw ${DEV}p1 + .*BLKROSET: Read-only file system (re) + [1] + $ blockdev --setrw ${DEV}p2 + .*BLKROSET: Permission denied (re) + [1] + $ sudo blockdev --setrw ${DEV}p2 + .*BLKROSET: Read-only file system (re) + [1] + $ blockdev --getro ${DEV}p1 + 1 + $ blockdev --getro ${DEV}p2 + 1 + $ dd if=/dev/urandom of=${DEV}p1 bs=1k seek=1 count=1 status=none + dd: error writing '/dev/rbd?p1': Operation not permitted (glob) + [1] + $ blkdiscard ${DEV}p1 + blkdiscard: /dev/rbd?p1: BLKDISCARD ioctl failed: Operation not permitted (glob) + [1] + $ dd if=/dev/urandom of=${DEV}p2 bs=1k seek=1 count=1 status=none + dd: error writing '/dev/rbd?p2': Operation not permitted (glob) + [1] + $ blkdiscard ${DEV}p2 + blkdiscard: /dev/rbd?p2: BLKDISCARD ioctl failed: Operation not permitted (glob) + [1] + $ sudo rbd unmap $DEV + + +read-only OSD caps +================== + +R/W: + + $ DEV=$(sudo rbd map --id ro --key $(echo $RO_KEY) img) + rbd: sysfs write failed + rbd: map failed: (1) Operation not permitted + [1] + +R/O: + + $ DEV=$(sudo rbd map --id ro --key $(echo $RO_KEY) --read-only img) + $ blockdev --getro $DEV + 1 + $ sudo rbd unmap $DEV + +Snapshot: + + $ DEV=$(sudo rbd map --id ro --key $(echo $RO_KEY) img@snap) + $ blockdev --getro $DEV + 1 + $ sudo rbd unmap $DEV + +R/W, clone: + + $ DEV=$(sudo rbd map --id ro --key $(echo $RO_KEY) cloneimg) + rbd: sysfs write failed + rbd: map failed: (1) Operation not permitted + [1] + +R/O, clone: + + $ DEV=$(sudo rbd map --id ro --key $(echo $RO_KEY) --read-only cloneimg) + $ blockdev --getro $DEV + 1 + $ sudo rbd unmap $DEV + + +rw -> ro with open_count > 0 +============================ + + $ DEV=$(sudo rbd map img) + $ { sleep 10; sudo blockdev --setro $DEV; } & + $ dd if=/dev/urandom of=$DEV bs=1k oflag=direct status=noxfer + dd: error writing '/dev/rbd?': Operation not permitted (glob) + [1-9]\d*\+0 records in (re) + [1-9]\d*\+0 records out (re) + [1] + $ sudo rbd unmap $DEV + + +"-o rw --read-only" should result in read-only mapping +====================================================== + + $ DEV=$(sudo rbd map -o rw --read-only img) + $ blockdev --getro $DEV + 1 + $ sudo rbd unmap 
$DEV + + +Teardown +======== + + $ rbd snap purge imgpart >/dev/null 2>&1 + $ rbd rm imgpart >/dev/null 2>&1 + $ rbd rm cloneimg >/dev/null 2>&1 + $ rbd snap unprotect img@snap + $ rbd snap purge img >/dev/null 2>&1 + $ rbd rm img >/dev/null 2>&1 + diff --git a/qa/rbd/krbd_deep_flatten.t b/qa/rbd/krbd_deep_flatten.t new file mode 100644 index 00000000..7235f003 --- /dev/null +++ b/qa/rbd/krbd_deep_flatten.t @@ -0,0 +1,329 @@ + +Write: + + $ rbd create --size 12M --image-feature layering,deep-flatten img + $ DEV=$(sudo rbd map img) + $ xfs_io -c 'pwrite -w 0 12M' $DEV >/dev/null + $ sudo rbd unmap $DEV + $ rbd snap create img@snap + $ rbd snap protect img@snap + $ rbd clone img@snap cloneimg + $ rbd snap create cloneimg@snap + $ DEV=$(sudo rbd map cloneimg) + $ xfs_io -c 'pwrite -S 0xab -w 6M 1k' $DEV >/dev/null + $ sudo rbd unmap $DEV + + $ DEV=$(sudo rbd map cloneimg) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0600000 abab abab abab abab abab abab abab abab + * + 0600400 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + $ DEV=$(sudo rbd map cloneimg@snap) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + + $ rbd flatten --no-progress cloneimg + $ rbd snap unprotect img@snap + $ rbd snap rm --no-progress img@snap + $ rbd rm --no-progress img + + $ DEV=$(sudo rbd map cloneimg) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0600000 abab abab abab abab abab abab abab abab + * + 0600400 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + $ DEV=$(sudo rbd map cloneimg@snap) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + + $ rbd snap rm --no-progress cloneimg@snap + $ rbd rm --no-progress cloneimg + +Write, whole object: + + $ rbd create --size 12M --image-feature layering,deep-flatten img + $ DEV=$(sudo rbd map img) + $ xfs_io -c 'pwrite -w 0 12M' $DEV >/dev/null + $ sudo rbd unmap $DEV + $ rbd snap create img@snap + $ rbd snap protect img@snap + $ rbd clone img@snap cloneimg + $ rbd snap create cloneimg@snap + $ DEV=$(sudo rbd map cloneimg) + $ xfs_io -d -c 'pwrite -b 4M -S 0xab 4M 4M' $DEV >/dev/null + $ sudo rbd unmap $DEV + + $ DEV=$(sudo rbd map cloneimg) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 abab abab abab abab abab abab abab abab + * + 0800000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + $ DEV=$(sudo rbd map cloneimg@snap) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + + $ rbd flatten --no-progress cloneimg + $ rbd snap unprotect img@snap + $ rbd snap rm --no-progress img@snap + $ rbd rm --no-progress img + + $ DEV=$(sudo rbd map cloneimg) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 abab abab abab abab abab abab abab abab + * + 0800000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + $ DEV=$(sudo rbd map cloneimg@snap) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + + $ rbd snap rm --no-progress cloneimg@snap + $ rbd rm --no-progress cloneimg + +Zeroout: + + $ rbd create --size 12M --image-feature layering,deep-flatten img + $ DEV=$(sudo rbd map img) + $ xfs_io -c 'pwrite -w 0 12M' $DEV >/dev/null + $ sudo rbd unmap $DEV + $ rbd snap create img@snap + $ rbd snap protect img@snap + $ rbd clone img@snap 
cloneimg + $ rbd snap create cloneimg@snap + $ DEV=$(sudo rbd map cloneimg) + $ fallocate -z -o 6M -l 1k $DEV + $ sudo rbd unmap $DEV + + $ DEV=$(sudo rbd map cloneimg) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0600000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0600400 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + $ DEV=$(sudo rbd map cloneimg@snap) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + + $ rbd flatten --no-progress cloneimg + $ rbd snap unprotect img@snap + $ rbd snap rm --no-progress img@snap + $ rbd rm --no-progress img + + $ DEV=$(sudo rbd map cloneimg) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0600000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0600400 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + $ DEV=$(sudo rbd map cloneimg@snap) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + + $ rbd snap rm --no-progress cloneimg@snap + $ rbd rm --no-progress cloneimg + +Zeroout, whole object: + + $ rbd create --size 12M --image-feature layering,deep-flatten img + $ DEV=$(sudo rbd map img) + $ xfs_io -c 'pwrite -w 0 12M' $DEV >/dev/null + $ sudo rbd unmap $DEV + $ rbd snap create img@snap + $ rbd snap protect img@snap + $ rbd clone img@snap cloneimg + $ rbd snap create cloneimg@snap + $ DEV=$(sudo rbd map cloneimg) + $ fallocate -z -o 4M -l 4M $DEV + $ sudo rbd unmap $DEV + + $ DEV=$(sudo rbd map cloneimg) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0800000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + $ DEV=$(sudo rbd map cloneimg@snap) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + + $ rbd flatten --no-progress cloneimg + $ rbd snap unprotect img@snap + $ rbd snap rm --no-progress img@snap + $ rbd rm --no-progress img + + $ DEV=$(sudo rbd map cloneimg) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0800000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + $ DEV=$(sudo rbd map cloneimg@snap) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + + $ rbd snap rm --no-progress cloneimg@snap + $ rbd rm --no-progress cloneimg + +Discard, whole object, empty clone: + + $ rbd create --size 12M --image-feature layering,deep-flatten img + $ DEV=$(sudo rbd map img) + $ xfs_io -c 'pwrite -w 0 12M' $DEV >/dev/null + $ sudo rbd unmap $DEV + $ rbd snap create img@snap + $ rbd snap protect img@snap + $ rbd clone img@snap cloneimg + $ rbd snap create cloneimg@snap + $ DEV=$(sudo rbd map cloneimg) + $ blkdiscard -o 4M -l 4M $DEV + $ sudo rbd unmap $DEV + + $ DEV=$(sudo rbd map cloneimg) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + $ DEV=$(sudo rbd map cloneimg@snap) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + + $ rbd flatten --no-progress cloneimg + $ rbd snap unprotect img@snap + $ rbd snap rm --no-progress img@snap + $ rbd rm --no-progress img + + $ DEV=$(sudo rbd map cloneimg) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + $ DEV=$(sudo 
rbd map cloneimg@snap) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + + $ rbd snap rm --no-progress cloneimg@snap + $ rbd rm --no-progress cloneimg + +Discard, whole object, full clone: + + $ rbd create --size 12M --image-feature layering,deep-flatten img + $ DEV=$(sudo rbd map img) + $ xfs_io -c 'pwrite -w 0 12M' $DEV >/dev/null + $ sudo rbd unmap $DEV + $ rbd snap create img@snap + $ rbd snap protect img@snap + $ rbd clone img@snap cloneimg + $ rbd snap create cloneimg@snap + $ DEV=$(sudo rbd map cloneimg) + $ xfs_io -c 'pwrite -S 0xab -w 0 12M' $DEV >/dev/null + $ blkdiscard -o 4M -l 4M $DEV + $ sudo rbd unmap $DEV + + $ DEV=$(sudo rbd map cloneimg) + $ hexdump $DEV + 0000000 abab abab abab abab abab abab abab abab + * + 0400000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0800000 abab abab abab abab abab abab abab abab + * + 0c00000 + $ sudo rbd unmap $DEV + $ DEV=$(sudo rbd map cloneimg@snap) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + + $ rbd flatten --no-progress cloneimg + $ rbd snap unprotect img@snap + $ rbd snap rm --no-progress img@snap + $ rbd rm --no-progress img + + $ DEV=$(sudo rbd map cloneimg) + $ hexdump $DEV + 0000000 abab abab abab abab abab abab abab abab + * + 0400000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0800000 abab abab abab abab abab abab abab abab + * + 0c00000 + $ sudo rbd unmap $DEV + $ DEV=$(sudo rbd map cloneimg@snap) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0c00000 + $ sudo rbd unmap $DEV + + $ rbd snap rm --no-progress cloneimg@snap + $ rbd rm --no-progress cloneimg diff --git a/qa/rbd/krbd_discard.t b/qa/rbd/krbd_discard.t new file mode 100644 index 00000000..99f7261f --- /dev/null +++ b/qa/rbd/krbd_discard.t @@ -0,0 +1,398 @@ + + $ rbd create --size 4M img + $ DEV=$(sudo rbd map img) + +Zero, < 1 block: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 156672 -l 512 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131584 -l 64512 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131584 -l 65024 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131072 -l 65024 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + +Zero, 1 block: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131072 -l 65536 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0030000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131072 -l 66048 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0030000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 130560 -l 66048 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | 
hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0030000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 130560 -l 66560 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0030000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + +Zero, < 2 blocks: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 163840 -l 65536 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131584 -l 130048 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131584 -l 130560 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0030000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0040000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131072 -l 130560 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0030000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + +Zero, 2 blocks: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131072 -l 131072 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0040000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131072 -l 131584 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0040000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 130560 -l 131584 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0040000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 130560 -l 132096 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0040000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + +Zero, 37 blocks: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 589824 -l 2424832 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0090000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 02e0000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 589312 -l 2424832 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0090000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 02d0000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV 
>/dev/null + $ blkdiscard -o 590336 -l 2424832 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 00a0000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 02e0000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + +Truncate: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4193792 -l 512 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4129280 -l 65024 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4128768 -l 65536 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03f0000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4128256 -l 66048 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03f0000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4063744 -l 130560 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03f0000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4063232 -l 131072 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03e0000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4062720 -l 131584 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03e0000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 512 -l 4193792 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0010000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + +Delete: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 0 -l 4194304 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + +Empty clone: + + $ xfs_io -c 'pwrite -S 0xab -w 0 4M' $DEV >/dev/null + $ sudo rbd unmap $DEV + $ rbd snap create img@snap + $ rbd snap protect img@snap + + $ rbd clone img@snap cloneimg1 + $ DEV=$(sudo rbd map cloneimg1) + $ blkdiscard -o 720896 -l 2719744 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 abab abab abab abab abab abab abab abab + * + 0400000 + $ sudo rbd unmap $DEV + + $ rbd clone img@snap cloneimg2 + $ DEV=$(sudo rbd map cloneimg2) + $ blkdiscard -o 1474560 -l 2719744 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 abab abab abab abab abab abab abab abab + * + 0400000 + $ sudo rbd unmap $DEV + + $ rbd clone img@snap cloneimg3 + $ DEV=$(sudo rbd map cloneimg3) + $ blkdiscard -o 0 -l 4194304 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 abab abab abab abab abab abab abab abab + * + 0400000 + $ sudo rbd unmap $DEV + +Full clone: + + $ rbd clone img@snap cloneimg4 + $ DEV=$(sudo rbd map cloneimg4) + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ 
blkdiscard -o 720896 -l 2719744 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 00b0000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0340000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 1474560 -l 2719744 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0170000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 0 -l 4194304 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ sudo rbd unmap $DEV + +Multiple object requests: + + $ rbd create --size 50M --stripe-unit 16K --stripe-count 5 fancyimg + $ DEV=$(sudo rbd map fancyimg) + + $ xfs_io -c 'pwrite -b 4M -w 0 50M' $DEV >/dev/null + $ blkdiscard -o 0 -l 143360 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 3200000 + + $ xfs_io -c 'pwrite -b 4M -w 0 50M' $DEV >/dev/null + $ blkdiscard -o 0 -l 286720 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0008000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0014000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 001c000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0028000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0030000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 003c000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0044000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 3200000 + + $ xfs_io -c 'pwrite -b 4M -w 0 50M' $DEV >/dev/null + $ blkdiscard -o 0 -l 573440 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0050000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 3200000 + + $ sudo rbd unmap $DEV + + $ rbd rm --no-progress fancyimg + $ rbd rm --no-progress cloneimg4 + $ rbd rm --no-progress cloneimg3 + $ rbd rm --no-progress cloneimg2 + $ rbd rm --no-progress cloneimg1 + $ rbd snap unprotect img@snap + $ rbd snap rm --no-progress img@snap + $ rbd rm --no-progress img diff --git a/qa/rbd/krbd_discard_4M.t b/qa/rbd/krbd_discard_4M.t new file mode 100644 index 00000000..6c3d7cc7 --- /dev/null +++ b/qa/rbd/krbd_discard_4M.t @@ -0,0 +1,330 @@ + + $ rbd create --size 4M img + $ DEV=$(sudo rbd map -o alloc_size=4194304 img) + +Zero, < 1 block: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 156672 -l 512 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131584 -l 64512 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131584 -l 65024 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131072 -l 65024 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + +Zero, 1 block: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131072 -l 65536 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd 
cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131072 -l 66048 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 130560 -l 66048 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 130560 -l 66560 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + +Zero, < 2 blocks: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 163840 -l 65536 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131584 -l 130048 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131584 -l 130560 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131072 -l 130560 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + +Zero, 2 blocks: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131072 -l 131072 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131072 -l 131584 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 130560 -l 131584 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 130560 -l 132096 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + +Zero, 37 blocks: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 589824 -l 2424832 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 589312 -l 2424832 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 590336 -l 2424832 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + +Truncate: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4193792 -l 512 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03ffe00 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4129280 -l 65024 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03f0200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 
0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4128768 -l 65536 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03f0000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4128256 -l 66048 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03efe00 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4063744 -l 130560 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03e0200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4063232 -l 131072 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03e0000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4062720 -l 131584 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03dfe00 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 512 -l 4193792 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0000200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + +Delete: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 0 -l 4194304 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + +Empty clone: + + $ xfs_io -c 'pwrite -S 0xab -w 0 4M' $DEV >/dev/null + $ sudo rbd unmap $DEV + $ rbd snap create img@snap + $ rbd snap protect img@snap + + $ rbd clone img@snap cloneimg1 + $ DEV=$(sudo rbd map -o alloc_size=4194304 cloneimg1) + $ blkdiscard -o 720896 -l 2719744 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 abab abab abab abab abab abab abab abab + * + 0400000 + $ sudo rbd unmap $DEV + + $ rbd clone img@snap cloneimg2 + $ DEV=$(sudo rbd map -o alloc_size=4194304 cloneimg2) + $ blkdiscard -o 1474560 -l 2719744 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 abab abab abab abab abab abab abab abab + * + 0400000 + $ sudo rbd unmap $DEV + + $ rbd clone img@snap cloneimg3 + $ DEV=$(sudo rbd map -o alloc_size=4194304 cloneimg3) + $ blkdiscard -o 0 -l 4194304 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 abab abab abab abab abab abab abab abab + * + 0400000 + $ sudo rbd unmap $DEV + +Full clone: + + $ rbd clone img@snap cloneimg4 + $ DEV=$(sudo rbd map -o alloc_size=4194304 cloneimg4) + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 720896 -l 2719744 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 1474560 -l 2719744 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0168000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 0 -l 4194304 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 
+ * + 0400000 + + $ sudo rbd unmap $DEV + +Multiple object requests: + + $ rbd create --size 50M --stripe-unit 16K --stripe-count 5 fancyimg + $ DEV=$(sudo rbd map -o alloc_size=4194304 fancyimg) + + $ xfs_io -c 'pwrite -b 4M -w 0 50M' $DEV >/dev/null + $ blkdiscard -o 0 -l 143360 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 3200000 + + $ xfs_io -c 'pwrite -b 4M -w 0 50M' $DEV >/dev/null + $ blkdiscard -o 0 -l 286720 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 3200000 + + $ xfs_io -c 'pwrite -b 4M -w 0 50M' $DEV >/dev/null + $ blkdiscard -o 0 -l 573440 $DEV + $ dd if=$DEV iflag=direct bs=4M status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 3200000 + + $ sudo rbd unmap $DEV + + $ rbd rm --no-progress fancyimg + $ rbd rm --no-progress cloneimg4 + $ rbd rm --no-progress cloneimg3 + $ rbd rm --no-progress cloneimg2 + $ rbd rm --no-progress cloneimg1 + $ rbd snap unprotect img@snap + $ rbd snap rm --no-progress img@snap + $ rbd rm --no-progress img diff --git a/qa/rbd/krbd_discard_512b.t b/qa/rbd/krbd_discard_512b.t new file mode 100644 index 00000000..b3a7c447 --- /dev/null +++ b/qa/rbd/krbd_discard_512b.t @@ -0,0 +1,416 @@ + + $ rbd create --size 4M img + $ DEV=$(sudo rbd map -o alloc_size=512 img) + +Zero, < 1 block: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 156672 -l 512 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0026400 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0026600 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131584 -l 64512 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 002fe00 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131584 -l 65024 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0030000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131072 -l 65024 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 002fe00 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + +Zero, 1 block: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131072 -l 65536 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0030000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131072 -l 66048 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0030200 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 130560 -l 66048 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 001fe00 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0030000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 130560 -l 66560 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 001fe00 0000 0000 0000 0000 
0000 0000 0000 0000 + * + 0030200 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + +Zero, < 2 blocks: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 163840 -l 65536 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0028000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0038000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131584 -l 130048 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 003fe00 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131584 -l 130560 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0040000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131072 -l 130560 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 003fe00 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + +Zero, 2 blocks: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131072 -l 131072 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0040000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 131072 -l 131584 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0040200 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 130560 -l 131584 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 001fe00 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0040000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 130560 -l 132096 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 001fe00 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0040200 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + +Zero, 37 blocks: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 589824 -l 2424832 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0090000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 02e0000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 589312 -l 2424832 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 008fe00 0000 0000 0000 0000 0000 0000 0000 0000 + * + 02dfe00 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 590336 -l 2424832 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0090200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 02e0200 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + +Truncate: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4193792 -l 512 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03ffe00 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4129280 -l 65024 $DEV + $ hexdump $DEV + 
0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03f0200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4128768 -l 65536 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03f0000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4128256 -l 66048 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03efe00 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4063744 -l 130560 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03e0200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4063232 -l 131072 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03e0000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 4062720 -l 131584 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03dfe00 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 512 -l 4193792 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0000200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + +Delete: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 0 -l 4194304 $DEV + $ hexdump $DEV + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + +Empty clone: + + $ xfs_io -c 'pwrite -S 0xab -w 0 4M' $DEV >/dev/null + $ sudo rbd unmap $DEV + $ rbd snap create img@snap + $ rbd snap protect img@snap + + $ rbd clone img@snap cloneimg1 + $ DEV=$(sudo rbd map -o alloc_size=512 cloneimg1) + $ blkdiscard -o 720896 -l 2719744 $DEV + $ hexdump $DEV + 0000000 abab abab abab abab abab abab abab abab + * + 0400000 + $ sudo rbd unmap $DEV + + $ rbd clone img@snap cloneimg2 + $ DEV=$(sudo rbd map -o alloc_size=512 cloneimg2) + $ blkdiscard -o 1474560 -l 2719744 $DEV + $ hexdump $DEV + 0000000 abab abab abab abab abab abab abab abab + * + 0400000 + $ sudo rbd unmap $DEV + + $ rbd clone img@snap cloneimg3 + $ DEV=$(sudo rbd map -o alloc_size=512 cloneimg3) + $ blkdiscard -o 0 -l 4194304 $DEV + $ hexdump $DEV + 0000000 abab abab abab abab abab abab abab abab + * + 0400000 + $ sudo rbd unmap $DEV + +Full clone: + + $ rbd clone img@snap cloneimg4 + $ DEV=$(sudo rbd map -o alloc_size=512 cloneimg4) + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 720896 -l 2719744 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 00b0000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0348000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 1474560 -l 2719744 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0168000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ blkdiscard -o 0 -l 4194304 $DEV + $ hexdump $DEV + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ sudo rbd unmap $DEV + +Multiple object requests: + + $ rbd create --size 50M --stripe-unit 16K --stripe-count 5 fancyimg + $ DEV=$(sudo rbd map -o alloc_size=512 fancyimg) + + $ xfs_io -c 'pwrite -b 4M -w 0 50M' $DEV >/dev/null + $ blkdiscard -o 0 -l 143360 $DEV + $ hexdump $DEV + 
0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0023000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 3200000 + + $ xfs_io -c 'pwrite -b 4M -w 0 50M' $DEV >/dev/null + $ blkdiscard -o 0 -l 286720 $DEV + $ hexdump $DEV + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0046000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 3200000 + + $ xfs_io -c 'pwrite -b 4M -w 0 50M' $DEV >/dev/null + $ blkdiscard -o 0 -l 573440 $DEV + $ hexdump $DEV + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 008c000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 3200000 + + $ sudo rbd unmap $DEV + + $ rbd rm --no-progress fancyimg + $ rbd rm --no-progress cloneimg4 + $ rbd rm --no-progress cloneimg3 + $ rbd rm --no-progress cloneimg2 + $ rbd rm --no-progress cloneimg1 + $ rbd snap unprotect img@snap + $ rbd snap rm --no-progress img@snap + $ rbd rm --no-progress img diff --git a/qa/rbd/krbd_discard_granularity.t b/qa/rbd/krbd_discard_granularity.t new file mode 100644 index 00000000..844643ba --- /dev/null +++ b/qa/rbd/krbd_discard_granularity.t @@ -0,0 +1,40 @@ + + $ rbd create --size 20M img + + $ DEV=$(sudo rbd map img) + $ blockdev --getiomin $DEV + 65536 + $ blockdev --getioopt $DEV + 65536 + $ cat /sys/block/${DEV#/dev/}/queue/discard_granularity + 65536 + $ sudo rbd unmap $DEV + + $ DEV=$(sudo rbd map -o alloc_size=512 img) + $ blockdev --getiomin $DEV + 512 + $ blockdev --getioopt $DEV + 512 + $ cat /sys/block/${DEV#/dev/}/queue/discard_granularity + 512 + $ sudo rbd unmap $DEV + + $ DEV=$(sudo rbd map -o alloc_size=4194304 img) + $ blockdev --getiomin $DEV + 4194304 + $ blockdev --getioopt $DEV + 4194304 + $ cat /sys/block/${DEV#/dev/}/queue/discard_granularity + 4194304 + $ sudo rbd unmap $DEV + + $ DEV=$(sudo rbd map -o alloc_size=8388608 img) + $ blockdev --getiomin $DEV + 4194304 + $ blockdev --getioopt $DEV + 4194304 + $ cat /sys/block/${DEV#/dev/}/queue/discard_granularity + 4194304 + $ sudo rbd unmap $DEV + + $ rbd rm --no-progress img diff --git a/qa/rbd/krbd_get_features.t b/qa/rbd/krbd_get_features.t new file mode 100644 index 00000000..b3abf3ce --- /dev/null +++ b/qa/rbd/krbd_get_features.t @@ -0,0 +1,31 @@ + +journaling makes the image only unwritable, rather than both unreadable +and unwritable: + + $ rbd create --size 1 --image-feature layering,exclusive-lock,journaling img + $ rbd snap create img@snap + $ rbd snap protect img@snap + $ rbd clone --image-feature layering,exclusive-lock,journaling img@snap cloneimg + + $ DEV=$(sudo rbd map img) + rbd: sysfs write failed + rbd: map failed: (6) No such device or address + [6] + $ DEV=$(sudo rbd map --read-only img) + $ blockdev --getro $DEV + 1 + $ sudo rbd unmap $DEV + + $ DEV=$(sudo rbd map cloneimg) + rbd: sysfs write failed + rbd: map failed: (6) No such device or address + [6] + $ DEV=$(sudo rbd map --read-only cloneimg) + $ blockdev --getro $DEV + 1 + $ sudo rbd unmap $DEV + + $ rbd rm --no-progress cloneimg + $ rbd snap unprotect img@snap + $ rbd snap rm --no-progress img@snap + $ rbd rm --no-progress img diff --git a/qa/rbd/krbd_huge_image.t b/qa/rbd/krbd_huge_image.t new file mode 100644 index 00000000..9fff1d9d --- /dev/null +++ b/qa/rbd/krbd_huge_image.t @@ -0,0 +1,41 @@ + + $ get_field() { + > rbd info --format=json $1 | python -c "import sys, json; print json.load(sys.stdin)['$2']" + > } + +Write to first and last sectors and make sure we hit the right objects: + + $ ceph osd pool create hugeimg 12 >/dev/null 2>&1 + $ rbd pool init hugeimg + $ rbd create --size 4E --object-size 4K --image-feature layering hugeimg/img 
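+
+(A quick sanity check on these numbers: the size and object counts queried
+below confirm that 4E is 4 EiB = 2^62 bytes, so the last 512-byte sector
+begins at 2^62 - 512 = 4611686018427387392, and with 4K objects the image
+spans 2^62 / 2^12 = 2^50 = 1125899906842624 objects, the last of which is
+index 2^50 - 1, i.e. 0003ffffffffffff in the zero-padded object names.)
+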
+ $ DEV=$(sudo rbd map hugeimg/img) + $ xfs_io -c 'pwrite 0 512' $DEV >/dev/null # first sector + $ xfs_io -c 'pwrite 4611686018427387392 512' $DEV >/dev/null # last sector + $ sudo rbd unmap $DEV + + $ get_field hugeimg/img size + 4611686018427387904 + $ get_field hugeimg/img objects + 1125899906842624 + $ rados -p hugeimg ls | grep $(get_field hugeimg/img block_name_prefix) | sort + .*\.0000000000000000 (re) + .*\.0003ffffffffffff (re) + +Dump first and last megabytes: + + $ DEV=$(sudo rbd map hugeimg/img) + $ dd if=$DEV bs=1M count=1 status=none | hexdump + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0000200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0100000 + $ dd if=$DEV bs=1M skip=4398046511103 status=none | hexdump + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 00ffe00 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0100000 + $ sudo rbd unmap $DEV + + $ ceph osd pool delete hugeimg hugeimg --yes-i-really-really-mean-it >/dev/null 2>&1 diff --git a/qa/rbd/krbd_msgr_segments.t b/qa/rbd/krbd_msgr_segments.t new file mode 100644 index 00000000..c373af5a --- /dev/null +++ b/qa/rbd/krbd_msgr_segments.t @@ -0,0 +1,85 @@ + + $ get_block_name_prefix() { + > rbd info --format=json $1 | python -c "import sys, json; print json.load(sys.stdin)['block_name_prefix']" + > } + +Short segments: + + $ rbd create --size 12M img + $ DEV=$(sudo rbd map img) + $ xfs_io -d -c 'pwrite 5120 512' $DEV >/dev/null + $ xfs_io -d -c 'pwrite 12577280 512' $DEV >/dev/null + $ hexdump $DEV + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0001400 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0001600 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0bfea00 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0bfec00 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0c00000 + $ sudo rbd unmap $DEV + $ rbd rm --no-progress img + +Short segment, ceph_msg_data_bio_cursor_init(): + + $ rbd create --size 12M img + $ DEV=$(sudo rbd map img) + $ xfs_io -d -c 'pwrite 0 512' $DEV >/dev/null + $ rados -p rbd stat $(get_block_name_prefix img).0000000000000000 + .* size 512 (re) + $ xfs_io -d -c 'pread -b 2M 0 2M' $DEV >/dev/null + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0000200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0c00000 + $ sudo rbd unmap $DEV + $ rbd rm --no-progress img + +Short segment, ceph_msg_data_bio_advance(): + + $ rbd create --size 12M img + $ DEV=$(sudo rbd map img) + $ xfs_io -d -c 'pwrite 0 1049088' $DEV >/dev/null + $ rados -p rbd stat $(get_block_name_prefix img).0000000000000000 + .* size 1049088 (re) + $ xfs_io -d -c 'pread -b 2M 0 2M' $DEV >/dev/null + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0100200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0c00000 + $ sudo rbd unmap $DEV + $ rbd rm --no-progress img + +Cloned bios (dm-snapshot.ko, based on generic/081): + + $ rbd create --size 300M img + $ DEV=$(sudo rbd map img) + $ sudo vgcreate vg_img $DEV + Physical volume "/dev/rbd?" successfully created* (glob) + Volume group "vg_img" successfully created + $ sudo lvcreate -L 256M -n lv_img vg_img + Logical volume "lv_img" created. + $ udevadm settle + $ sudo mkfs.ext4 -q /dev/mapper/vg_img-lv_img + $ sudo lvcreate -L 4M --snapshot -n lv_snap vg_img/lv_img | grep created + Logical volume "lv_snap" created. 
+ $ udevadm settle + $ sudo mount /dev/mapper/vg_img-lv_snap /mnt + $ sudo xfs_io -f -c 'pwrite 0 5M' /mnt/file1 >/dev/null + $ sudo umount /mnt + $ sudo vgremove -f vg_img + Logical volume "lv_snap" successfully removed + Logical volume "lv_img" successfully removed + Volume group "vg_img" successfully removed + $ sudo pvremove $DEV + Labels on physical volume "/dev/rbd?" successfully wiped* (glob) + $ sudo rbd unmap $DEV + $ rbd rm --no-progress img diff --git a/qa/rbd/krbd_parent_overlap.t b/qa/rbd/krbd_parent_overlap.t new file mode 100644 index 00000000..47deda90 --- /dev/null +++ b/qa/rbd/krbd_parent_overlap.t @@ -0,0 +1,64 @@ + +For reads, only the object extent needs to be reverse mapped: + + $ rbd create --size 20M img + $ DEV=$(sudo rbd map img) + $ xfs_io -c 'pwrite 0 20M' $DEV >/dev/null + $ sudo rbd unmap $DEV + $ rbd snap create img@snap + $ rbd snap protect img@snap + $ rbd clone img@snap cloneimg + $ rbd resize --no-progress --size 5M --allow-shrink cloneimg + $ rbd resize --no-progress --size 20M cloneimg + $ DEV=$(sudo rbd map cloneimg) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0500000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 1400000 + $ sudo rbd unmap $DEV + $ rbd rm --no-progress cloneimg + $ rbd snap unprotect img@snap + $ rbd snap rm --no-progress img@snap + $ rbd rm --no-progress img + +For writes, the entire object needs to be reverse mapped: + + $ rbd create --size 2M img + $ DEV=$(sudo rbd map img) + $ xfs_io -c 'pwrite 0 1M' $DEV >/dev/null + $ sudo rbd unmap $DEV + $ rbd snap create img@snap + $ rbd snap protect img@snap + $ rbd clone img@snap cloneimg + $ rbd resize --no-progress --size 8M cloneimg + $ DEV=$(sudo rbd map cloneimg) + $ xfs_io -c 'pwrite -S 0xef 3M 1M' $DEV >/dev/null + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0100000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0300000 efef efef efef efef efef efef efef efef + * + 0400000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0800000 + $ sudo rbd unmap $DEV + $ DEV=$(sudo rbd map cloneimg) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0100000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0300000 efef efef efef efef efef efef efef efef + * + 0400000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0800000 + $ sudo rbd unmap $DEV + $ rbd rm --no-progress cloneimg + $ rbd snap unprotect img@snap + $ rbd snap rm --no-progress img@snap + $ rbd rm --no-progress img diff --git a/qa/rbd/krbd_whole_object_zeroout.t b/qa/rbd/krbd_whole_object_zeroout.t new file mode 100644 index 00000000..3530f93e --- /dev/null +++ b/qa/rbd/krbd_whole_object_zeroout.t @@ -0,0 +1,143 @@ + + $ get_block_name_prefix() { + > rbd info --format=json $1 | python -c "import sys, json; print json.load(sys.stdin)['block_name_prefix']" + > } + + $ rbd create --size 200M img + $ DEV=$(sudo rbd map img) + $ xfs_io -c 'pwrite -b 4M 0 200M' $DEV >/dev/null + $ sudo rbd unmap $DEV + $ rbd snap create img@snap + $ rbd snap protect img@snap + +cloneimg1: +1 object in an object set, 4M +25 full object sets +25 objects in total + + $ rbd clone img@snap cloneimg1 + $ DEV=$(sudo rbd map cloneimg1) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + c800000 + $ fallocate -z -l 100M $DEV + $ hexdump $DEV + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 6400000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + c800000 + $ sudo rbd unmap $DEV + $ DEV=$(sudo rbd map cloneimg1) + $ hexdump $DEV + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + 
* + 6400000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + c800000 + $ sudo rbd unmap $DEV + +cloneimg2: +7 objects in an object set, 28M +3 full object sets +min((100M % 28M) / 512K, 7) = 7 objects in the last object set +28 objects in total + + $ rbd clone --stripe-unit 512K --stripe-count 7 img@snap cloneimg2 + $ DEV=$(sudo rbd map cloneimg2) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + c800000 + $ fallocate -z -l 100M $DEV + $ hexdump $DEV + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 6400000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + c800000 + $ sudo rbd unmap $DEV + $ DEV=$(sudo rbd map cloneimg2) + $ hexdump $DEV + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 6400000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + c800000 + $ sudo rbd unmap $DEV + +cloneimg3: +23 objects in an object set, 92M +1 full object set +min((100M % 92M) / 512K, 23) = 16 objects in the last object set +39 objects in total + + $ rbd clone --stripe-unit 512K --stripe-count 23 img@snap cloneimg3 + $ DEV=$(sudo rbd map cloneimg3) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + c800000 + $ fallocate -z -l 100M $DEV + $ hexdump $DEV + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 6400000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + c800000 + $ sudo rbd unmap $DEV + $ DEV=$(sudo rbd map cloneimg3) + $ hexdump $DEV + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 6400000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + c800000 + $ sudo rbd unmap $DEV + +cloneimg4: +65 objects in an object set, 260M +0 full object sets +min((100M % 260M) / 512K, 65) = 65 objects in the last object set +65 objects in total + + $ rbd clone --stripe-unit 512K --stripe-count 65 img@snap cloneimg4 + $ DEV=$(sudo rbd map cloneimg4) + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + c800000 + $ fallocate -z -l 100M $DEV + $ hexdump $DEV + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 6400000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + c800000 + $ sudo rbd unmap $DEV + $ DEV=$(sudo rbd map cloneimg4) + $ hexdump $DEV + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 6400000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + c800000 + $ sudo rbd unmap $DEV + + $ rados -p rbd ls | grep -c $(get_block_name_prefix cloneimg1) + 25 + $ rados -p rbd ls | grep -c $(get_block_name_prefix cloneimg2) + 28 + $ rados -p rbd ls | grep -c $(get_block_name_prefix cloneimg3) + 39 + $ rados -p rbd ls | grep -c $(get_block_name_prefix cloneimg4) + 65 + + $ rbd rm --no-progress cloneimg4 + $ rbd rm --no-progress cloneimg3 + $ rbd rm --no-progress cloneimg2 + $ rbd rm --no-progress cloneimg1 + $ rbd snap unprotect img@snap + $ rbd snap rm --no-progress img@snap + $ rbd rm --no-progress img diff --git a/qa/rbd/krbd_zeroout.t b/qa/rbd/krbd_zeroout.t new file mode 100644 index 00000000..33c8e9d9 --- /dev/null +++ b/qa/rbd/krbd_zeroout.t @@ -0,0 +1,422 @@ + + $ rbd create --size 4M img + $ DEV=$(sudo rbd map img) + +Zero, < 1 block: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 156672 -l 512 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0026400 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0026600 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 131584 -l 64512 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 002fe00 cdcd cdcd cdcd cdcd cdcd 
cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 131584 -l 65024 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0030000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 131072 -l 65024 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 002fe00 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + +Zero, 1 block: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 131072 -l 65536 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0030000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 131072 -l 66048 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0030200 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 130560 -l 66048 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 001fe00 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0030000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 130560 -l 66560 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 001fe00 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0030200 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + +Zero, < 2 blocks: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 163840 -l 65536 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0028000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0038000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 131584 -l 130048 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 003fe00 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 131584 -l 130560 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0040000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 131072 -l 130560 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 003fe00 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + +Zero, 2 blocks: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 131072 -l 131072 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0040000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 131072 -l 131584 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0020000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0040200 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 130560 -l 131584 $DEV + $ hexdump $DEV + 
0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 001fe00 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0040000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 130560 -l 132096 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 001fe00 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0040200 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + +Zero, 37 blocks: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 589824 -l 2424832 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0090000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 02e0000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 589312 -l 2424832 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 008fe00 0000 0000 0000 0000 0000 0000 0000 0000 + * + 02dfe00 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 590336 -l 2424832 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0090200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 02e0200 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + +Truncate: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 4193792 -l 512 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03ffe00 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 4129280 -l 65024 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03f0200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 4128768 -l 65536 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03f0000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 4128256 -l 66048 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03efe00 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 4063744 -l 130560 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03e0200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 4063232 -l 131072 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03e0000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 4062720 -l 131584 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 03dfe00 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 512 -l 4193792 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0000200 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + +Delete: + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 0 -l 4194304 $DEV + $ hexdump $DEV + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + +Empty clone: + + $ xfs_io -c 'pwrite -S 0xab -w 0 4M' $DEV >/dev/null + $ sudo rbd unmap $DEV + $ rbd snap create img@snap + $ rbd snap protect img@snap + + $ rbd clone img@snap cloneimg1 + $ DEV=$(sudo rbd map cloneimg1) + $ fallocate -z 
-o 720896 -l 2719744 $DEV + $ hexdump $DEV + 0000000 abab abab abab abab abab abab abab abab + * + 00b0000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0348000 abab abab abab abab abab abab abab abab + * + 0400000 + $ sudo rbd unmap $DEV + + $ rbd clone img@snap cloneimg2 + $ DEV=$(sudo rbd map cloneimg2) + $ fallocate -z -o 1474560 -l 2719744 $DEV + $ hexdump $DEV + 0000000 abab abab abab abab abab abab abab abab + * + 0168000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + $ sudo rbd unmap $DEV + + $ rbd clone img@snap cloneimg3 + $ DEV=$(sudo rbd map cloneimg3) + $ fallocate -z -o 0 -l 4194304 $DEV + $ hexdump $DEV + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + $ sudo rbd unmap $DEV + +Full clone: + + $ rbd clone img@snap cloneimg4 + $ DEV=$(sudo rbd map cloneimg4) + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 720896 -l 2719744 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 00b0000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0348000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 1474560 -l 2719744 $DEV + $ hexdump $DEV + 0000000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 0168000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ xfs_io -c 'pwrite -w 0 4M' $DEV >/dev/null + $ fallocate -z -o 0 -l 4194304 $DEV + $ hexdump $DEV + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0400000 + + $ sudo rbd unmap $DEV + +Multiple object requests: + + $ rbd create --size 50M --stripe-unit 16K --stripe-count 5 fancyimg + $ DEV=$(sudo rbd map fancyimg) + + $ xfs_io -c 'pwrite -b 4M -w 0 50M' $DEV >/dev/null + $ fallocate -z -o 0 -l 143360 $DEV + $ hexdump $DEV + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0023000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 3200000 + + $ xfs_io -c 'pwrite -b 4M -w 0 50M' $DEV >/dev/null + $ fallocate -z -o 0 -l 286720 $DEV + $ hexdump $DEV + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 0046000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 3200000 + + $ xfs_io -c 'pwrite -b 4M -w 0 50M' $DEV >/dev/null + $ fallocate -z -o 0 -l 573440 $DEV + $ hexdump $DEV + 0000000 0000 0000 0000 0000 0000 0000 0000 0000 + * + 008c000 cdcd cdcd cdcd cdcd cdcd cdcd cdcd cdcd + * + 3200000 + + $ sudo rbd unmap $DEV + + $ rbd rm --no-progress fancyimg + $ rbd rm --no-progress cloneimg4 + $ rbd rm --no-progress cloneimg3 + $ rbd rm --no-progress cloneimg2 + $ rbd rm --no-progress cloneimg1 + $ rbd snap unprotect img@snap + $ rbd snap rm --no-progress img@snap + $ rbd rm --no-progress img diff --git a/qa/rbd/rbd.sh b/qa/rbd/rbd.sh new file mode 100755 index 00000000..2b7ce8ee --- /dev/null +++ b/qa/rbd/rbd.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +set -x + +basedir=`echo $0 | sed 's/[^/]*$//g'`. +. 
$basedir/common.sh + +rbd_test_init + + +create_multiple() { + for i in `seq 1 10`; do + rbd_create_image $i + done + + for i in `seq 1 10`; do + rbd_add $i + done + for i in `seq 1 10`; do + devname=/dev/rbd`eval echo \\$rbd$i` + echo $devname + done + for i in `seq 1 10`; do + devid=`eval echo \\$rbd$i` + rbd_remove $devid + done + for i in `seq 1 10`; do + rbd_rm_image $i + done +} + +test_dbench() { + rbd_create_image 0 + rbd_add 0 + + devname=/dev/rbd$rbd0 + + mkfs -t ext3 $devname + mount -t ext3 $devname $mnt + + dbench -D $mnt -t 30 5 + sync + + umount $mnt + rbd_remove $rbd0 + rbd_rm_image 0 +} + +create_multiple +test_dbench + diff --git a/qa/releases/infernalis.yaml b/qa/releases/infernalis.yaml new file mode 100644 index 00000000..f21e7fe8 --- /dev/null +++ b/qa/releases/infernalis.yaml @@ -0,0 +1,5 @@ +tasks: +- exec: + osd.0: + - ceph osd set sortbitwise + - for p in `ceph osd pool ls` ; do ceph osd pool set $p use_gmt_hitset true ; done diff --git a/qa/releases/jewel.yaml b/qa/releases/jewel.yaml new file mode 100644 index 00000000..ab09c083 --- /dev/null +++ b/qa/releases/jewel.yaml @@ -0,0 +1,6 @@ +tasks: +- exec: + osd.0: + - ceph osd set sortbitwise + - ceph osd set require_jewel_osds + - for p in `ceph osd pool ls` ; do ceph osd pool set $p use_gmt_hitset true ; done diff --git a/qa/releases/kraken.yaml b/qa/releases/kraken.yaml new file mode 100644 index 00000000..57342057 --- /dev/null +++ b/qa/releases/kraken.yaml @@ -0,0 +1,4 @@ +tasks: +- exec: + osd.0: + - ceph osd set require_kraken_osds diff --git a/qa/releases/luminous-with-mgr.yaml b/qa/releases/luminous-with-mgr.yaml new file mode 100644 index 00000000..ea313076 --- /dev/null +++ b/qa/releases/luminous-with-mgr.yaml @@ -0,0 +1,11 @@ +tasks: +- exec: + osd.0: + - ceph osd require-osd-release luminous + - ceph osd set-require-min-compat-client luminous +- ceph.healthy: +overrides: + ceph: + conf: + mon: + mon warn on osd down out interval zero: false diff --git a/qa/releases/luminous.yaml b/qa/releases/luminous.yaml new file mode 100644 index 00000000..9ed76715 --- /dev/null +++ b/qa/releases/luminous.yaml @@ -0,0 +1,21 @@ +tasks: +- exec: + mgr.x: + - mkdir -p /var/lib/ceph/mgr/ceph-x + - ceph auth get-or-create-key mgr.x mon 'allow profile mgr' + - ceph auth export mgr.x > /var/lib/ceph/mgr/ceph-x/keyring +- ceph.restart: + daemons: [mgr.x] + wait-for-healthy: false +- exec: + osd.0: + - ceph osd require-osd-release luminous + - ceph osd set-require-min-compat-client luminous +- ceph.healthy: +overrides: + ceph: + conf: + mon: + mon warn on osd down out interval zero: false + log-whitelist: + - no active mgr diff --git a/qa/releases/mimic.yaml b/qa/releases/mimic.yaml new file mode 100644 index 00000000..f901e7ed --- /dev/null +++ b/qa/releases/mimic.yaml @@ -0,0 +1,6 @@ +tasks: +- exec: + osd.0: + - ceph osd require-osd-release mimic + - ceph osd set-require-min-compat-client mimic +- ceph.healthy: diff --git a/qa/releases/nautilus.yaml b/qa/releases/nautilus.yaml new file mode 100644 index 00000000..5b79e5b7 --- /dev/null +++ b/qa/releases/nautilus.yaml @@ -0,0 +1,6 @@ +tasks: +- exec: + osd.0: + - ceph osd require-osd-release nautilus + - ceph osd set-require-min-compat-client nautilus +- ceph.healthy: diff --git a/qa/rgw_frontend/beast.yaml b/qa/rgw_frontend/beast.yaml new file mode 100644 index 00000000..369e65f7 --- /dev/null +++ b/qa/rgw_frontend/beast.yaml @@ -0,0 +1,3 @@ +overrides: + rgw: + frontend: beast diff --git a/qa/rgw_frontend/civetweb.yaml b/qa/rgw_frontend/civetweb.yaml new file mode 
100644 index 00000000..5845a0e6 --- /dev/null +++ b/qa/rgw_frontend/civetweb.yaml @@ -0,0 +1,3 @@ +overrides: + rgw: + frontend: civetweb diff --git a/qa/rgw_pool_type/ec-profile.yaml b/qa/rgw_pool_type/ec-profile.yaml new file mode 100644 index 00000000..05384cb5 --- /dev/null +++ b/qa/rgw_pool_type/ec-profile.yaml @@ -0,0 +1,10 @@ +overrides: + rgw: + ec-data-pool: true + erasure_code_profile: + name: testprofile + k: 3 + m: 1 + crush-failure-domain: osd + s3tests: + slow_backend: true diff --git a/qa/rgw_pool_type/ec.yaml b/qa/rgw_pool_type/ec.yaml new file mode 100644 index 00000000..7c99b7f8 --- /dev/null +++ b/qa/rgw_pool_type/ec.yaml @@ -0,0 +1,5 @@ +overrides: + rgw: + ec-data-pool: true + s3tests: + slow_backend: true diff --git a/qa/rgw_pool_type/replicated.yaml b/qa/rgw_pool_type/replicated.yaml new file mode 100644 index 00000000..c91709ea --- /dev/null +++ b/qa/rgw_pool_type/replicated.yaml @@ -0,0 +1,3 @@ +overrides: + rgw: + ec-data-pool: false diff --git a/qa/run-standalone.sh b/qa/run-standalone.sh new file mode 100755 index 00000000..897b3e53 --- /dev/null +++ b/qa/run-standalone.sh @@ -0,0 +1,148 @@ +#!/usr/bin/env bash +set -e + +if [ ! -e Makefile -o ! -d bin ]; then + echo 'run this from the build dir' + exit 1 +fi + +function get_cmake_variable() { + local variable=$1 + grep "$variable" CMakeCache.txt | cut -d "=" -f 2 +} + +function get_python_path() { + local py_ver=$(get_cmake_variable MGR_PYTHON_VERSION | cut -d '.' -f1) + if [ -z "${py_ver}" ]; then + if [ $(get_cmake_variable WITH_PYTHON2) = ON ]; then + py_ver=2 + else + py_ver=3 + fi + fi + echo $(realpath ../src/pybind):$(pwd)/lib/cython_modules/lib.$py_ver +} + +if [ `uname` = FreeBSD ]; then + # otherwise module prettytable will not be found + export PYTHONPATH=$(get_python_path):/usr/local/lib/python2.7/site-packages + exec_mode=+111 + KERNCORE="kern.corefile" + COREPATTERN="core.%N.%P" +else + export PYTHONPATH=$(get_python_path) + exec_mode=/111 + KERNCORE="kernel.core_pattern" + COREPATTERN="core.%e.%p.%t" +fi + +function cleanup() { + if [ -n "$precore" ]; then + sudo sysctl -w "${KERNCORE}=${precore}" + fi +} + +function finish() { + cleanup + exit 0 +} + +trap finish TERM HUP INT + +PATH=$(pwd)/bin:$PATH + +# add /sbin and /usr/sbin to PATH to find sysctl in those cases where the +# user's PATH does not get these directories by default (e.g., tumbleweed) +PATH=$PATH:/sbin:/usr/sbin + +export LD_LIBRARY_PATH="$(pwd)/lib" + +# TODO: Use getops +dryrun=false +if [[ "$1" = "--dry-run" ]]; then + dryrun=true + shift +fi + +all=false +if [ "$1" = "" ]; then + all=true +fi + +select=("$@") + +location="../qa/standalone" + +count=0 +errors=0 +userargs="" +precore="$(sysctl -n $KERNCORE)" +# If corepattern already set, avoid having to use sudo +if [ "$precore" = "$COREPATTERN" ]; then + precore="" +else + sudo sysctl -w "${KERNCORE}=${COREPATTERN}" +fi +# Clean out any cores in core target directory (currently .) +if ls $(dirname $(sysctl -n $KERNCORE)) | grep -q '^core\|core$' ; then + mkdir found.cores.$$ 2> /dev/null || true + for i in $(ls $(dirname $(sysctl -n $KERNCORE)) | grep '^core\|core$'); do + mv $i found.cores.$$ + done + echo "Stray cores put in $(pwd)/found.cores.$$" +fi + +ulimit -c unlimited +for f in $(cd $location ; find . 
-mindepth 2 -perm $exec_mode -type f) +do + f=$(echo $f | sed 's/\.\///') + if [[ "$all" = "false" ]]; then + found=false + for c in "${!select[@]}" + do + # Get command and any arguments of subset of tests to run + allargs="${select[$c]}" + arg1=$(echo "$allargs" | cut --delimiter " " --field 1) + # Get user args for this selection for use below + userargs="$(echo $allargs | cut -s --delimiter " " --field 2-)" + if [[ "$arg1" = $(basename $f) ]] || [[ "$arg1" = $(dirname $f) ]]; then + found=true + break + fi + if [[ "$arg1" = "$f" ]]; then + found=true + break + fi + done + if [[ "$found" = "false" ]]; then + continue + fi + fi + # Don't run test-failure.sh unless explicitly specified + if [ "$all" = "true" -a "$f" = "special/test-failure.sh" ]; then + continue + fi + + cmd="$location/$f $userargs" + count=$(expr $count + 1) + echo "--- $cmd ---" + if [[ "$dryrun" != "true" ]]; then + if ! PATH=$PATH:bin \ + CEPH_ROOT=.. \ + CEPH_LIB=lib \ + LOCALRUN=yes \ + time -f "Elapsed %E (%e seconds)" $cmd ; then + echo "$f .............. FAILED" + errors=$(expr $errors + 1) + fi + fi +done +cleanup + +if [ "$errors" != "0" ]; then + echo "$errors TESTS FAILED, $count TOTAL TESTS" + exit 1 +fi + +echo "ALL $count TESTS PASSED" +exit 0 diff --git a/qa/run_xfstests-obsolete.sh b/qa/run_xfstests-obsolete.sh new file mode 100644 index 00000000..4393c7c8 --- /dev/null +++ b/qa/run_xfstests-obsolete.sh @@ -0,0 +1,458 @@ +#!/usr/bin/env bash + +# Copyright (C) 2012 Dreamhost, LLC +# +# This is free software; see the source for copying conditions. +# There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR +# A PARTICULAR PURPOSE. +# +# This is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as +# published by the Free Software Foundation version 2. + +# Usage: +# run_xfs_tests -t /dev/ -s /dev/ -f +# - test device and scratch device will both get trashed +# - fstypes can be xfs, ext4, or btrfs (xfs default) +# - tests can be listed individually or in ranges: 1 3-5 8 +# tests can also be specified by group: -g quick +# +# Exit status: +# 0: success +# 1: usage error +# 2: other runtime error +# 99: argument count error (programming error) +# 100: getopt error (internal error) + +# Alex Elder +# April 13, 2012 + +set -e + +PROGNAME=$(basename $0) + +# xfstests is downloaded from this git repository and then built. +# XFSTESTS_REPO="git://oss.sgi.com/xfs/cmds/xfstests.git" +XFSTESTS_REPO="git://git.ceph.com/xfstests.git" + +# Default command line option values +COUNT="1" +FS_TYPE="xfs" +SCRATCH_DEV="" # MUST BE SPECIFIED +TEST_DEV="" # MUST BE SPECIFIED +TESTS="-g auto" # The "auto" group is supposed to be "known good" + +# rbd presents geometry information that causes mkfs.xfs to +# issue a warning. This option avoids this class of problems. +XFS_MKFS_OPTIONS="-l su=32k" + +# Override the default test list with a list of tests known to pass +# until we can work through getting them all passing reliably. +TESTS="1-7 9 11-15 17 19-21 26-29 31-34 41 46-48 50-54 56 61 63-67 69-70 74-76" +TESTS="${TESTS} 78 79 84-89 91-92 100 103 105 108 110 116-121 124 126" +TESTS="${TESTS} 129-135 137-141 164-167 182 184 187-190 192 194" +TESTS="${TESTS} 196 199 201 203 214-216 220-227 234 236-238 241 243-249" +TESTS="${TESTS} 253 257-259 261 262 269 273 275 277 278 280 285 286" +# 275 was the highest available test as of 4/10/12. +# 289 was the highest available test as of 11/15/12. 
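+#
+# A quick way to sanity-check how many tests the list above selects
+# (illustrative only; the script itself never does this):
+#
+#   echo $TESTS | tr ' ' '\n' | awk -F- '/-/ {n += $2 - $1 + 1; next} NF {n++} END {print n}'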
+ +###### +# Some explanation of why tests have been excluded above: +# +# Test 008 was pulled because it contained a race condition leading to +# spurious failures. +# +# Test 049 was pulled because it caused a kernel fault. +# http://tracker.newdream.net/issues/2260 +# Test 232 was pulled because it caused an XFS error +# http://tracker.newdream.net/issues/2302 +# +# This test passes but takes a LONG time (1+ hours): 127 +# +# These were not run for one (anticipated) reason or another: +# 010 016 030 035 040 044 057 058-060 072 077 090 093-095 097-099 104 +# 112 113 122 123 125 128 142 147-163 168 175-178 180 185 191 193 +# 195 197 198 207-213 217 228 230-233 235 239 240 252 254 255 264-266 +# 270-272 276 278-279 281-284 288 289 +# +# These tests all failed (produced output different from golden): +# 042 073 083 096 109 169 170 200 202 204-206 218 229 240 242 250 +# 263 276 277 279 287 +# +# The rest were not part of the "auto" group: +# 018 022 023 024 025 036 037 038 039 043 055 071 080 081 082 101 +# 102 106 107 111 114 115 136 171 172 173 251 267 268 +###### + +# print an error message and quit with non-zero status +function err() { + if [ $# -gt 0 ]; then + echo "" >&2 + echo "${PROGNAME}: ${FUNCNAME[1]}: $@" >&2 + fi + exit 2 +} + +# routine used to validate argument counts to all shell functions +function arg_count() { + local func + local want + local got + + if [ $# -eq 2 ]; then + func="${FUNCNAME[1]}" # calling function + want=$1 + got=$2 + else + func="${FUNCNAME[0]}" # i.e., arg_count + want=2 + got=$# + fi + [ "${want}" -eq "${got}" ] && return 0 + echo "${PROGNAME}: ${func}: arg count bad (want ${want} got ${got})" >&2 + exit 99 +} + +# validation function for repeat count argument +function count_valid() { + arg_count 1 $# + + test "$1" -gt 0 # 0 is pointless; negative is wrong +} + +# validation function for filesystem type argument +function fs_type_valid() { + arg_count 1 $# + + case "$1" in + xfs|ext4|btrfs) return 0 ;; + *) return 1 ;; + esac +} + +# validation function for device arguments +function device_valid() { + arg_count 1 $# + + # Very simple testing--really should try to be more careful... 
+ test -b "$1" +} + +# print a usage message and quit +# +# if a message is supplied, print that first, and then exit +# with non-zero status +function usage() { + if [ $# -gt 0 ]; then + echo "" >&2 + echo "$@" >&2 + fi + + echo "" >&2 + echo "Usage: ${PROGNAME} " >&2 + echo "" >&2 + echo " options:" >&2 + echo " -h or --help" >&2 + echo " show this message" >&2 + echo " -c or --count" >&2 + echo " iteration count (1 or more)" >&2 + echo " -f or --fs-type" >&2 + echo " one of: xfs, ext4, btrfs" >&2 + echo " (default fs-type: xfs)" >&2 + echo " -s or --scratch-dev (REQUIRED)" >&2 + echo " name of device used for scratch filesystem" >&2 + echo " -t or --test-dev (REQUIRED)" >&2 + echo " name of device used for test filesystem" >&2 + echo " tests:" >&2 + echo " list of test numbers or ranges, e.g.:" >&2 + echo " 1-9 11-15 17 19-21 26-28 31-34 41" >&2 + echo " or possibly an xfstests test group, e.g.:" >&2 + echo " -g quick" >&2 + echo " (default tests: -g auto)" >&2 + echo "" >&2 + + [ $# -gt 0 ] && exit 1 + + exit 0 # This is used for a --help +} + +# parse command line arguments +function parseargs() { + # Short option flags + SHORT_OPTS="" + SHORT_OPTS="${SHORT_OPTS},h" + SHORT_OPTS="${SHORT_OPTS},c:" + SHORT_OPTS="${SHORT_OPTS},f:" + SHORT_OPTS="${SHORT_OPTS},s:" + SHORT_OPTS="${SHORT_OPTS},t:" + + # Short option flags + LONG_OPTS="" + LONG_OPTS="${LONG_OPTS},help" + LONG_OPTS="${LONG_OPTS},count:" + LONG_OPTS="${LONG_OPTS},fs-type:" + LONG_OPTS="${LONG_OPTS},scratch-dev:" + LONG_OPTS="${LONG_OPTS},test-dev:" + + TEMP=$(getopt --name "${PROGNAME}" \ + --options "${SHORT_OPTS}" \ + --longoptions "${LONG_OPTS}" \ + -- "$@") + eval set -- "$TEMP" + + while [ "$1" != "--" ]; do + case "$1" in + -h|--help) + usage + ;; + -c|--count) + count_valid "$2" || + usage "invalid count '$2'" + COUNT="$2" + shift + ;; + -f|--fs-type) + fs_type_valid "$2" || + usage "invalid fs_type '$2'" + FS_TYPE="$2" + shift + ;; + -s|--scratch-dev) + device_valid "$2" || + usage "invalid scratch-dev '$2'" + SCRATCH_DEV="$2" + shift + ;; + -t|--test-dev) + device_valid "$2" || + usage "invalid test-dev '$2'" + TEST_DEV="$2" + shift + ;; + *) + exit 100 # Internal error + ;; + esac + shift + done + shift + + [ -n "${TEST_DEV}" ] || usage "test-dev must be supplied" + [ -n "${SCRATCH_DEV}" ] || usage "scratch-dev must be supplied" + + [ $# -eq 0 ] || TESTS="$@" +} + +################################################################ + +[ -z "$TESTDIR" ] && export TESTDIR="/tmp/cephtest" + +# Set up some environment for normal teuthology test setup. +# This really should not be necessary but I found it was. 
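+# CEPH_ARGS is read by the ceph/rados/rbd command-line tools; with it set,
+# anything below that shells out to those tools talks to the teuthology
+# cluster as client.0 without needing explicit --conf/--keyring/--name
+# options on every invocation.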
+export CEPH_ARGS="--conf ${TESTDIR}/ceph.conf" +export CEPH_ARGS="${CEPH_ARGS} --keyring ${TESTDIR}/data/client.0.keyring" +export CEPH_ARGS="${CEPH_ARGS} --name client.0" + +export LD_LIBRARY_PATH="${TESTDIR}/binary/usr/local/lib:${LD_LIBRARY_PATH}" +export PATH="${TESTDIR}/binary/usr/local/bin:${PATH}" +export PATH="${TESTDIR}/binary/usr/local/sbin:${PATH}" + +################################################################ + +# Filesystem-specific mkfs options--set if not supplied +export XFS_MKFS_OPTIONS="${XFS_MKFS_OPTIONS:--f -l su=65536}" +export EXT4_MKFS_OPTIONS="${EXT4_MKFS_OPTIONS:--F}" +export BTRFS_MKFS_OPTION # No defaults + +XFSTESTS_DIR="/var/lib/xfstests" # Where the tests live + +# download, build, and install xfstests +function install_xfstests() { + arg_count 0 $# + + local multiple="" + local ncpu + + pushd "${TESTDIR}" + + git clone "${XFSTESTS_REPO}" + + cd xfstests + + # FIXME: use an older version before the tests were rearranged! + git reset --hard e5f1a13792f20cfac097fef98007610b422f2cac + + ncpu=$(getconf _NPROCESSORS_ONLN 2>&1) + [ -n "${ncpu}" -a "${ncpu}" -gt 1 ] && multiple="-j ${ncpu}" + + make realclean + make ${multiple} + make -k install + + popd +} + +# remove previously-installed xfstests files +function remove_xfstests() { + arg_count 0 $# + + rm -rf "${TESTDIR}/xfstests" + rm -rf "${XFSTESTS_DIR}" +} + +# create a host options file that uses the specified devices +function setup_host_options() { + arg_count 0 $# + + # Create mount points for the test and scratch filesystems + local test_dir="$(mktemp -d ${TESTDIR}/test_dir.XXXXXXXXXX)" + local scratch_dir="$(mktemp -d ${TESTDIR}/scratch_mnt.XXXXXXXXXX)" + + # Write a host options file that uses these devices. + # xfstests uses the file defined by HOST_OPTIONS as the + # place to get configuration variables for its run, and + # all (or most) of the variables set here are required. + export HOST_OPTIONS="$(mktemp ${TESTDIR}/host_options.XXXXXXXXXX)" + cat > "${HOST_OPTIONS}" <<-! + # Created by ${PROGNAME} on $(date) + # HOST_OPTIONS="${HOST_OPTIONS}" + TEST_DEV="${TEST_DEV}" + SCRATCH_DEV="${SCRATCH_DEV}" + TEST_DIR="${test_dir}" + SCRATCH_MNT="${scratch_dir}" + FSTYP="${FS_TYPE}" + export TEST_DEV SCRATCH_DEV TEST_DIR SCRATCH_MNT FSTYP + # + export XFS_MKFS_OPTIONS="${XFS_MKFS_OPTIONS}" + ! + + # Now ensure we are using the same values + . "${HOST_OPTIONS}" +} + +# remove the host options file, plus the directories it refers to +function cleanup_host_options() { + arg_count 0 $# + + rm -rf "${TEST_DIR}" "${SCRATCH_MNT}" + rm -f "${HOST_OPTIONS}" +} + +# run mkfs on the given device using the specified filesystem type +function do_mkfs() { + arg_count 1 $# + + local dev="${1}" + local options + + case "${FSTYP}" in + xfs) options="${XFS_MKFS_OPTIONS}" ;; + ext4) options="${EXT4_MKFS_OPTIONS}" ;; + btrfs) options="${BTRFS_MKFS_OPTIONS}" ;; + esac + + "mkfs.${FSTYP}" ${options} "${dev}" || + err "unable to make ${FSTYP} file system on device \"${dev}\"" +} + +# mount the given device on the given mount point +function do_mount() { + arg_count 2 $# + + local dev="${1}" + local dir="${2}" + + mount "${dev}" "${dir}" || + err "unable to mount file system \"${dev}\" on \"${dir}\"" +} + +# unmount a previously-mounted device +function do_umount() { + arg_count 1 $# + + local dev="${1}" + + if mount | grep "${dev}" > /dev/null; then + if ! 
umount "${dev}"; then + err "unable to unmount device \"${dev}\"" + fi + else + # Report it but don't error out + echo "device \"${dev}\" was not mounted" >&2 + fi +} + +# do basic xfstests setup--make and mount the test and scratch filesystems +function setup_xfstests() { + arg_count 0 $# + + # TEST_DEV can persist across test runs, but for now we + # don't bother. I believe xfstests prefers its devices to + # have been already been formatted for the desired + # filesystem type--it uses blkid to identify things or + # something. So we mkfs both here for a fresh start. + do_mkfs "${TEST_DEV}" + do_mkfs "${SCRATCH_DEV}" + + # I believe the test device is expected to be mounted; the + # scratch doesn't need to be (but it doesn't hurt). + do_mount "${TEST_DEV}" "${TEST_DIR}" + do_mount "${SCRATCH_DEV}" "${SCRATCH_MNT}" +} + +# clean up changes made by setup_xfstests +function cleanup_xfstests() { + arg_count 0 $# + + # Unmount these in case a test left them mounted (plus + # the corresponding setup function mounted them...) + do_umount "${TEST_DEV}" + do_umount "${SCRATCH_DEV}" +} + +# top-level setup routine +function setup() { + arg_count 0 $# + + setup_host_options + install_xfstests + setup_xfstests +} + +# top-level (final) cleanup routine +function cleanup() { + arg_count 0 $# + + cd / + cleanup_xfstests + remove_xfstests + cleanup_host_options +} +trap cleanup EXIT ERR HUP INT QUIT + +# ################################################################ + +start_date="$(date)" + +parseargs "$@" + +setup + +pushd "${XFSTESTS_DIR}" +for (( i = 1 ; i <= "${COUNT}" ; i++ )); do + [ "${COUNT}" -gt 1 ] && echo "=== Iteration "$i" starting at: $(date)" + + ./check ${TESTS} # Here we actually run the tests + status=$? + + [ "${COUNT}" -gt 1 ] && echo "=== Iteration "$i" complete at: $(date)" +done +popd + +# cleanup is called via the trap call, above + +echo "This xfstests run started at: ${start_date}" +echo "xfstests run completed at: $(date)" +[ "${COUNT}" -gt 1 ] && echo "xfstests run consisted of ${COUNT} iterations" + +exit "${status}" diff --git a/qa/run_xfstests.sh b/qa/run_xfstests.sh new file mode 100755 index 00000000..892bdfeb --- /dev/null +++ b/qa/run_xfstests.sh @@ -0,0 +1,323 @@ +#!/usr/bin/env bash + +# Copyright (C) 2012 Dreamhost, LLC +# +# This is free software; see the source for copying conditions. +# There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR +# A PARTICULAR PURPOSE. +# +# This is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as +# published by the Free Software Foundation version 2. 
+ +# Usage: +# run_xfstests -t /dev/ -s /dev/ [-f ] -- +# - test device and scratch device will both get trashed +# - fstypes can be xfs, ext4, or btrfs (xfs default) +# - tests can be listed individually: generic/001 xfs/008 xfs/009 +# tests can also be specified by group: -g quick +# +# Exit status: +# 0: success +# 1: usage error +# 2: other runtime error +# 99: argument count error (programming error) +# 100: getopt error (internal error) + +# Alex Elder +# April 13, 2012 + +set -e + +PROGNAME=$(basename $0) + +# Default command line option values +COUNT="1" +EXPUNGE_FILE="" +DO_RANDOMIZE="" # false +FSTYP="xfs" +SCRATCH_DEV="" # MUST BE SPECIFIED +TEST_DEV="" # MUST BE SPECIFIED +TESTS="-g auto" # The "auto" group is supposed to be "known good" + +# print an error message and quit with non-zero status +function err() { + if [ $# -gt 0 ]; then + echo "" >&2 + echo "${PROGNAME}: ${FUNCNAME[1]}: $@" >&2 + fi + exit 2 +} + +# routine used to validate argument counts to all shell functions +function arg_count() { + local func + local want + local got + + if [ $# -eq 2 ]; then + func="${FUNCNAME[1]}" # calling function + want=$1 + got=$2 + else + func="${FUNCNAME[0]}" # i.e., arg_count + want=2 + got=$# + fi + [ "${want}" -eq "${got}" ] && return 0 + echo "${PROGNAME}: ${func}: arg count bad (want ${want} got ${got})" >&2 + exit 99 +} + +# validation function for repeat count argument +function count_valid() { + arg_count 1 $# + + test "$1" -gt 0 # 0 is pointless; negative is wrong +} + +# validation function for filesystem type argument +function fs_type_valid() { + arg_count 1 $# + + case "$1" in + xfs|ext4|btrfs) return 0 ;; + *) return 1 ;; + esac +} + +# validation function for device arguments +function device_valid() { + arg_count 1 $# + + # Very simple testing--really should try to be more careful... 
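+	# A stricter check (illustrative, not enabled here) could also refuse a
+	# device that is currently mounted, e.g.:
+	#   test -b "$1" && ! findmnt --source "$1" > /dev/null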
+ test -b "$1" +} + +# validation function for expunge file argument +function expunge_file_valid() { + arg_count 1 $# + + test -s "$1" +} + +# print a usage message and quit +# +# if a message is supplied, print that first, and then exit +# with non-zero status +function usage() { + if [ $# -gt 0 ]; then + echo "" >&2 + echo "$@" >&2 + fi + + echo "" >&2 + echo "Usage: ${PROGNAME} -- " >&2 + echo "" >&2 + echo " options:" >&2 + echo " -h or --help" >&2 + echo " show this message" >&2 + echo " -c or --count" >&2 + echo " iteration count (1 or more)" >&2 + echo " -f or --fs-type" >&2 + echo " one of: xfs, ext4, btrfs" >&2 + echo " (default fs-type: xfs)" >&2 + echo " -r or --randomize" >&2 + echo " randomize test order" >&2 + echo " -s or --scratch-dev (REQUIRED)" >&2 + echo " name of device used for scratch filesystem" >&2 + echo " -t or --test-dev (REQUIRED)" >&2 + echo " name of device used for test filesystem" >&2 + echo " -x or --expunge-file" >&2 + echo " name of file with list of tests to skip" >&2 + echo " tests:" >&2 + echo " list of test numbers, e.g.:" >&2 + echo " generic/001 xfs/008 shared/032 btrfs/009" >&2 + echo " or possibly an xfstests test group, e.g.:" >&2 + echo " -g quick" >&2 + echo " (default tests: -g auto)" >&2 + echo "" >&2 + + [ $# -gt 0 ] && exit 1 + + exit 0 # This is used for a --help +} + +# parse command line arguments +function parseargs() { + # Short option flags + SHORT_OPTS="" + SHORT_OPTS="${SHORT_OPTS},h" + SHORT_OPTS="${SHORT_OPTS},c:" + SHORT_OPTS="${SHORT_OPTS},f:" + SHORT_OPTS="${SHORT_OPTS},r" + SHORT_OPTS="${SHORT_OPTS},s:" + SHORT_OPTS="${SHORT_OPTS},t:" + SHORT_OPTS="${SHORT_OPTS},x:" + + # Long option flags + LONG_OPTS="" + LONG_OPTS="${LONG_OPTS},help" + LONG_OPTS="${LONG_OPTS},count:" + LONG_OPTS="${LONG_OPTS},fs-type:" + LONG_OPTS="${LONG_OPTS},randomize" + LONG_OPTS="${LONG_OPTS},scratch-dev:" + LONG_OPTS="${LONG_OPTS},test-dev:" + LONG_OPTS="${LONG_OPTS},expunge-file:" + + TEMP=$(getopt --name "${PROGNAME}" \ + --options "${SHORT_OPTS}" \ + --longoptions "${LONG_OPTS}" \ + -- "$@") + eval set -- "$TEMP" + + while [ "$1" != "--" ]; do + case "$1" in + -h|--help) + usage + ;; + -c|--count) + count_valid "$2" || + usage "invalid count '$2'" + COUNT="$2" + shift + ;; + -f|--fs-type) + fs_type_valid "$2" || + usage "invalid fs_type '$2'" + FSTYP="$2" + shift + ;; + -r|--randomize) + DO_RANDOMIZE="t" + ;; + -s|--scratch-dev) + device_valid "$2" || + usage "invalid scratch-dev '$2'" + SCRATCH_DEV="$2" + shift + ;; + -t|--test-dev) + device_valid "$2" || + usage "invalid test-dev '$2'" + TEST_DEV="$2" + shift + ;; + -x|--expunge-file) + expunge_file_valid "$2" || + usage "invalid expunge-file '$2'" + EXPUNGE_FILE="$2" + shift + ;; + *) + exit 100 # Internal error + ;; + esac + shift + done + shift + + [ -n "${TEST_DEV}" ] || usage "test-dev must be supplied" + [ -n "${SCRATCH_DEV}" ] || usage "scratch-dev must be supplied" + + [ $# -eq 0 ] || TESTS="$@" +} + +################################################################ + +# run mkfs on the given device using the specified filesystem type +function do_mkfs() { + arg_count 1 $# + + local dev="${1}" + local options + + case "${FSTYP}" in + xfs) options="-f" ;; + ext4) options="-F" ;; + btrfs) options="-f" ;; + esac + + "mkfs.${FSTYP}" ${options} "${dev}" || + err "unable to make ${FSTYP} file system on device \"${dev}\"" +} + +# top-level setup routine +function setup() { + arg_count 0 $# + + wget -P "${TESTDIR}" http://download.ceph.com/qa/xfstests.tar.gz + tar zxf "${TESTDIR}/xfstests.tar.gz" 
-C "$(dirname "${XFSTESTS_DIR}")" + mkdir "${TEST_DIR}" + mkdir "${SCRATCH_MNT}" + do_mkfs "${TEST_DEV}" +} + +# top-level (final) cleanup routine +function cleanup() { + arg_count 0 $# + + # ensure teuthology can clean up the logs + chmod -R a+rw "${TESTDIR}/archive" + + findmnt "${TEST_DEV}" && umount "${TEST_DEV}" + [ -d "${SCRATCH_MNT}" ] && rmdir "${SCRATCH_MNT}" + [ -d "${TEST_DIR}" ] && rmdir "${TEST_DIR}" + rm -rf "${XFSTESTS_DIR}" + rm -f "${TESTDIR}/xfstests.tar.gz" +} + +# ################################################################ + +start_date="$(date)" +parseargs "$@" +[ -n "${TESTDIR}" ] || usage "TESTDIR env variable must be set" +[ -d "${TESTDIR}/archive" ] || usage "\$TESTDIR/archive directory must exist" +TESTDIR="$(readlink -e "${TESTDIR}")" +[ -n "${EXPUNGE_FILE}" ] && EXPUNGE_FILE="$(readlink -e "${EXPUNGE_FILE}")" + +XFSTESTS_DIR="/var/lib/xfstests" # hardcoded into dbench binary +TEST_DIR="/mnt/test_dir" +SCRATCH_MNT="/mnt/scratch_mnt" +MKFS_OPTIONS="" +EXT_MOUNT_OPTIONS="-o block_validity" + +trap cleanup EXIT ERR HUP INT QUIT +setup + +export TEST_DEV +export TEST_DIR +export SCRATCH_DEV +export SCRATCH_MNT +export FSTYP +export MKFS_OPTIONS +export EXT_MOUNT_OPTIONS + +pushd "${XFSTESTS_DIR}" +for (( i = 1 ; i <= "${COUNT}" ; i++ )); do + [ "${COUNT}" -gt 1 ] && echo "=== Iteration "$i" starting at: $(date)" + + RESULT_BASE="${TESTDIR}/archive/results-${i}" + mkdir "${RESULT_BASE}" + export RESULT_BASE + + EXPUNGE="" + [ -n "${EXPUNGE_FILE}" ] && EXPUNGE="-E ${EXPUNGE_FILE}" + + RANDOMIZE="" + [ -n "${DO_RANDOMIZE}" ] && RANDOMIZE="-r" + + # -T output timestamps + PATH="${PWD}/bin:${PATH}" ./check -T ${RANDOMIZE} ${EXPUNGE} ${TESTS} + findmnt "${TEST_DEV}" && umount "${TEST_DEV}" + + [ "${COUNT}" -gt 1 ] && echo "=== Iteration "$i" complete at: $(date)" +done +popd + +# cleanup is called via the trap call, above + +echo "This xfstests run started at: ${start_date}" +echo "xfstests run completed at: $(date)" +[ "${COUNT}" -gt 1 ] && echo "xfstests run consisted of ${COUNT} iterations" +echo OK diff --git a/qa/run_xfstests_qemu.sh b/qa/run_xfstests_qemu.sh new file mode 100644 index 00000000..40300cea --- /dev/null +++ b/qa/run_xfstests_qemu.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +# +# TODO switch to run_xfstests.sh (see run_xfstests_krbd.sh) + +set -x + +[ -n "${TESTDIR}" ] || export TESTDIR="/tmp/cephtest" +[ -d "${TESTDIR}" ] || mkdir "${TESTDIR}" + +URL_BASE="https://git.ceph.com/?p=ceph.git;a=blob_plain;f=qa" +SCRIPT="run_xfstests-obsolete.sh" + +cd "${TESTDIR}" + +wget -O "${SCRIPT}" "${URL_BASE}/${SCRIPT}" +chmod +x "${SCRIPT}" + +# tests excluded fail in the current testing vm regardless of whether +# rbd is used + +./"${SCRIPT}" -c 1 -f xfs -t /dev/vdb -s /dev/vdc \ + 1-7 9-17 19-26 28-49 51-61 63 66-67 69-79 83 85-105 108-110 112-135 \ + 137-170 174-191 193-204 206-217 220-227 230-231 233 235-241 243-249 \ + 251-262 264-278 281-286 288-289 +STATUS=$? + +rm -f "${SCRIPT}" + +exit "${STATUS}" diff --git a/qa/runallonce.sh b/qa/runallonce.sh new file mode 100755 index 00000000..bd809fef --- /dev/null +++ b/qa/runallonce.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash + +set -ex + +basedir=`echo $0 | sed 's/[^/]*$//g'`. +testdir="$1" +[ -n "$2" ] && logdir=$2 || logdir=$1 + +[ ${basedir:0:1} == "." ] && basedir=`pwd`/${basedir:1} + +PATH="$basedir/src:$PATH" + +[ -z "$testdir" ] || [ ! -d "$testdir" ] && echo "specify test dir" && exit 1 +cd $testdir + +for test in `cd $basedir/workunits && find . 
-executable -type f | $basedir/../src/script/permute` +do + echo "------ running test $test ------" + pwd + [ -d $test ] && rm -r $test + mkdir -p $test + mkdir -p `dirname $logdir/$test.log` + test -e $logdir/$test.log && rm $logdir/$test.log + sh -c "cd $test && $basedir/workunits/$test" 2>&1 | tee $logdir/$test.log +done diff --git a/qa/runoncfuse.sh b/qa/runoncfuse.sh new file mode 100755 index 00000000..7be54535 --- /dev/null +++ b/qa/runoncfuse.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +set -x + +mkdir -p testspace +ceph-fuse testspace -m $1 + +./runallonce.sh testspace +killall ceph-fuse diff --git a/qa/runonkclient.sh b/qa/runonkclient.sh new file mode 100755 index 00000000..f7e8605f --- /dev/null +++ b/qa/runonkclient.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +set -x + +mkdir -p testspace +/bin/mount -t ceph $1 testspace + +./runallonce.sh testspace + +/bin/umount testspace diff --git a/qa/setup-chroot.sh b/qa/setup-chroot.sh new file mode 100755 index 00000000..a6e12f35 --- /dev/null +++ b/qa/setup-chroot.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash + +die() { + echo ${@} + exit 1 +} + +usage() +{ + cat << EOF +$0: sets up a chroot environment for building the ceph server +usage: +-h Show this message + +-r [install_dir] location of the root filesystem to install to + example: -r /images/sepia/ + +-s [src_dir] location of the directory with the source code + example: -s ./src/ceph +EOF +} + +cleanup() { + umount -l "${INSTALL_DIR}/mnt/tmp" + umount -l "${INSTALL_DIR}/proc" + umount -l "${INSTALL_DIR}/sys" +} + +INSTALL_DIR= +SRC_DIR= +while getopts “hr:s:” OPTION; do + case $OPTION in + h) usage; exit 1 ;; + r) INSTALL_DIR=$OPTARG ;; + s) SRC_DIR=$OPTARG ;; + ?) usage; exit + ;; + esac +done + +[ $EUID -eq 0 ] || die "This script uses chroot, which requires root permissions." + +[ -d "${INSTALL_DIR}" ] || die "No such directory as '${INSTALL_DIR}'. \ +You must specify an install directory with -r" + +[ -d "${SRC_DIR}" ] || die "no such directory as '${SRC_DIR}'. \ +You must specify a source directory with -s" + +readlink -f ${SRC_DIR} || die "readlink failed on ${SRC_DIR}" +ABS_SRC_DIR=`readlink -f ${SRC_DIR}` + +trap cleanup INT TERM EXIT + +mount --bind "${ABS_SRC_DIR}" "${INSTALL_DIR}/mnt/tmp" || die "bind mount failed" +mount -t proc none "${INSTALL_DIR}/proc" || die "mounting proc failed" +mount -t sysfs none "${INSTALL_DIR}/sys" || die "mounting sys failed" + +echo "$0: starting chroot." +echo "cd /mnt/tmp before building" +echo +chroot ${INSTALL_DIR} env HOME=/mnt/tmp /bin/bash + +echo "$0: exiting chroot." + +exit 0 diff --git a/qa/standalone/README b/qa/standalone/README new file mode 100644 index 00000000..3082442c --- /dev/null +++ b/qa/standalone/README @@ -0,0 +1,23 @@ +qa/standalone +============= + +These scripts run standalone clusters, but not in a normal way. They make +use of functions ceph-helpers.sh to quickly start/stop daemons against +toy clusters in a single directory. + +They are normally run via teuthology based on qa/suites/rados/standalone/*.yaml. + +You can run them in a git checkout + build directory as well: + + * The qa/run-standalone.sh will run all of them in sequence. This is slow + since there is no parallelism. + + * You can run individual script(s) by specifying the basename or path below + qa/standalone as arguments to qa/run-standalone.sh. + +../qa/run-standalone.sh misc.sh osd/osd-dup.sh + + * Add support for specifying arguments to selected tests by simply adding + list of tests to each argument. 
+ +../qa/run-standalone.sh "test-ceph-helpers.sh test_get_last_scrub_stamp" diff --git a/qa/standalone/ceph-helpers.sh b/qa/standalone/ceph-helpers.sh new file mode 100755 index 00000000..bcdd108f --- /dev/null +++ b/qa/standalone/ceph-helpers.sh @@ -0,0 +1,2285 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2013,2014 Cloudwatt +# Copyright (C) 2014,2015 Red Hat +# Copyright (C) 2014 Federico Gimenez +# +# Author: Loic Dachary +# Author: Federico Gimenez +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# +TIMEOUT=300 +WAIT_FOR_CLEAN_TIMEOUT=90 +MAX_TIMEOUT=15 +PG_NUM=4 +TMPDIR=${TMPDIR:-/tmp} +CEPH_BUILD_VIRTUALENV=${TMPDIR} +TESTDIR=${TESTDIR:-${TMPDIR}} + +if type xmlstarlet > /dev/null 2>&1; then + XMLSTARLET=xmlstarlet +elif type xml > /dev/null 2>&1; then + XMLSTARLET=xml +else + echo "Missing xmlstarlet binary!" + exit 1 +fi + +if [ `uname` = FreeBSD ]; then + SED=gsed + AWK=gawk + DIFFCOLOPTS="" + KERNCORE="kern.corefile" +else + SED=sed + AWK=awk + termwidth=$(stty -a | head -1 | sed -e 's/.*columns \([0-9]*\).*/\1/') + if [ -n "$termwidth" -a "$termwidth" != "0" ]; then + termwidth="-W ${termwidth}" + fi + DIFFCOLOPTS="-y $termwidth" + KERNCORE="kernel.core_pattern" +fi + +EXTRA_OPTS="" + +#! @file ceph-helpers.sh +# @brief Toolbox to manage Ceph cluster dedicated to testing +# +# Example use case: +# +# ~~~~~~~~~~~~~~~~{.sh} +# source ceph-helpers.sh +# +# function mytest() { +# # cleanup leftovers and reset mydir +# setup mydir +# # create a cluster with one monitor and three osds +# run_mon mydir a +# run_osd mydir 0 +# run_osd mydir 2 +# run_osd mydir 3 +# # put and get an object +# rados --pool rbd put GROUP /etc/group +# rados --pool rbd get GROUP /tmp/GROUP +# # stop the cluster and cleanup the directory +# teardown mydir +# } +# ~~~~~~~~~~~~~~~~ +# +# The focus is on simplicity and efficiency, in the context of +# functional tests. The output is intentionally very verbose +# and functions return as soon as an error is found. The caller +# is also expected to abort on the first error so that debugging +# can be done by looking at the end of the output. +# +# Each function is documented, implemented and tested independently. +# When modifying a helper, the test and the documentation are +# expected to be updated and it is easier of they are collocated. A +# test for a given function can be run with +# +# ~~~~~~~~~~~~~~~~{.sh} +# ceph-helpers.sh TESTS test_get_osds +# ~~~~~~~~~~~~~~~~ +# +# and all the tests (i.e. all functions matching test_*) are run +# with: +# +# ~~~~~~~~~~~~~~~~{.sh} +# ceph-helpers.sh TESTS +# ~~~~~~~~~~~~~~~~ +# +# A test function takes a single argument : the directory dedicated +# to the tests. It is expected to not create any file outside of this +# directory and remove it entirely when it completes successfully. 
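+#
+# As an illustration (assuming the daemons were started by the run_* helpers
+# below, which point their admin sockets at get_asok_path), a test can query a
+# daemon directly over its admin socket:
+#
+# ~~~~~~~~~~~~~~~~{.sh}
+# ceph --admin-daemon $(get_asok_path osd.0) config get osd_max_backfills
+# ~~~~~~~~~~~~~~~~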
+# + + +function get_asok_dir() { + if [ -n "$CEPH_ASOK_DIR" ]; then + echo "$CEPH_ASOK_DIR" + else + echo ${TMPDIR:-/tmp}/ceph-asok.$$ + fi +} + +function get_asok_path() { + local name=$1 + if [ -n "$name" ]; then + echo $(get_asok_dir)/ceph-$name.asok + else + echo $(get_asok_dir)/\$cluster-\$name.asok + fi +} +## +# Cleanup any leftovers found in **dir** via **teardown** +# and reset **dir** as an empty environment. +# +# @param dir path name of the environment +# @return 0 on success, 1 on error +# +function setup() { + local dir=$1 + teardown $dir || return 1 + mkdir -p $dir + mkdir -p $(get_asok_dir) + if [ $(ulimit -n) -le 1024 ]; then + ulimit -n 4096 || return 1 + fi + if [ -z "$LOCALRUN" ]; then + trap "teardown $dir 1" TERM HUP INT + fi +} + +function test_setup() { + local dir=$dir + setup $dir || return 1 + test -d $dir || return 1 + setup $dir || return 1 + test -d $dir || return 1 + teardown $dir +} + +####################################################################### + +## +# Kill all daemons for which a .pid file exists in **dir** and remove +# **dir**. If the file system in which **dir** is btrfs, delete all +# subvolumes that relate to it. +# +# @param dir path name of the environment +# @param dumplogs pass "1" to dump logs otherwise it will only if cores found +# @return 0 on success, 1 on error +# +function teardown() { + local dir=$1 + local dumplogs=$2 + kill_daemons $dir KILL + if [ `uname` != FreeBSD ] \ + && [ $(stat -f -c '%T' .) == "btrfs" ]; then + __teardown_btrfs $dir + fi + local cores="no" + local pattern="$(sysctl -n $KERNCORE)" + # See if we have apport core handling + if [ "${pattern:0:1}" = "|" ]; then + # TODO: Where can we get the dumps? + # Not sure where the dumps really are so this will look in the CWD + pattern="" + fi + # Local we start with core and teuthology ends with core + if ls $(dirname "$pattern") | grep -q '^core\|core$' ; then + cores="yes" + if [ -n "$LOCALRUN" ]; then + mkdir /tmp/cores.$$ 2> /dev/null || true + for i in $(ls $(dirname $(sysctl -n $KERNCORE)) | grep '^core\|core$'); do + mv $i /tmp/cores.$$ + done + fi + fi + if [ "$cores" = "yes" -o "$dumplogs" = "1" ]; then + if [ -n "$LOCALRUN" ]; then + display_logs $dir + else + # Move logs to where Teuthology will archive it + mkdir -p $TESTDIR/archive/log + mv $dir/*.log $TESTDIR/archive/log + fi + fi + rm -fr $dir + rm -rf $(get_asok_dir) + if [ "$cores" = "yes" ]; then + echo "ERROR: Failure due to cores found" + if [ -n "$LOCALRUN" ]; then + echo "Find saved core files in /tmp/cores.$$" + fi + return 1 + fi + return 0 +} + +function __teardown_btrfs() { + local btrfs_base_dir=$1 + local btrfs_root=$(df -P . | tail -1 | $AWK '{print $NF}') + local btrfs_dirs=$(cd $btrfs_base_dir; sudo btrfs subvolume list -t . | $AWK '/^[0-9]/ {print $4}' | grep "$btrfs_base_dir/$btrfs_dir") + for subvolume in $btrfs_dirs; do + sudo btrfs subvolume delete $btrfs_root/$subvolume + done +} + +function test_teardown() { + local dir=$dir + setup $dir || return 1 + teardown $dir || return 1 + ! test -d $dir || return 1 +} + +####################################################################### + +## +# Sends a signal to a single daemon. +# This is a helper function for kill_daemons +# +# After the daemon is sent **signal**, its actual termination +# will be verified by sending it signal 0. If the daemon is +# still alive, kill_daemon will pause for a few seconds and +# try again. This will repeat for a fixed number of times +# before kill_daemon returns on failure. 
The list of +# sleep intervals can be specified as **delays** and defaults +# to: +# +# 0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120 +# +# This sequence is designed to run first a very short sleep time (0.1) +# if the machine is fast enough and the daemon terminates in a fraction of a +# second. The increasing sleep numbers should give plenty of time for +# the daemon to die even on the slowest running machine. If a daemon +# takes more than a few minutes to stop (the sum of all sleep times), +# there probably is no point in waiting more and a number of things +# are likely to go wrong anyway: better give up and return on error. +# +# @param pid the process id to send a signal +# @param send_signal the signal to send +# @param delays sequence of sleep times before failure +# +function kill_daemon() { + local pid=$(cat $1) + local send_signal=$2 + local delays=${3:-0.1 0.2 1 1 1 2 3 5 5 5 10 10 20 60 60 60 120} + local exit_code=1 + # In order to try after the last large sleep add 0 at the end so we check + # one last time before dropping out of the loop + for try in $delays 0 ; do + if kill -$send_signal $pid 2> /dev/null ; then + exit_code=1 + else + exit_code=0 + break + fi + send_signal=0 + sleep $try + done; + return $exit_code +} + +function test_kill_daemon() { + local dir=$1 + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + + name_prefix=osd + for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do + # + # sending signal 0 won't kill the daemon + # waiting just for one second instead of the default schedule + # allows us to quickly verify what happens when kill fails + # to stop the daemon (i.e. it must return false) + # + ! kill_daemon $pidfile 0 1 || return 1 + # + # killing just the osd and verify the mon still is responsive + # + kill_daemon $pidfile TERM || return 1 + done + + name_prefix=mgr + for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do + # + # kill the mgr + # + kill_daemon $pidfile TERM || return 1 + done + + name_prefix=mon + for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do + # + # kill the mon and verify it cannot be reached + # + kill_daemon $pidfile TERM || return 1 + ! timeout 5 ceph status || return 1 + done + + teardown $dir || return 1 +} + +## +# Kill all daemons for which a .pid file exists in **dir**. Each +# daemon is sent a **signal** and kill_daemons waits for it to exit +# during a few minutes. By default all daemons are killed. If a +# **name_prefix** is provided, only the daemons for which a pid +# file is found matching the prefix are killed. See run_osd and +# run_mon for more information about the name conventions for +# the pid files. +# +# Send TERM to all daemons : kill_daemons $dir +# Send KILL to all daemons : kill_daemons $dir KILL +# Send KILL to all osds : kill_daemons $dir KILL osd +# Send KILL to osd 1 : kill_daemons $dir KILL osd.1 +# +# If a daemon is sent the TERM signal and does not terminate +# within a few minutes, it will still be running even after +# kill_daemons returns. +# +# If all daemons are kill successfully the function returns 0 +# if at least one daemon remains, this is treated as an +# error and the function return 1. 
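+#
+# The exit status is meant to be checked by the caller; a typical test
+# line (sketch) is:
+#
+#   kill_daemons $dir TERM osd || return 1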
+# +# @param dir path name of the environment +# @param signal name of the first signal (defaults to TERM) +# @param name_prefix only kill match daemons (defaults to all) +# @param delays sequence of sleep times before failure +# @return 0 on success, 1 on error +# +function kill_daemons() { + local trace=$(shopt -q -o xtrace && echo true || echo false) + $trace && shopt -u -o xtrace + local dir=$1 + local signal=${2:-TERM} + local name_prefix=$3 # optional, osd, mon, osd.1 + local delays=$4 #optional timing + local status=0 + local pids="" + + for pidfile in $(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') ; do + run_in_background pids kill_daemon $pidfile $signal $delays + done + + wait_background pids + status=$? + + $trace && shopt -s -o xtrace + return $status +} + +function test_kill_daemons() { + local dir=$1 + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + # + # sending signal 0 won't kill the daemon + # waiting just for one second instead of the default schedule + # allows us to quickly verify what happens when kill fails + # to stop the daemon (i.e. it must return false) + # + ! kill_daemons $dir 0 osd 1 || return 1 + # + # killing just the osd and verify the mon still is responsive + # + kill_daemons $dir TERM osd || return 1 + # + # kill the mgr + # + kill_daemons $dir TERM mgr || return 1 + # + # kill the mon and verify it cannot be reached + # + kill_daemons $dir TERM || return 1 + ! timeout 5 ceph status || return 1 + teardown $dir || return 1 +} + +# +# return a random TCP port which is not used yet +# +# please note, there could be racing if we use this function for +# a free port, and then try to bind on this port. +# +function get_unused_port() { + local ip=127.0.0.1 + python3 -c "import socket; s=socket.socket(); s.bind(('$ip', 0)); print(s.getsockname()[1]); s.close()" +} + +####################################################################### + +## +# Run a monitor by the name mon.**id** with data in **dir**/**id**. +# The logs can be found in **dir**/mon.**id**.log and the pid file +# is **dir**/mon.**id**.pid and the admin socket is +# **dir**/**id**/ceph-mon.**id**.asok. +# +# The remaining arguments are passed verbatim to ceph-mon --mkfs +# and the ceph-mon daemon. +# +# Two mandatory arguments must be provided: --fsid and --mon-host +# Instead of adding them to every call to run_mon, they can be +# set in the CEPH_ARGS environment variable to be read implicitly +# by every ceph command. +# +# The CEPH_CONF variable is expected to be set to /dev/null to +# only rely on arguments for configuration. +# +# Examples: +# +# CEPH_ARGS="--fsid=$(uuidgen) " +# CEPH_ARGS+="--mon-host=127.0.0.1:7018 " +# run_mon $dir a # spawn a mon and bind port 7018 +# run_mon $dir a --debug-filestore=20 # spawn with filestore debugging +# +# If mon_initial_members is not set, the default rbd pool is deleted +# and replaced with a replicated pool with less placement groups to +# speed up initialization. If mon_initial_members is set, no attempt +# is made to recreate the rbd pool because it would hang forever, +# waiting for other mons to join. +# +# A **dir**/ceph.conf file is created but not meant to be used by any +# function. It is convenient for debugging a failure with: +# +# ceph --conf **dir**/ceph.conf -s +# +# @param dir path name of the environment +# @param id mon identifier +# @param ... 
can be any option valid for ceph-mon +# @return 0 on success, 1 on error +# +function run_mon() { + local dir=$1 + shift + local id=$1 + shift + local data=$dir/$id + + ceph-mon \ + --id $id \ + --mkfs \ + --mon-data=$data \ + --run-dir=$dir \ + "$@" || return 1 + + ceph-mon \ + --id $id \ + --osd-failsafe-full-ratio=.99 \ + --mon-osd-full-ratio=.99 \ + --mon-data-avail-crit=1 \ + --mon-data-avail-warn=5 \ + --paxos-propose-interval=0.1 \ + --osd-crush-chooseleaf-type=0 \ + $EXTRA_OPTS \ + --debug-mon 20 \ + --debug-ms 20 \ + --debug-paxos 20 \ + --chdir= \ + --mon-data=$data \ + --log-file=$dir/\$name.log \ + --admin-socket=$(get_asok_path) \ + --mon-cluster-log-file=$dir/log \ + --run-dir=$dir \ + --pid-file=$dir/\$name.pid \ + --mon-allow-pool-delete \ + --mon-osd-backfillfull-ratio .99 \ + --mon-warn-on-insecure-global-id-reclaim-allowed=false \ + "$@" || return 1 + + cat > $dir/ceph.conf < $osd_data/new.json + ceph osd new $uuid -i $osd_data/new.json + rm $osd_data/new.json + ceph-osd -i $id $ceph_args --mkfs --key $OSD_SECRET --osd-uuid $uuid + + local key_fn=$osd_data/keyring + cat > $key_fn< $osd_data/new.json + ceph osd new $uuid -i $osd_data/new.json + rm $osd_data/new.json + ceph-osd -i $id $ceph_args --mkfs --key $OSD_SECRET --osd-uuid $uuid --osd-objectstore=filestore + + local key_fn=$osd_data/keyring + cat > $key_fn</dev/null | \ + jq '.acting | .[]') + # get rid of the trailing space + echo $osds +} + +function test_get_osds() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=2 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + create_rbd_pool || return 1 + get_osds rbd GROUP | grep --quiet '^[0-1] [0-1]$' || return 1 + teardown $dir || return 1 +} + +####################################################################### + +## +# Wait for the monitor to form quorum (optionally, of size N) +# +# @param timeout duration (lower-bound) to wait for quorum to be formed +# @param quorumsize size of quorum to wait for +# @return 0 on success, 1 on error +# +function wait_for_quorum() { + local timeout=$1 + local quorumsize=$2 + + if [[ -z "$timeout" ]]; then + timeout=300 + fi + + if [[ -z "$quorumsize" ]]; then + timeout $timeout ceph mon_status --format=json >&/dev/null || return 1 + return 0 + fi + + no_quorum=1 + wait_until=$((`date +%s` + $timeout)) + while [[ $(date +%s) -lt $wait_until ]]; do + jqfilter='.quorum | length == '$quorumsize + jqinput="$(timeout $timeout ceph mon_status --format=json 2>/dev/null)" + res=$(echo $jqinput | jq "$jqfilter") + if [[ "$res" == "true" ]]; then + no_quorum=0 + break + fi + done + return $no_quorum +} + +####################################################################### + +## +# Return the PG of supporting the **objectname** stored in +# **poolname**, as reported by ceph osd map. 
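+#
+# Example (sketch, using the rbd pool and the GROUP object as the tests
+# below do):
+#
+#   pgid=$(get_pg rbd GROUP)
+#   pg_scrub $pgid || return 1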
+# +# @param poolname an existing pool +# @param objectname an objectname (may or may not exist) +# @param STDOUT a PG +# @return 0 on success, 1 on error +# +function get_pg() { + local poolname=$1 + local objectname=$2 + + ceph --format json osd map $poolname $objectname 2>/dev/null | jq -r '.pgid' +} + +function test_get_pg() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + get_pg rbd GROUP | grep --quiet '^[0-9]\.[0-9a-f][0-9a-f]*$' || return 1 + teardown $dir || return 1 +} + +####################################################################### + +## +# Return the value of the **config**, obtained via the config get command +# of the admin socket of **daemon**.**id**. +# +# @param daemon mon or osd +# @param id mon or osd ID +# @param config the configuration variable name as found in config_opts.h +# @param STDOUT the config value +# @return 0 on success, 1 on error +# +function get_config() { + local daemon=$1 + local id=$2 + local config=$3 + + CEPH_ARGS='' \ + ceph --format json daemon $(get_asok_path $daemon.$id) \ + config get $config 2> /dev/null | \ + jq -r ".$config" +} + +function test_get_config() { + local dir=$1 + + # override the default config using command line arg and check it + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + test $(get_config mon a osd_pool_default_size) = 1 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 --osd_max_scrubs=3 || return 1 + test $(get_config osd 0 osd_max_scrubs) = 3 || return 1 + teardown $dir || return 1 +} + +####################################################################### + +## +# Set the **config** to specified **value**, via the config set command +# of the admin socket of **daemon**.**id** +# +# @param daemon mon or osd +# @param id mon or osd ID +# @param config the configuration variable name as found in config_opts.h +# @param value the config value +# @return 0 on success, 1 on error +# +function set_config() { + local daemon=$1 + local id=$2 + local config=$3 + local value=$4 + + test $(env CEPH_ARGS='' ceph --format json daemon $(get_asok_path $daemon.$id) \ + config set $config $value 2> /dev/null | \ + jq 'has("success")') == true +} + +function test_set_config() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + test $(get_config mon a ms_crc_header) = true || return 1 + set_config mon a ms_crc_header false || return 1 + test $(get_config mon a ms_crc_header) = false || return 1 + set_config mon a ms_crc_header true || return 1 + test $(get_config mon a ms_crc_header) = true || return 1 + teardown $dir || return 1 +} + +####################################################################### + +## +# Return the OSD id of the primary OSD supporting the **objectname** +# stored in **poolname**, as reported by ceph osd map. 
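+#
+# Example (sketch): restart the primary OSD serving an object
+#
+#   primary=$(get_primary rbd GROUP)
+#   kill_daemons $dir TERM osd.$primary || return 1
+#   activate_osd $dir $primary || return 1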
+# +# @param poolname an existing pool +# @param objectname an objectname (may or may not exist) +# @param STDOUT the primary OSD id +# @return 0 on success, 1 on error +# +function get_primary() { + local poolname=$1 + local objectname=$2 + + ceph --format json osd map $poolname $objectname 2>/dev/null | \ + jq '.acting_primary' +} + +function test_get_primary() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + local osd=0 + run_mgr $dir x || return 1 + run_osd $dir $osd || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + test $(get_primary rbd GROUP) = $osd || return 1 + teardown $dir || return 1 +} + +####################################################################### + +## +# Return the id of any OSD supporting the **objectname** stored in +# **poolname**, as reported by ceph osd map, except the primary. +# +# @param poolname an existing pool +# @param objectname an objectname (may or may not exist) +# @param STDOUT the OSD id +# @return 0 on success, 1 on error +# +function get_not_primary() { + local poolname=$1 + local objectname=$2 + + local primary=$(get_primary $poolname $objectname) + ceph --format json osd map $poolname $objectname 2>/dev/null | \ + jq ".acting | map(select (. != $primary)) | .[0]" +} + +function test_get_not_primary() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=2 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + local primary=$(get_primary rbd GROUP) + local not_primary=$(get_not_primary rbd GROUP) + test $not_primary != $primary || return 1 + test $not_primary = 0 -o $not_primary = 1 || return 1 + teardown $dir || return 1 +} + +####################################################################### + +function _objectstore_tool_nodown() { + local dir=$1 + shift + local id=$1 + shift + local osd_data=$dir/$id + + ceph-objectstore-tool \ + --data-path $osd_data \ + "$@" || return 1 +} + +function _objectstore_tool_nowait() { + local dir=$1 + shift + local id=$1 + shift + + kill_daemons $dir TERM osd.$id >&2 < /dev/null || return 1 + + _objectstore_tool_nodown $dir $id "$@" || return 1 + activate_osd $dir $id $ceph_osd_args >&2 || return 1 +} + +## +# Run ceph-objectstore-tool against the OSD **id** using the data path +# **dir**. The OSD is killed with TERM prior to running +# ceph-objectstore-tool because access to the data path is +# exclusive. The OSD is restarted after the command completes. The +# objectstore_tool returns after all PG are active+clean again. +# +# @param dir the data path of the OSD +# @param id the OSD id +# @param ... arguments to ceph-objectstore-tool +# @param STDIN the input of ceph-objectstore-tool +# @param STDOUT the output of ceph-objectstore-tool +# @return 0 on success, 1 on error +# +# The value of $ceph_osd_args will be passed to restarted osds +# +function objectstore_tool() { + local dir=$1 + shift + local id=$1 + shift + + _objectstore_tool_nowait $dir $id "$@" || return 1 + wait_for_clean >&2 +} + +function test_objectstore_tool() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + local osd=0 + run_mgr $dir x || return 1 + run_osd $dir $osd || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + rados --pool rbd put GROUP /etc/group || return 1 + objectstore_tool $dir $osd GROUP get-bytes | \ + diff - /etc/group + ! 
objectstore_tool $dir $osd NOTEXISTS get-bytes || return 1 + teardown $dir || return 1 +} + +####################################################################### + +## +# Predicate checking if there is an ongoing recovery in the +# cluster. If any of the recovering_{keys,bytes,objects}_per_sec +# counters are reported by ceph status, it means recovery is in +# progress. +# +# @return 0 if recovery in progress, 1 otherwise +# +function get_is_making_recovery_progress() { + local recovery_progress + recovery_progress+=".recovering_keys_per_sec + " + recovery_progress+=".recovering_bytes_per_sec + " + recovery_progress+=".recovering_objects_per_sec" + local progress=$(ceph --format json status 2>/dev/null | \ + jq -r ".pgmap | $recovery_progress") + test "$progress" != null +} + +function test_get_is_making_recovery_progress() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + ! get_is_making_recovery_progress || return 1 + teardown $dir || return 1 +} + +####################################################################### + +## +# Return the number of active PGs in the cluster. A PG is active if +# ceph pg dump pgs reports it both **active** and **clean** and that +# not **stale**. +# +# @param STDOUT the number of active PGs +# @return 0 on success, 1 on error +# +function get_num_active_clean() { + local expression + expression+="select(contains(\"active\") and contains(\"clean\")) | " + expression+="select(contains(\"stale\") | not)" + ceph --format json pg dump pgs 2>/dev/null | \ + jq ".pg_stats | [.[] | .state | $expression] | length" +} + +function test_get_num_active_clean() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + local num_active_clean=$(get_num_active_clean) + test "$num_active_clean" = $PG_NUM || return 1 + teardown $dir || return 1 +} + +## +# Return the number of active or peered PGs in the cluster. A PG matches if +# ceph pg dump pgs reports it is either **active** or **peered** and that +# not **stale**. +# +# @param STDOUT the number of active PGs +# @return 0 on success, 1 on error +# +function get_num_active_or_peered() { + local expression + expression+="select(contains(\"active\") or contains(\"peered\")) | " + expression+="select(contains(\"stale\") | not)" + ceph --format json pg dump pgs 2>/dev/null | \ + jq ".pg_stats | [.[] | .state | $expression] | length" +} + +function test_get_num_active_or_peered() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + local num_peered=$(get_num_active_or_peered) + test "$num_peered" = $PG_NUM || return 1 + teardown $dir || return 1 +} + +####################################################################### + +## +# Return the number of PGs in the cluster, according to +# ceph pg dump pgs. 
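+#
+# Example (sketch): the cluster is considered clean when
+#
+#   test $(get_num_active_clean) = $(get_num_pgs)
+#
+# which is what is_clean below checks.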
+# +# @param STDOUT the number of PGs +# @return 0 on success, 1 on error +# +function get_num_pgs() { + ceph --format json status 2>/dev/null | jq '.pgmap.num_pgs' +} + +function test_get_num_pgs() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + local num_pgs=$(get_num_pgs) + test "$num_pgs" -gt 0 || return 1 + teardown $dir || return 1 +} + +####################################################################### + +## +# Return the OSD ids in use by at least one PG in the cluster (either +# in the up or the acting set), according to ceph pg dump pgs. Every +# OSD id shows as many times as they are used in up and acting sets. +# If an OSD id is in both the up and acting set of a given PG, it will +# show twice. +# +# @param STDOUT a sorted list of OSD ids +# @return 0 on success, 1 on error +# +function get_osd_id_used_by_pgs() { + ceph --format json pg dump pgs 2>/dev/null | jq '.pg_stats | .[] | .up[], .acting[]' | sort +} + +function test_get_osd_id_used_by_pgs() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + local osd_ids=$(get_osd_id_used_by_pgs | uniq) + test "$osd_ids" = "0" || return 1 + teardown $dir || return 1 +} + +####################################################################### + +## +# Wait until the OSD **id** shows **count** times in the +# PGs (see get_osd_id_used_by_pgs for more information about +# how OSD ids are counted). +# +# @param id the OSD id +# @param count the number of time it must show in the PGs +# @return 0 on success, 1 on error +# +function wait_osd_id_used_by_pgs() { + local id=$1 + local count=$2 + + status=1 + for ((i=0; i < $TIMEOUT / 5; i++)); do + echo $i + if ! test $(get_osd_id_used_by_pgs | grep -c $id) = $count ; then + sleep 5 + else + status=0 + break + fi + done + return $status +} + +function test_wait_osd_id_used_by_pgs() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + wait_osd_id_used_by_pgs 0 8 || return 1 + ! TIMEOUT=1 wait_osd_id_used_by_pgs 123 5 || return 1 + teardown $dir || return 1 +} + +####################################################################### + +## +# Return the date and time of the last completed scrub for **pgid**, +# as reported by ceph pg dump pgs. Note that a repair also sets this +# date. +# +# @param pgid the id of the PG +# @param STDOUT the date and time of the last scrub +# @return 0 on success, 1 on error +# +function get_last_scrub_stamp() { + local pgid=$1 + local sname=${2:-last_scrub_stamp} + ceph --format json pg dump pgs 2>/dev/null | \ + jq -r ".pg_stats | .[] | select(.pgid==\"$pgid\") | .$sname" +} + +function test_get_last_scrub_stamp() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + stamp=$(get_last_scrub_stamp 1.0) + test -n "$stamp" || return 1 + teardown $dir || return 1 +} + +####################################################################### + +## +# Predicate checking if the cluster is clean, i.e. 
all of its PGs are +# in a clean state (see get_num_active_clean for a definition). +# +# @return 0 if the cluster is clean, 1 otherwise +# +function is_clean() { + num_pgs=$(get_num_pgs) + test $num_pgs != 0 || return 1 + test $(get_num_active_clean) = $num_pgs || return 1 +} + +function test_is_clean() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + is_clean || return 1 + teardown $dir || return 1 +} + +####################################################################### + +calc() { $AWK "BEGIN{print $*}"; } + +## +# Return a list of numbers that are increasingly larger and whose +# total is **timeout** seconds. It can be used to have short sleep +# delay while waiting for an event on a fast machine. But if running +# very slowly the larger delays avoid stressing the machine even +# further or spamming the logs. +# +# @param timeout sum of all delays, in seconds +# @return a list of sleep delays +# +function get_timeout_delays() { + local trace=$(shopt -q -o xtrace && echo true || echo false) + $trace && shopt -u -o xtrace + local timeout=$1 + local first_step=${2:-1} + local max_timeout=${3:-$MAX_TIMEOUT} + + local i + local total="0" + i=$first_step + while test "$(calc $total + $i \<= $timeout)" = "1"; do + echo -n "$(calc $i) " + total=$(calc $total + $i) + i=$(calc $i \* 2) + if [ $max_timeout -gt 0 ]; then + # Did we reach max timeout ? + if [ ${i%.*} -eq ${max_timeout%.*} ] && [ ${i#*.} \> ${max_timeout#*.} ] || [ ${i%.*} -gt ${max_timeout%.*} ]; then + # Yes, so let's cap the max wait time to max + i=$max_timeout + fi + fi + done + if test "$(calc $total \< $timeout)" = "1"; then + echo -n "$(calc $timeout - $total) " + fi + $trace && shopt -s -o xtrace +} + +function test_get_timeout_delays() { + test "$(get_timeout_delays 1)" = "1 " || return 1 + test "$(get_timeout_delays 5)" = "1 2 2 " || return 1 + test "$(get_timeout_delays 6)" = "1 2 3 " || return 1 + test "$(get_timeout_delays 7)" = "1 2 4 " || return 1 + test "$(get_timeout_delays 8)" = "1 2 4 1 " || return 1 + test "$(get_timeout_delays 1 .1)" = "0.1 0.2 0.4 0.3 " || return 1 + test "$(get_timeout_delays 1.5 .1)" = "0.1 0.2 0.4 0.8 " || return 1 + test "$(get_timeout_delays 5 .1)" = "0.1 0.2 0.4 0.8 1.6 1.9 " || return 1 + test "$(get_timeout_delays 6 .1)" = "0.1 0.2 0.4 0.8 1.6 2.9 " || return 1 + test "$(get_timeout_delays 6.3 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 " || return 1 + test "$(get_timeout_delays 20 .1)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 7.3 " || return 1 + test "$(get_timeout_delays 300 .1 0)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 12.8 25.6 51.2 102.4 95.3 " || return 1 + test "$(get_timeout_delays 300 .1 10)" = "0.1 0.2 0.4 0.8 1.6 3.2 6.4 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10 7.3 " || return 1 +} + +####################################################################### + +## +# Wait until the cluster becomes clean or if it does not make progress +# for $WAIT_FOR_CLEAN_TIMEOUT seconds. 
+# Progress is measured either via the **get_is_making_recovery_progress** +# predicate or if the number of clean PGs changes (as returned by get_num_active_clean) +# +# @return 0 if the cluster is clean, 1 otherwise +# +function wait_for_clean() { + local cmd=$1 + local num_active_clean=-1 + local cur_active_clean + local -a delays=($(get_timeout_delays $WAIT_FOR_CLEAN_TIMEOUT .1)) + local -i loop=0 + + flush_pg_stats || return 1 + while test $(get_num_pgs) == 0 ; do + sleep 1 + done + + while true ; do + # Comparing get_num_active_clean & get_num_pgs is used to determine + # if the cluster is clean. That's almost an inline of is_clean() to + # get more performance by avoiding multiple calls of get_num_active_clean. + cur_active_clean=$(get_num_active_clean) + test $cur_active_clean = $(get_num_pgs) && break + if test $cur_active_clean != $num_active_clean ; then + loop=0 + num_active_clean=$cur_active_clean + elif get_is_making_recovery_progress ; then + loop=0 + elif (( $loop >= ${#delays[*]} )) ; then + ceph report + return 1 + fi + # eval is a no-op if cmd is empty + eval $cmd + sleep ${delays[$loop]} + loop+=1 + done + return 0 +} + +function test_wait_for_clean() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=2 || return 1 + run_osd $dir 0 || return 1 + run_mgr $dir x || return 1 + create_rbd_pool || return 1 + ! WAIT_FOR_CLEAN_TIMEOUT=1 wait_for_clean || return 1 + run_osd $dir 1 || return 1 + wait_for_clean || return 1 + teardown $dir || return 1 +} + +## +# Wait until the cluster becomes peered or if it does not make progress +# for $WAIT_FOR_CLEAN_TIMEOUT seconds. +# Progress is measured either via the **get_is_making_recovery_progress** +# predicate or if the number of peered PGs changes (as returned by get_num_active_or_peered) +# +# @return 0 if the cluster is clean, 1 otherwise +# +function wait_for_peered() { + local cmd=$1 + local num_peered=-1 + local cur_peered + local -a delays=($(get_timeout_delays $WAIT_FOR_CLEAN_TIMEOUT .1)) + local -i loop=0 + + flush_pg_stats || return 1 + while test $(get_num_pgs) == 0 ; do + sleep 1 + done + + while true ; do + # Comparing get_num_active_clean & get_num_pgs is used to determine + # if the cluster is clean. That's almost an inline of is_clean() to + # get more performance by avoiding multiple calls of get_num_active_clean. + cur_peered=$(get_num_active_or_peered) + test $cur_peered = $(get_num_pgs) && break + if test $cur_peered != $num_peered ; then + loop=0 + num_peered=$cur_peered + elif get_is_making_recovery_progress ; then + loop=0 + elif (( $loop >= ${#delays[*]} )) ; then + ceph report + return 1 + fi + # eval is a no-op if cmd is empty + eval $cmd + sleep ${delays[$loop]} + loop+=1 + done + return 0 +} + +function test_wait_for_peered() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=2 || return 1 + run_osd $dir 0 || return 1 + run_mgr $dir x || return 1 + create_rbd_pool || return 1 + ! WAIT_FOR_CLEAN_TIMEOUT=1 wait_for_clean || return 1 + run_osd $dir 1 || return 1 + wait_for_peered || return 1 + teardown $dir || return 1 +} + + +####################################################################### + +## +# Wait until the cluster has health condition passed as arg +# again for $TIMEOUT seconds. +# +# @param string to grep for in health detail +# @return 0 if the cluster health matches request, 1 otherwise +# +function wait_for_health() { + local grepstr=$1 + local -a delays=($(get_timeout_delays $TIMEOUT .1)) + local -i loop=0 + + while ! 
ceph health detail | grep "$grepstr" ; do + if (( $loop >= ${#delays[*]} )) ; then + ceph health detail + return 1 + fi + sleep ${delays[$loop]} + loop+=1 + done +} + +## +# Wait until the cluster becomes HEALTH_OK again or if it does not make progress +# for $TIMEOUT seconds. +# +# @return 0 if the cluster is HEALTHY, 1 otherwise +# +function wait_for_health_ok() { + wait_for_health "HEALTH_OK" || return 1 +} + +function test_wait_for_health_ok() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_failsafe_full_ratio=.99 --mon_pg_warn_min_per_osd=0 || return 1 + run_mgr $dir x --mon_pg_warn_min_per_osd=0 || return 1 + # start osd_pool_default_size OSDs + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + kill_daemons $dir TERM osd || return 1 + ceph osd down 0 || return 1 + # expect TOO_FEW_OSDS warning + ! TIMEOUT=1 wait_for_health_ok || return 1 + # resurrect all OSDs + activate_osd $dir 0 || return 1 + activate_osd $dir 1 || return 1 + activate_osd $dir 2 || return 1 + wait_for_health_ok || return 1 + teardown $dir || return 1 +} + + +####################################################################### + +## +# Run repair on **pgid** and wait until it completes. The repair +# function will fail if repair does not complete within $TIMEOUT +# seconds. +# +# @param pgid the id of the PG +# @return 0 on success, 1 on error +# +function repair() { + local pgid=$1 + local last_scrub=$(get_last_scrub_stamp $pgid) + ceph pg repair $pgid + wait_for_scrub $pgid "$last_scrub" +} + +function test_repair() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + repair 1.0 || return 1 + kill_daemons $dir KILL osd || return 1 + ! TIMEOUT=1 repair 1.0 || return 1 + teardown $dir || return 1 +} +####################################################################### + +## +# Run scrub on **pgid** and wait until it completes. The pg_scrub +# function will fail if repair does not complete within $TIMEOUT +# seconds. The pg_scrub is complete whenever the +# **get_last_scrub_stamp** function reports a timestamp different from +# the one stored before starting the scrub. +# +# @param pgid the id of the PG +# @return 0 on success, 1 on error +# +function pg_scrub() { + local pgid=$1 + local last_scrub=$(get_last_scrub_stamp $pgid) + ceph pg scrub $pgid + wait_for_scrub $pgid "$last_scrub" +} + +function pg_deep_scrub() { + local pgid=$1 + local last_scrub=$(get_last_scrub_stamp $pgid last_deep_scrub_stamp) + ceph pg deep-scrub $pgid + wait_for_scrub $pgid "$last_scrub" last_deep_scrub_stamp +} + +function test_pg_scrub() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + pg_scrub 1.0 || return 1 + kill_daemons $dir KILL osd || return 1 + ! TIMEOUT=1 pg_scrub 1.0 || return 1 + teardown $dir || return 1 +} + +####################################################################### + +## +# Run the *command* and expect it to fail (i.e. return a non zero status). +# The output (stderr and stdout) is stored in a temporary file in *dir* +# and is expected to contain the string *expected*. +# +# Return 0 if the command failed and the string was found. Otherwise +# return 1 and cat the full output of the command on stderr for debug. 
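+#
+# Example (sketch, as used by the crush tests in this suite):
+#
+#   expect_failure $dir EBUSY ceph osd crush class rm CLASS || return 1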
+# +# @param dir temporary directory to store the output +# @param expected string to look for in the output +# @param command ... the command and its arguments +# @return 0 on success, 1 on error +# + +function expect_failure() { + local dir=$1 + shift + local expected="$1" + shift + local success + + if "$@" > $dir/out 2>&1 ; then + success=true + else + success=false + fi + + if $success || ! grep --quiet "$expected" $dir/out ; then + cat $dir/out >&2 + return 1 + else + return 0 + fi +} + +function test_expect_failure() { + local dir=$1 + + setup $dir || return 1 + expect_failure $dir FAIL bash -c 'echo FAIL ; exit 1' || return 1 + # the command did not fail + ! expect_failure $dir FAIL bash -c 'echo FAIL ; exit 0' > $dir/out || return 1 + grep --quiet FAIL $dir/out || return 1 + # the command failed but the output does not contain the expected string + ! expect_failure $dir FAIL bash -c 'echo UNEXPECTED ; exit 1' > $dir/out || return 1 + ! grep --quiet FAIL $dir/out || return 1 + teardown $dir || return 1 +} + +####################################################################### + +## +# Given the *last_scrub*, wait for scrub to happen on **pgid**. It +# will fail if scrub does not complete within $TIMEOUT seconds. The +# repair is complete whenever the **get_last_scrub_stamp** function +# reports a timestamp different from the one given in argument. +# +# @param pgid the id of the PG +# @param last_scrub timestamp of the last scrub for *pgid* +# @return 0 on success, 1 on error +# +function wait_for_scrub() { + local pgid=$1 + local last_scrub="$2" + local sname=${3:-last_scrub_stamp} + + for ((i=0; i < $TIMEOUT; i++)); do + if test "$(get_last_scrub_stamp $pgid $sname)" '>' "$last_scrub" ; then + return 0 + fi + sleep 1 + done + return 1 +} + +function test_wait_for_scrub() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + local pgid=1.0 + ceph pg repair $pgid + local last_scrub=$(get_last_scrub_stamp $pgid) + wait_for_scrub $pgid "$last_scrub" || return 1 + kill_daemons $dir KILL osd || return 1 + last_scrub=$(get_last_scrub_stamp $pgid) + ! TIMEOUT=1 wait_for_scrub $pgid "$last_scrub" || return 1 + teardown $dir || return 1 +} + +####################################################################### + +## +# Return 0 if the erasure code *plugin* is available, 1 otherwise. +# +# @param plugin erasure code plugin +# @return 0 on success, 1 on error +# + +function erasure_code_plugin_exists() { + local plugin=$1 + local status + local grepstr + local s + case `uname` in + FreeBSD) grepstr="Cannot open.*$plugin" ;; + *) grepstr="$plugin.*No such file" ;; + esac + + s=$(ceph osd erasure-code-profile set TESTPROFILE plugin=$plugin 2>&1) + local status=$? + if [ $status -eq 0 ]; then + ceph osd erasure-code-profile rm TESTPROFILE + elif ! echo $s | grep --quiet "$grepstr" ; then + status=1 + # display why the string was rejected. + echo $s + fi + return $status +} + +function test_erasure_code_plugin_exists() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + erasure_code_plugin_exists jerasure || return 1 + ! erasure_code_plugin_exists FAKE || return 1 + teardown $dir || return 1 +} + +####################################################################### + +## +# Display all log files from **dir** on stdout. 
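+#
+# Example (sketch): dump the logs when a step of a test fails
+#
+#   wait_for_clean || { display_logs $dir ; return 1 ; }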
+# +# @param dir directory in which all data is stored +# + +function display_logs() { + local dir=$1 + + find $dir -maxdepth 1 -name '*.log' | \ + while read file ; do + echo "======================= $file" + cat $file + done +} + +function test_display_logs() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a || return 1 + kill_daemons $dir || return 1 + display_logs $dir > $dir/log.out + grep --quiet mon.a.log $dir/log.out || return 1 + teardown $dir || return 1 +} + +####################################################################### +## +# Spawn a command in background and save the pid in the variable name +# passed in argument. To make the output reading easier, the output is +# prepend with the process id. +# +# Example: +# pids1="" +# run_in_background pids1 bash -c 'sleep 1; exit 1' +# +# @param pid_variable the variable name (not value) where the pids will be stored +# @param ... the command to execute +# @return only the pid_variable output should be considered and used with **wait_background** +# +function run_in_background() { + local pid_variable=$1 + shift + # Execute the command and prepend the output with its pid + # We enforce to return the exit status of the command and not the sed one. + ("$@" |& sed 's/^/'$$': /'; return "${PIPESTATUS[0]}") >&2 & + eval "$pid_variable+=\" $!\"" +} + +function save_stdout { + local out="$1" + shift + "$@" > "$out" +} + +function test_run_in_background() { + local pids + run_in_background pids sleep 1 + run_in_background pids sleep 1 + test $(echo $pids | wc -w) = 2 || return 1 + wait $pids || return 1 +} + +####################################################################### +## +# Wait for pids running in background to complete. +# This function is usually used after a **run_in_background** call +# Example: +# pids1="" +# run_in_background pids1 bash -c 'sleep 1; exit 1' +# wait_background pids1 +# +# @param pids The variable name that contains the active PIDS. Set as empty at then end of the function. +# @return returns 1 if at least one process exits in error unless returns 0 +# +function wait_background() { + # We extract the PIDS from the variable name + pids=${!1} + + return_code=0 + for pid in $pids; do + if ! wait $pid; then + # If one process failed then return 1 + return_code=1 + fi + done + + # We empty the variable reporting that all process ended + eval "$1=''" + + return $return_code +} + + +function test_wait_background() { + local pids="" + run_in_background pids bash -c "sleep 1; exit 1" + run_in_background pids bash -c "sleep 2; exit 0" + wait_background pids + if [ $? -ne 1 ]; then return 1; fi + + run_in_background pids bash -c "sleep 1; exit 0" + run_in_background pids bash -c "sleep 2; exit 0" + wait_background pids + if [ $? -ne 0 ]; then return 1; fi + + if [ ! 
-z "$pids" ]; then return 1; fi +} + +function flush_pg_stats() +{ + local timeout=${1:-$TIMEOUT} + + ids=`ceph osd ls` + seqs='' + for osd in $ids; do + seq=`ceph tell osd.$osd flush_pg_stats` + if test -z "$seq" + then + continue + fi + seqs="$seqs $osd-$seq" + done + + for s in $seqs; do + osd=`echo $s | cut -d - -f 1` + seq=`echo $s | cut -d - -f 2` + echo "waiting osd.$osd seq $seq" + while test $(ceph osd last-stat-seq $osd) -lt $seq; do + sleep 1 + if [ $((timeout--)) -eq 0 ]; then + return 1 + fi + done + done +} + +function test_flush_pg_stats() +{ + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + create_rbd_pool || return 1 + rados -p rbd put obj /etc/group + flush_pg_stats || return 1 + local jq_filter='.pools | .[] | select(.name == "rbd") | .stats' + stored=`ceph df detail --format=json | jq "$jq_filter.stored"` + stored_raw=`ceph df detail --format=json | jq "$jq_filter.stored_raw"` + test $stored -gt 0 || return 1 + test $stored == $stored_raw || return 1 + teardown $dir +} + +####################################################################### + +## +# Call the **run** function (which must be defined by the caller) with +# the **dir** argument followed by the caller argument list. +# +# If the **run** function returns on error, all logs found in **dir** +# are displayed for diagnostic purposes. +# +# **teardown** function is called when the **run** function returns +# (on success or on error), to cleanup leftovers. The CEPH_CONF is set +# to /dev/null and CEPH_ARGS is unset so that the tests are protected from +# external interferences. +# +# It is the responsibility of the **run** function to call the +# **setup** function to prepare the test environment (create a temporary +# directory etc.). +# +# The shell is required (via PS4) to display the function and line +# number whenever a statement is executed to help debugging. +# +# @param dir directory in which all data is stored +# @param ... arguments passed transparently to **run** +# @return 0 on success, 1 on error +# +function main() { + local dir=td/$1 + shift + + shopt -s -o xtrace + PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: ' + + export PATH=.:$PATH # make sure program from sources are preferred + export PYTHONWARNINGS=ignore + export CEPH_CONF=/dev/null + unset CEPH_ARGS + + local code + if run $dir "$@" ; then + code=0 + else + code=1 + fi + teardown $dir $code || return 1 + return $code +} + +####################################################################### + +function run_tests() { + shopt -s -o xtrace + PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: ' + + export .:$PATH # make sure program from sources are preferred + + export CEPH_MON="127.0.0.1:7109" # git grep '\<7109\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+=" --fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + export CEPH_CONF=/dev/null + + local funcs=${@:-$(set | sed -n -e 's/^\(test_[0-9a-z_]*\) .*/\1/p')} + local dir=td/ceph-helpers + + for func in $funcs ; do + if ! $func $dir; then + teardown $dir 1 + return 1 + fi + done +} + +if test "$1" = TESTS ; then + shift + run_tests "$@" + exit $? +fi + +# NOTE: +# jq only support --exit-status|-e from version 1.4 forwards, which makes +# returning on error waaaay prettier and straightforward. +# However, the current automated upstream build is running with v1.3, +# which has no idea what -e is. 
Hence the convoluted error checking we +# need. Sad. +# The next time someone changes this code, please check if v1.4 is now +# a thing, and, if so, please change these to use -e. Thanks. + +# jq '.all.supported | select([.[] == "foo"] | any)' +function jq_success() { + input="$1" + filter="$2" + expects="\"$3\"" + + in_escaped=$(printf %s "$input" | sed "s/'/'\\\\''/g") + filter_escaped=$(printf %s "$filter" | sed "s/'/'\\\\''/g") + + ret=$(echo "$in_escaped" | jq "$filter_escaped") + if [[ "$ret" == "true" ]]; then + return 0 + elif [[ -n "$expects" ]]; then + if [[ "$ret" == "$expects" ]]; then + return 0 + fi + fi + return 1 + input=$1 + filter=$2 + expects="$3" + + ret="$(echo $input | jq \"$filter\")" + if [[ "$ret" == "true" ]]; then + return 0 + elif [[ -n "$expects" && "$ret" == "$expects" ]]; then + return 0 + fi + return 1 +} + +function inject_eio() { + local pooltype=$1 + shift + local which=$1 + shift + local poolname=$1 + shift + local objname=$1 + shift + local dir=$1 + shift + local shard_id=$1 + shift + + local -a initial_osds=($(get_osds $poolname $objname)) + local osd_id=${initial_osds[$shard_id]} + if [ "$pooltype" != "ec" ]; then + shard_id="" + fi + type=$(cat $dir/$osd_id/type) + set_config osd $osd_id ${type}_debug_inject_read_err true || return 1 + local loop=0 + while ( CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.$osd_id) \ + inject${which}err $poolname $objname $shard_id | grep -q Invalid ); do + loop=$(expr $loop + 1) + if [ $loop = "10" ]; then + return 1 + fi + sleep 1 + done +} + +function multidiff() { + if ! diff $@ ; then + if [ "$DIFFCOLOPTS" = "" ]; then + return 1 + fi + diff $DIFFCOLOPTS $@ + fi +} + +function create_ec_pool() { + local pool_name=$1 + shift + local allow_overwrites=$1 + shift + + ceph osd erasure-code-profile set myprofile crush-failure-domain=osd "$@" || return 1 + + create_pool "$poolname" 1 1 erasure myprofile || return 1 + + if [ "$allow_overwrites" = "true" ]; then + ceph osd pool set "$poolname" allow_ec_overwrites true || return 1 + fi + + wait_for_clean || return 1 + return 0 +} + +# Local Variables: +# compile-command: "cd ../../src ; make -j4 && ../qa/standalone/ceph-helpers.sh TESTS # test_get_config" +# End: diff --git a/qa/standalone/crush/crush-choose-args.sh b/qa/standalone/crush/crush-choose-args.sh new file mode 100755 index 00000000..d2c33cfe --- /dev/null +++ b/qa/standalone/crush/crush-choose-args.sh @@ -0,0 +1,243 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2017 Red Hat +# +# Author: Loic Dachary +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7131" # git grep '\<7131\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + CEPH_ARGS+="--crush-location=root=default,host=HOST " + CEPH_ARGS+="--osd-crush-initial-weight=3 " + # + # Disable device auto class feature for now. + # The device class is non-deterministic and will + # crash the crushmap comparison below. 
+ # + CEPH_ARGS+="--osd-class-update-on-start=false " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_choose_args_update() { + # + # adding a weighted OSD updates the weight up to the top + # + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + + ceph osd set-require-min-compat-client luminous + ceph osd getcrushmap > $dir/map || return 1 + crushtool -d $dir/map -o $dir/map.txt || return 1 + sed -i -e '/end crush map/d' $dir/map.txt + cat >> $dir/map.txt < $dir/map-one-more || return 1 + crushtool -d $dir/map-one-more -o $dir/map-one-more.txt || return 1 + cat $dir/map-one-more.txt + diff -u $dir/map-one-more.txt $CEPH_ROOT/src/test/crush/crush-choose-args-expected-one-more-3.txt || return 1 + + destroy_osd $dir 1 || return 1 + ceph osd crush tree + ceph osd getcrushmap > $dir/map-one-less || return 1 + crushtool -d $dir/map-one-less -o $dir/map-one-less.txt || return 1 + diff -u $dir/map-one-less.txt $dir/map.txt || return 1 +} + +function TEST_no_update_weight_set() { + # + # adding a zero weight OSD does not update the weight set at all + # + local dir=$1 + + ORIG_CEPH_ARGS="$CEPH_ARGS" + CEPH_ARGS+="--osd-crush-update-weight-set=false " + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + + ceph osd set-require-min-compat-client luminous + ceph osd crush tree + ceph osd getcrushmap > $dir/map || return 1 + crushtool -d $dir/map -o $dir/map.txt || return 1 + sed -i -e '/end crush map/d' $dir/map.txt + cat >> $dir/map.txt < $dir/map-one-more || return 1 + crushtool -d $dir/map-one-more -o $dir/map-one-more.txt || return 1 + cat $dir/map-one-more.txt + diff -u $dir/map-one-more.txt $CEPH_ROOT/src/test/crush/crush-choose-args-expected-one-more-0.txt || return 1 + + destroy_osd $dir 1 || return 1 + ceph osd crush tree + ceph osd getcrushmap > $dir/map-one-less || return 1 + crushtool -d $dir/map-one-less -o $dir/map-one-less.txt || return 1 + diff -u $dir/map-one-less.txt $dir/map.txt || return 1 + + CEPH_ARGS="$ORIG_CEPH_ARGS" +} + +function TEST_reweight() { + # reweight and reweight-compat behave appropriately + local dir=$1 + + ORIG_CEPH_ARGS="$CEPH_ARGS" + CEPH_ARGS+="--osd-crush-update-weight-set=false " + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + + ceph osd crush weight-set create-compat || return 1 + ceph osd crush tree + + ceph osd crush weight-set reweight-compat osd.0 2 || return 1 + ceph osd crush tree + ceph osd crush tree | grep host | grep '6.00000 5.00000' || return 1 + + run_osd $dir 2 || return 1 + ceph osd crush tree + ceph osd crush tree | grep host | grep '9.00000 5.00000' || return 1 + + ceph osd crush reweight osd.2 4 + ceph osd crush tree + ceph osd crush tree | grep host | grep '10.00000 5.00000' || return 1 + + ceph osd crush weight-set reweight-compat osd.2 4 + ceph osd crush tree + ceph osd crush tree | grep host | grep '10.00000 9.00000' || return 1 +} + +function TEST_move_bucket() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + + ceph osd crush weight-set create-compat || return 1 + ceph osd crush weight-set reweight-compat osd.0 2 || return 1 + ceph osd crush weight-set reweight-compat osd.1 2 || return 1 + ceph osd crush tree + ceph osd crush tree | 
grep HOST | grep '6.00000 4.00000' || return 1 + + # moving a bucket adjusts the weights + ceph osd crush add-bucket RACK rack root=default || return 1 + ceph osd crush move HOST rack=RACK || return 1 + ceph osd crush tree + ceph osd crush tree | grep HOST | grep '6.00000 4.00000' || return 1 + ceph osd crush tree | grep RACK | grep '6.00000 4.00000' || return 1 + + # weight-set reweight adjusts containing buckets + ceph osd crush weight-set reweight-compat osd.0 1 || return 1 + ceph osd crush tree + ceph osd crush tree | grep HOST | grep '6.00000 3.00000' || return 1 + ceph osd crush tree | grep RACK | grep '6.00000 3.00000' || return 1 + + # moving a leaf resets its weight-set to the canonical weight... + ceph config set mon osd_crush_update_weight_set true || return 1 + ceph osd crush add-bucket FOO host root=default || return 1 + ceph osd crush move osd.0 host=FOO || return 1 + ceph osd crush tree + ceph osd crush tree | grep osd.0 | grep '3.00000 3.00000' || return 1 + ceph osd crush tree | grep HOST | grep '3.00000 2.00000' || return 1 + ceph osd crush tree | grep RACK | grep '3.00000 2.00000' || return 1 + + # ...or to zero. + ceph config set mon osd_crush_update_weight_set false || return 1 + ceph osd crush move osd.1 host=FOO || return 1 + ceph osd crush tree + ceph osd crush tree | grep osd.0 | grep '3.00000 3.00000' || return 1 + ceph osd crush tree | grep osd.1 | grep '3.00000 0' || return 1 + ceph osd crush tree | grep FOO | grep '6.00000 3.00000' || return 1 +} + +main crush-choose-args "$@" + +# Local Variables: +# compile-command: "cd ../../../build ; ln -sf ../src/ceph-disk/ceph_disk/main.py bin/ceph-disk && make -j4 && ../src/test/crush/crush-choose-args.sh" +# End: diff --git a/qa/standalone/crush/crush-classes.sh b/qa/standalone/crush/crush-classes.sh new file mode 100755 index 00000000..509585db --- /dev/null +++ b/qa/standalone/crush/crush-classes.sh @@ -0,0 +1,237 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2017 Red Hat +# +# Author: Loic Dachary +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7130" # git grep '\<7130\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + # + # Disable auto-class, so we can inject device class manually below + # + CEPH_ARGS+="--osd-class-update-on-start=false " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function add_something() { + local dir=$1 + local obj=${2:-SOMETHING} + + local payload=ABCDEF + echo $payload > $dir/ORIGINAL + rados --pool rbd put $obj $dir/ORIGINAL || return 1 +} + +function get_osds_up() { + local poolname=$1 + local objectname=$2 + + local osds=$(ceph --format xml osd map $poolname $objectname 2>/dev/null | \ + $XMLSTARLET sel -t -m "//up/osd" -v . 
-o ' ') + # get rid of the trailing space + echo $osds +} + +function TEST_classes() { + local dir=$1 + + run_mon $dir a || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + create_rbd_pool || return 1 + + test "$(get_osds_up rbd SOMETHING)" == "1 2 0" || return 1 + add_something $dir SOMETHING || return 1 + + # + # osd.0 has class ssd and the rule is modified + # to only take ssd devices. + # + ceph osd getcrushmap > $dir/map || return 1 + crushtool -d $dir/map -o $dir/map.txt || return 1 + ${SED} -i \ + -e '/device 0 osd.0/s/$/ class ssd/' \ + -e '/step take default/s/$/ class ssd/' \ + $dir/map.txt || return 1 + crushtool -c $dir/map.txt -o $dir/map-new || return 1 + ceph osd setcrushmap -i $dir/map-new || return 1 + + # + # There can only be one mapping since there only is + # one device with ssd class. + # + ok=false + for delay in 2 4 8 16 32 64 128 256 ; do + if test "$(get_osds_up rbd SOMETHING_ELSE)" == "0" ; then + ok=true + break + fi + sleep $delay + ceph osd dump # for debugging purposes + ceph pg dump # for debugging purposes + done + $ok || return 1 + # + # Writing keeps working because the pool is min_size 1 by + # default. + # + add_something $dir SOMETHING_ELSE || return 1 + + # + # Sanity check that the rule indeed has ssd + # generated bucket with a name including ~ssd. + # + ceph osd crush dump | grep -q '~ssd' || return 1 +} + +function TEST_set_device_class() { + local dir=$1 + + TEST_classes $dir || return 1 + + ceph osd crush set-device-class ssd osd.0 || return 1 + ceph osd crush class ls-osd ssd | grep 0 || return 1 + ceph osd crush set-device-class ssd osd.1 || return 1 + ceph osd crush class ls-osd ssd | grep 1 || return 1 + ceph osd crush set-device-class ssd 0 1 || return 1 # should be idempotent + + ok=false + for delay in 2 4 8 16 32 64 128 256 ; do + if test "$(get_osds_up rbd SOMETHING_ELSE)" == "0 1" ; then + ok=true + break + fi + sleep $delay + ceph osd crush dump + ceph osd dump # for debugging purposes + ceph pg dump # for debugging purposes + done + $ok || return 1 +} + +function TEST_mon_classes() { + local dir=$1 + + run_mon $dir a || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + create_rbd_pool || return 1 + + test "$(get_osds_up rbd SOMETHING)" == "1 2 0" || return 1 + add_something $dir SOMETHING || return 1 + + # test create and remove class + ceph osd crush class create CLASS || return 1 + ceph osd crush class create CLASS || return 1 # idempotent + ceph osd crush class ls | grep CLASS || return 1 + ceph osd crush class rename CLASS TEMP || return 1 + ceph osd crush class ls | grep TEMP || return 1 + ceph osd crush class rename TEMP CLASS || return 1 + ceph osd crush class ls | grep CLASS || return 1 + ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd crush-device-class=CLASS || return 1 + expect_failure $dir EBUSY ceph osd crush class rm CLASS || return 1 + ceph osd erasure-code-profile rm myprofile || return 1 + ceph osd crush class rm CLASS || return 1 + ceph osd crush class rm CLASS || return 1 # test idempotence + + # test rm-device-class + ceph osd crush set-device-class aaa osd.0 || return 1 + ceph osd tree | grep -q 'aaa' || return 1 + ceph osd crush dump | grep -q '~aaa' || return 1 + ceph osd crush tree --show-shadow | grep -q '~aaa' || return 1 + ceph osd crush set-device-class bbb osd.1 || return 1 + ceph osd tree | grep -q 'bbb' || return 1 + ceph osd crush dump | 
grep -q '~bbb' || return 1 + ceph osd crush tree --show-shadow | grep -q '~bbb' || return 1 + ceph osd crush set-device-class ccc osd.2 || return 1 + ceph osd tree | grep -q 'ccc' || return 1 + ceph osd crush dump | grep -q '~ccc' || return 1 + ceph osd crush tree --show-shadow | grep -q '~ccc' || return 1 + ceph osd crush rm-device-class 0 || return 1 + ceph osd tree | grep -q 'aaa' && return 1 + ceph osd crush class ls | grep -q 'aaa' && return 1 # class 'aaa' should be gone + ceph osd crush rm-device-class 1 || return 1 + ceph osd tree | grep -q 'bbb' && return 1 + ceph osd crush class ls | grep -q 'bbb' && return 1 # class 'bbb' should be gone + ceph osd crush rm-device-class 2 || return 1 + ceph osd tree | grep -q 'ccc' && return 1 + ceph osd crush class ls | grep -q 'ccc' && return 1 # class 'ccc' should be gone + ceph osd crush set-device-class asdf all || return 1 + ceph osd tree | grep -q 'asdf' || return 1 + ceph osd crush dump | grep -q '~asdf' || return 1 + ceph osd crush tree --show-shadow | grep -q '~asdf' || return 1 + ceph osd crush rule create-replicated asdf-rule default host asdf || return 1 + ceph osd crush rm-device-class all || return 1 + ceph osd tree | grep -q 'asdf' && return 1 + ceph osd crush class ls | grep -q 'asdf' || return 1 # still referenced by asdf-rule + + ceph osd crush set-device-class abc osd.2 || return 1 + ceph osd crush move osd.2 root=foo rack=foo-rack host=foo-host || return 1 + out=`ceph osd tree |awk '$1 == 2 && $2 == "abc" {print $0}'` + if [ "$out" == "" ]; then + return 1 + fi + + # verify 'crush move' too + ceph osd crush dump | grep -q 'foo~abc' || return 1 + ceph osd crush tree --show-shadow | grep -q 'foo~abc' || return 1 + ceph osd crush dump | grep -q 'foo-rack~abc' || return 1 + ceph osd crush tree --show-shadow | grep -q 'foo-rack~abc' || return 1 + ceph osd crush dump | grep -q 'foo-host~abc' || return 1 + ceph osd crush tree --show-shadow | grep -q 'foo-host~abc' || return 1 + ceph osd crush rm-device-class osd.2 || return 1 + # restore class, so we can continue to test create-replicated + ceph osd crush set-device-class abc osd.2 || return 1 + + ceph osd crush rule create-replicated foo-rule foo host abc || return 1 + + # test that set-device-class cannot implicitly change an existing class + ceph osd crush set-device-class hdd osd.0 || return 1 + expect_failure $dir EBUSY ceph osd crush set-device-class nvme osd.0 || return 1 + + # test class rename + ceph osd crush rm-device-class all || return 1 + ceph osd crush set-device-class class_1 all || return 1 + ceph osd crush class ls | grep 'class_1' || return 1 + ceph osd crush tree --show-shadow | grep 'class_1' || return 1 + ceph osd crush rule create-replicated class_1_rule default host class_1 || return 1 + ceph osd crush class rename class_1 class_2 + ceph osd crush class rename class_1 class_2 # idempotent + ceph osd crush class ls | grep 'class_1' && return 1 + ceph osd crush tree --show-shadow | grep 'class_1' && return 1 + ceph osd crush class ls | grep 'class_2' || return 1 + ceph osd crush tree --show-shadow | grep 'class_2' || return 1 +} + +main crush-classes "$@" + +# Local Variables: +# compile-command: "cd ../../../build ; ln -sf ../src/ceph-disk/ceph_disk/main.py bin/ceph-disk && make -j4 && ../src/test/crush/crush-classes.sh" +# End: diff --git a/qa/standalone/erasure-code/test-erasure-code-plugins.sh b/qa/standalone/erasure-code/test-erasure-code-plugins.sh new file mode 100755 index 00000000..b5648d47 --- /dev/null +++ b/qa/standalone/erasure-code/test-erasure-code-plugins.sh @@ -0,0 +1,118 @@
+#!/usr/bin/env bash +set -x + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +arch=$(uname -m) + +case $arch in + i[[3456]]86*|x86_64*|amd64*) + legacy_jerasure_plugins=(jerasure_generic jerasure_sse3 jerasure_sse4) + legacy_shec_plugins=(shec_generic shec_sse3 shec_sse4) + plugins=(jerasure shec lrc isa) + ;; + aarch64*|arm*) + legacy_jerasure_plugins=(jerasure_generic jerasure_neon) + legacy_shec_plugins=(shec_generic shec_neon) + plugins=(jerasure shec lrc) + ;; + *) + echo "unsupported platform ${arch}." + return 1 + ;; +esac + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:17110" # git grep '\<17110\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + $func $dir || return 1 + done +} + +function TEST_preload_warning() { + local dir=$1 + + for plugin in ${legacy_jerasure_plugins[*]} ${legacy_shec_plugins[*]}; do + setup $dir || return 1 + run_mon $dir a --osd_erasure_code_plugins="${plugin}" || return 1 + run_mgr $dir x || return 1 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1 + run_osd $dir 0 --osd_erasure_code_plugins="${plugin}" || return 1 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1 + grep "WARNING: osd_erasure_code_plugins contains plugin ${plugin}" $dir/mon.a.log || return 1 + grep "WARNING: osd_erasure_code_plugins contains plugin ${plugin}" $dir/osd.0.log || return 1 + teardown $dir || return 1 + done + return 0 +} + +function TEST_preload_no_warning() { + local dir=$1 + + for plugin in ${plugins[*]}; do + setup $dir || return 1 + run_mon $dir a --osd_erasure_code_plugins="${plugin}" || return 1 + run_mgr $dir x || return 1 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1 + run_osd $dir 0 --osd_erasure_code_plugins="${plugin}" || return 1 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1 + ! grep "WARNING: osd_erasure_code_plugins contains plugin" $dir/mon.a.log || return 1 + ! grep "WARNING: osd_erasure_code_plugins contains plugin" $dir/osd.0.log || return 1 + teardown $dir || return 1 + done + + return 0 +} + +function TEST_preload_no_warning_default() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a || return 1 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1 + ! grep "WARNING: osd_erasure_code_plugins" $dir/mon.a.log || return 1 + ! 
grep "WARNING: osd_erasure_code_plugins" $dir/osd.0.log || return 1 + teardown $dir || return 1 + + return 0 +} + +function TEST_ec_profile_warning() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + for id in $(seq 0 2) ; do + run_osd $dir $id || return 1 + done + create_rbd_pool || return 1 + wait_for_clean || return 1 + + for plugin in ${legacy_jerasure_plugins[*]}; do + ceph osd erasure-code-profile set prof-${plugin} crush-failure-domain=osd technique=reed_sol_van plugin=${plugin} || return 1 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1 + grep "WARNING: erasure coding profile prof-${plugin} uses plugin ${plugin}" $dir/mon.a.log || return 1 + done + + for plugin in ${legacy_shec_plugins[*]}; do + ceph osd erasure-code-profile set prof-${plugin} crush-failure-domain=osd plugin=${plugin} || return 1 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1 + grep "WARNING: erasure coding profile prof-${plugin} uses plugin ${plugin}" $dir/mon.a.log || return 1 + done + + teardown $dir || return 1 +} + +main test-erasure-code-plugins "$@" diff --git a/qa/standalone/erasure-code/test-erasure-code.sh b/qa/standalone/erasure-code/test-erasure-code.sh new file mode 100755 index 00000000..e18e673c --- /dev/null +++ b/qa/standalone/erasure-code/test-erasure-code.sh @@ -0,0 +1,333 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2014 Cloudwatt +# Copyright (C) 2014, 2015 Red Hat +# +# Author: Loic Dachary +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. 
+# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7101" # git grep '\<7101\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON --mon-osd-prime-pg-temp=false" + + setup $dir || return 1 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + # check that erasure code plugins are preloaded + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1 + grep 'load: jerasure.*lrc' $dir/mon.a.log || return 1 + for id in $(seq 0 10) ; do + run_osd $dir $id || return 1 + done + create_rbd_pool || return 1 + wait_for_clean || return 1 + # check that erasure code plugins are preloaded + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1 + grep 'load: jerasure.*lrc' $dir/osd.0.log || return 1 + create_erasure_coded_pool ecpool || return 1 + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + $func $dir || return 1 + done + + delete_pool ecpool || return 1 + teardown $dir || return 1 +} + +function create_erasure_coded_pool() { + local poolname=$1 + + ceph osd erasure-code-profile set myprofile \ + crush-failure-domain=osd || return 1 + create_pool $poolname 12 12 erasure myprofile \ + || return 1 + wait_for_clean || return 1 +} + +function rados_put_get() { + local dir=$1 + local poolname=$2 + local objname=${3:-SOMETHING} + + + for marker in AAA BBB CCCC DDDD ; do + printf "%*s" 1024 $marker + done > $dir/ORIGINAL + + # + # get and put an object, compare they are equal + # + rados --pool $poolname put $objname $dir/ORIGINAL || return 1 + rados --pool $poolname get $objname $dir/COPY || return 1 + diff $dir/ORIGINAL $dir/COPY || return 1 + rm $dir/COPY + + # + # take out an OSD used to store the object and + # check the object can still be retrieved, which implies + # recovery + # + local -a initial_osds=($(get_osds $poolname $objname)) + local last=$((${#initial_osds[@]} - 1)) + ceph osd out ${initial_osds[$last]} || return 1 + ! get_osds $poolname $objname | grep '\<'${initial_osds[$last]}'\>' || return 1 + rados --pool $poolname get $objname $dir/COPY || return 1 + diff $dir/ORIGINAL $dir/COPY || return 1 + ceph osd in ${initial_osds[$last]} || return 1 + + rm $dir/ORIGINAL +} + +function rados_osds_out_in() { + local dir=$1 + local poolname=$2 + local objname=${3:-SOMETHING} + + + for marker in FFFF GGGG HHHH IIII ; do + printf "%*s" 1024 $marker + done > $dir/ORIGINAL + + # + # get and put an object, compare they are equal + # + rados --pool $poolname put $objname $dir/ORIGINAL || return 1 + rados --pool $poolname get $objname $dir/COPY || return 1 + diff $dir/ORIGINAL $dir/COPY || return 1 + rm $dir/COPY + + # + # take out two OSDs used to store the object, wait for the cluster + # to be clean (i.e. all PG are clean and active) again which + # implies the PG have been moved to use the remaining OSDs. Check + # the object can still be retrieved. + # + wait_for_clean || return 1 + local osds_list=$(get_osds $poolname $objname) + local -a osds=($osds_list) + for osd in 0 1 ; do + ceph osd out ${osds[$osd]} || return 1 + done + wait_for_clean || return 1 + # + # verify the object is no longer mapped to the osds that are out + # + for osd in 0 1 ; do + ! 
get_osds $poolname $objname | grep '\<'${osds[$osd]}'\>' || return 1 + done + rados --pool $poolname get $objname $dir/COPY || return 1 + diff $dir/ORIGINAL $dir/COPY || return 1 + # + # bring the osds back in, , wait for the cluster + # to be clean (i.e. all PG are clean and active) again which + # implies the PG go back to using the same osds as before + # + for osd in 0 1 ; do + ceph osd in ${osds[$osd]} || return 1 + done + wait_for_clean || return 1 + test "$osds_list" = "$(get_osds $poolname $objname)" || return 1 + rm $dir/ORIGINAL +} + +function TEST_rados_put_get_lrc_advanced() { + local dir=$1 + local poolname=pool-lrc-a + local profile=profile-lrc-a + + ceph osd erasure-code-profile set $profile \ + plugin=lrc \ + mapping=DD_ \ + crush-steps='[ [ "chooseleaf", "osd", 0 ] ]' \ + layers='[ [ "DDc", "" ] ]' || return 1 + create_pool $poolname 12 12 erasure $profile \ + || return 1 + + rados_put_get $dir $poolname || return 1 + + delete_pool $poolname + ceph osd erasure-code-profile rm $profile +} + +function TEST_rados_put_get_lrc_kml() { + local dir=$1 + local poolname=pool-lrc + local profile=profile-lrc + + ceph osd erasure-code-profile set $profile \ + plugin=lrc \ + k=4 m=2 l=3 \ + crush-failure-domain=osd || return 1 + create_pool $poolname 12 12 erasure $profile \ + || return 1 + + rados_put_get $dir $poolname || return 1 + + delete_pool $poolname + ceph osd erasure-code-profile rm $profile +} + +function TEST_rados_put_get_isa() { + if ! erasure_code_plugin_exists isa ; then + echo "SKIP because plugin isa has not been built" + return 0 + fi + local dir=$1 + local poolname=pool-isa + + ceph osd erasure-code-profile set profile-isa \ + plugin=isa \ + crush-failure-domain=osd || return 1 + create_pool $poolname 1 1 erasure profile-isa \ + || return 1 + + rados_put_get $dir $poolname || return 1 + + delete_pool $poolname +} + +function TEST_rados_put_get_jerasure() { + local dir=$1 + + rados_put_get $dir ecpool || return 1 + + local poolname=pool-jerasure + local profile=profile-jerasure + + ceph osd erasure-code-profile set $profile \ + plugin=jerasure \ + k=4 m=2 \ + crush-failure-domain=osd || return 1 + create_pool $poolname 12 12 erasure $profile \ + || return 1 + + rados_put_get $dir $poolname || return 1 + rados_osds_out_in $dir $poolname || return 1 + + delete_pool $poolname + ceph osd erasure-code-profile rm $profile +} + +function TEST_rados_put_get_shec() { + local dir=$1 + + local poolname=pool-shec + local profile=profile-shec + + ceph osd erasure-code-profile set $profile \ + plugin=shec \ + k=2 m=1 c=1 \ + crush-failure-domain=osd || return 1 + create_pool $poolname 12 12 erasure $profile \ + || return 1 + + rados_put_get $dir $poolname || return 1 + + delete_pool $poolname + ceph osd erasure-code-profile rm $profile +} + +function TEST_alignment_constraints() { + local payload=ABC + echo "$payload" > $dir/ORIGINAL + # + # Verify that the rados command enforces alignment constraints + # imposed by the stripe width + # See http://tracker.ceph.com/issues/8622 + # + local stripe_unit=$(ceph-conf --show-config-value osd_pool_erasure_code_stripe_unit) + eval local $(ceph osd erasure-code-profile get myprofile | grep k=) + local block_size=$((stripe_unit * k - 1)) + dd if=/dev/zero of=$dir/ORIGINAL bs=$block_size count=2 + rados --block-size=$block_size \ + --pool ecpool put UNALIGNED $dir/ORIGINAL || return 1 + rm $dir/ORIGINAL +} + +function chunk_size() { + echo $(ceph-conf --show-config-value osd_pool_erasure_code_stripe_unit) +} + +# +# By default an object will 
be split in two (k=2) with the first part +# of the object in the first OSD of the up set and the second part in +# the next OSD in the up set. This layout is defined by the mapping +# parameter and this function helps verify that the first and second +# part of the object are located in the OSD where they should be. +# +function verify_chunk_mapping() { + local dir=$1 + local poolname=$2 + local first=$3 + local second=$4 + + local payload=$(printf '%*s' $(chunk_size) FIRST$poolname ; printf '%*s' $(chunk_size) SECOND$poolname) + echo -n "$payload" > $dir/ORIGINAL + + rados --pool $poolname put SOMETHING$poolname $dir/ORIGINAL || return 1 + rados --pool $poolname get SOMETHING$poolname $dir/COPY || return 1 + local -a osds=($(get_osds $poolname SOMETHING$poolname)) + for (( i = 0; i < ${#osds[@]}; i++ )) ; do + ceph daemon osd.${osds[$i]} flush_journal + done + diff $dir/ORIGINAL $dir/COPY || return 1 + rm $dir/COPY + + local -a osds=($(get_osds $poolname SOMETHING$poolname)) + objectstore_tool $dir ${osds[$first]} SOMETHING$poolname get-bytes | grep --quiet FIRST$poolname || return 1 + objectstore_tool $dir ${osds[$second]} SOMETHING$poolname get-bytes | grep --quiet SECOND$poolname || return 1 +} + +function TEST_chunk_mapping() { + local dir=$1 + + # + # mapping=DD_ is the default: + # first OSD (i.e. 0) in the up set has the first part of the object + # second OSD (i.e. 1) in the up set has the second part of the object + # + verify_chunk_mapping $dir ecpool 0 1 || return 1 + + ceph osd erasure-code-profile set remap-profile \ + plugin=lrc \ + layers='[ [ "cDD", "" ] ]' \ + mapping='_DD' \ + crush-steps='[ [ "choose", "osd", 0 ] ]' || return 1 + ceph osd erasure-code-profile get remap-profile + create_pool remap-pool 12 12 erasure remap-profile \ + || return 1 + + # + # mapping=_DD + # second OSD (i.e. 1) in the up set has the first part of the object + # third OSD (i.e. 2) in the up set has the second part of the object + # + verify_chunk_mapping $dir remap-pool 1 2 || return 1 + + delete_pool remap-pool + ceph osd erasure-code-profile rm remap-profile +} + +main test-erasure-code "$@" + +# Local Variables: +# compile-command: "cd ../.. ; make -j4 && test/erasure-code/test-erasure-code.sh" +# End: diff --git a/qa/standalone/erasure-code/test-erasure-eio.sh b/qa/standalone/erasure-code/test-erasure-eio.sh new file mode 100755 index 00000000..fb1a1a2c --- /dev/null +++ b/qa/standalone/erasure-code/test-erasure-eio.sh @@ -0,0 +1,670 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2015 Red Hat +# +# +# Author: Kefu Chai +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. 
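+#
+# The tests below follow a common pattern: write an object to a k+m
+# erasure-coded pool, corrupt or remove individual shards, and verify that
+# reads still succeed while enough shards survive, then fail once too many
+# are damaged. A rough sketch of that flow, using rados_put/rados_get
+# defined further down and the inject_eio helper pulled in from
+# ceph-helpers.sh (pool and object names here are illustrative only):
+#
+#   rados_put $dir pool-jerasure myobj
+#   inject_eio ec data pool-jerasure myobj $dir 0   # one bad shard: read still works
+#   rados_get $dir pool-jerasure myobj
+#   inject_eio ec data pool-jerasure myobj $dir 1   # second bad shard: read is expected to fail
+#   rados_get $dir pool-jerasure myobj fail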
+# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7112" # git grep '\<7112\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + create_pool rbd 4 || return 1 + + # check that erasure code plugins are preloaded + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1 + grep 'load: jerasure.*lrc' $dir/mon.a.log || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function setup_osds() { + local count=$1 + shift + + for id in $(seq 0 $(expr $count - 1)) ; do + run_osd $dir $id || return 1 + done + + # check that erasure code plugins are preloaded + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1 + grep 'load: jerasure.*lrc' $dir/osd.0.log || return 1 +} + +function get_state() { + local pgid=$1 + local sname=state + ceph --format json pg dump pgs 2>/dev/null | \ + jq -r ".pg_stats | .[] | select(.pgid==\"$pgid\") | .$sname" +} + +function create_erasure_coded_pool() { + local poolname=$1 + shift + local k=$1 + shift + local m=$1 + shift + + ceph osd erasure-code-profile set myprofile \ + plugin=jerasure \ + k=$k m=$m \ + crush-failure-domain=osd || return 1 + create_pool $poolname 1 1 erasure myprofile \ + || return 1 + wait_for_clean || return 1 +} + +function delete_erasure_coded_pool() { + local poolname=$1 + ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it + ceph osd erasure-code-profile rm myprofile +} + +function rados_put() { + local dir=$1 + local poolname=$2 + local objname=${3:-SOMETHING} + + for marker in AAA BBB CCCC DDDD ; do + printf "%*s" 1024 $marker + done > $dir/ORIGINAL + # + # get and put an object, compare they are equal + # + rados --pool $poolname put $objname $dir/ORIGINAL || return 1 +} + +function rados_get() { + local dir=$1 + local poolname=$2 + local objname=${3:-SOMETHING} + local expect=${4:-ok} + + # + # Expect a failure to get object + # + if [ $expect = "fail" ]; + then + ! rados --pool $poolname get $objname $dir/COPY + return + fi + # + # get an object, compare with $dir/ORIGINAL + # + rados --pool $poolname get $objname $dir/COPY || return 1 + diff $dir/ORIGINAL $dir/COPY || return 1 + rm $dir/COPY +} + + +function inject_remove() { + local pooltype=$1 + shift + local which=$1 + shift + local poolname=$1 + shift + local objname=$1 + shift + local dir=$1 + shift + local shard_id=$1 + shift + + local -a initial_osds=($(get_osds $poolname $objname)) + local osd_id=${initial_osds[$shard_id]} + objectstore_tool $dir $osd_id $objname remove || return 1 +} + +# Test with an inject error +function rados_put_get_data() { + local inject=$1 + shift + local dir=$1 + shift + local shard_id=$1 + shift + local arg=$1 + + # inject eio to speificied shard + # + local poolname=pool-jerasure + local objname=obj-$inject-$$-$shard_id + rados_put $dir $poolname $objname || return 1 + inject_$inject ec data $poolname $objname $dir $shard_id || return 1 + rados_get $dir $poolname $objname || return 1 + + if [ "$arg" = "recovery" ]; + then + # + # take out the last OSD used to store the object, + # bring it back, and check for clean PGs which means + # recovery didn't crash the primary. 
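+        # Concretely, the steps below stop the daemon, mark the OSD out so
+        # the PG maps away from it, check that the object no longer maps to
+        # that OSD, then mark it in again, restart it, and wait for the PGs
+        # to become active+clean.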
+ # + local -a initial_osds=($(get_osds $poolname $objname)) + local last_osd=${initial_osds[-1]} + # Kill OSD + kill_daemons $dir TERM osd.${last_osd} >&2 < /dev/null || return 1 + ceph osd out ${last_osd} || return 1 + ! get_osds $poolname $objname | grep '\<'${last_osd}'\>' || return 1 + ceph osd in ${last_osd} || return 1 + activate_osd $dir ${last_osd} || return 1 + wait_for_clean || return 1 + fi + + shard_id=$(expr $shard_id + 1) + inject_$inject ec data $poolname $objname $dir $shard_id || return 1 + # Now 2 out of 3 shards get an error, so should fail + rados_get $dir $poolname $objname fail || return 1 + rm $dir/ORIGINAL +} + +# Change the size of the specified shard +# +function set_size() { + local objname=$1 + shift + local dir=$1 + shift + local shard_id=$1 + shift + local bytes=$1 + shift + local mode=${1} + + local poolname=pool-jerasure + local -a initial_osds=($(get_osds $poolname $objname)) + local osd_id=${initial_osds[$shard_id]} + ceph osd set noout + if [ "$mode" = "add" ]; + then + objectstore_tool $dir $osd_id $objname get-bytes $dir/CORRUPT || return 1 + dd if=/dev/urandom bs=$bytes count=1 >> $dir/CORRUPT + elif [ "$bytes" = "0" ]; + then + touch $dir/CORRUPT + else + dd if=/dev/urandom bs=$bytes count=1 of=$dir/CORRUPT + fi + objectstore_tool $dir $osd_id $objname set-bytes $dir/CORRUPT || return 1 + rm -f $dir/CORRUPT + ceph osd unset noout +} + +function rados_get_data_bad_size() { + local dir=$1 + shift + local shard_id=$1 + shift + local bytes=$1 + shift + local mode=${1:-set} + + local poolname=pool-jerasure + local objname=obj-size-$$-$shard_id-$bytes + rados_put $dir $poolname $objname || return 1 + + # Change the size of the specified shard + # + set_size $objname $dir $shard_id $bytes $mode || return 1 + + rados_get $dir $poolname $objname || return 1 + + # Leave objname and modify another shard + shard_id=$(expr $shard_id + 1) + set_size $objname $dir $shard_id $bytes $mode || return 1 + rados_get $dir $poolname $objname fail || return 1 + rm $dir/ORIGINAL +} + +# +# These two test cases try to validate the following behavior: +# For an object on an EC pool, if there is one shard having a read error +# (either primary or replica), the client can still read the object. +# +# If 2 shards have read errors the client will get an error. 
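+#
+# The pools used by these tests are created with k=2 m=1, so any 2 of the
+# 3 shards are enough to reconstruct an object. Roughly speaking, a 4 KiB
+# object is stored as two 2 KiB data chunks plus one 2 KiB coding chunk,
+# and losing any single chunk still leaves enough to rebuild it.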
+# +function TEST_rados_get_subread_eio_shard_0() { + local dir=$1 + setup_osds 4 || return 1 + + local poolname=pool-jerasure + create_erasure_coded_pool $poolname 2 1 || return 1 + # inject eio on primary OSD (0) and replica OSD (1) + local shard_id=0 + rados_put_get_data eio $dir $shard_id || return 1 + delete_erasure_coded_pool $poolname +} + +function TEST_rados_get_subread_eio_shard_1() { + local dir=$1 + setup_osds 4 || return 1 + + local poolname=pool-jerasure + create_erasure_coded_pool $poolname 2 1 || return 1 + # inject eio into replica OSDs (1) and (2) + local shard_id=1 + rados_put_get_data eio $dir $shard_id || return 1 + delete_erasure_coded_pool $poolname +} + +# We don't remove the object from the primary because +# that just causes it to appear to be missing + +function TEST_rados_get_subread_missing() { + local dir=$1 + setup_osds 4 || return 1 + + local poolname=pool-jerasure + create_erasure_coded_pool $poolname 2 1 || return 1 + # inject remove into replica OSDs (1) and (2) + local shard_id=1 + rados_put_get_data remove $dir $shard_id || return 1 + delete_erasure_coded_pool $poolname +} + +# +# +# These two test cases try to validate the following behavior: +# For an object on an EC pool, if there is one shard with an incorrect +# size, which will cause an internal read error, the client can still read the object. +# +# If 2 shards have an incorrect size, the client will get an error. +# +function TEST_rados_get_bad_size_shard_0() { + local dir=$1 + setup_osds 4 || return 1 + + local poolname=pool-jerasure + create_erasure_coded_pool $poolname 2 1 || return 1 + # Set incorrect size into primary OSD (0) and replica OSD (1) + local shard_id=0 + rados_get_data_bad_size $dir $shard_id 10 || return 1 + rados_get_data_bad_size $dir $shard_id 0 || return 1 + rados_get_data_bad_size $dir $shard_id 256 add || return 1 + delete_erasure_coded_pool $poolname +} + +function TEST_rados_get_bad_size_shard_1() { + local dir=$1 + setup_osds 4 || return 1 + + local poolname=pool-jerasure + create_erasure_coded_pool $poolname 2 1 || return 1 + # Set incorrect size into replica OSDs (1) and (2) + local shard_id=1 + rados_get_data_bad_size $dir $shard_id 10 || return 1 + rados_get_data_bad_size $dir $shard_id 0 || return 1 + rados_get_data_bad_size $dir $shard_id 256 add || return 1 + delete_erasure_coded_pool $poolname +} + +function TEST_rados_get_with_subreadall_eio_shard_0() { + local dir=$1 + local shard_id=0 + + setup_osds 4 || return 1 + + local poolname=pool-jerasure + create_erasure_coded_pool $poolname 2 1 || return 1 + # inject eio on primary OSD (0) + rados_put_get_data eio $dir $shard_id recovery || return 1 + + delete_erasure_coded_pool $poolname +} + +function TEST_rados_get_with_subreadall_eio_shard_1() { + local dir=$1 + local shard_id=1 + + setup_osds 4 || return 1 + + local poolname=pool-jerasure + create_erasure_coded_pool $poolname 2 1 || return 1 + # inject eio on replica OSD (1) + rados_put_get_data eio $dir $shard_id recovery || return 1 + + delete_erasure_coded_pool $poolname +} + +# Test recovery when the object attr read returns an error +function TEST_ec_object_attr_read_error() { + local dir=$1 + local objname=myobject + + setup_osds 7 || return 1 + + local poolname=pool-jerasure + create_erasure_coded_pool $poolname 3 2 || return 1 + + local primary_osd=$(get_primary $poolname $objname) + # Kill primary OSD + kill_daemons $dir TERM osd.${primary_osd} >&2 < /dev/null || return 1 + + # Write data + rados_put $dir $poolname $objname || return 1 + + # Inject eio, shard 1 is the one 
read attr + inject_eio ec mdata $poolname $objname $dir 1 || return 1 + + # Restart OSD + activate_osd $dir ${primary_osd} || return 1 + + # Cluster should recover this object + wait_for_clean || return 1 + + rados_get $dir $poolname myobject || return 1 + + delete_erasure_coded_pool $poolname +} + +# Test recovery the first k copies aren't all available +function TEST_ec_single_recovery_error() { + local dir=$1 + local objname=myobject + + setup_osds 7 || return 1 + + local poolname=pool-jerasure + create_erasure_coded_pool $poolname 3 2 || return 1 + + rados_put $dir $poolname $objname || return 1 + inject_eio ec data $poolname $objname $dir 0 || return 1 + + local -a initial_osds=($(get_osds $poolname $objname)) + local last_osd=${initial_osds[-1]} + # Kill OSD + kill_daemons $dir TERM osd.${last_osd} >&2 < /dev/null || return 1 + ceph osd down ${last_osd} || return 1 + ceph osd out ${last_osd} || return 1 + + # Cluster should recover this object + wait_for_clean || return 1 + + rados_get $dir $poolname myobject || return 1 + + delete_erasure_coded_pool $poolname +} + +# Test recovery when repeated reads are needed due to EIO +function TEST_ec_recovery_multiple_errors() { + local dir=$1 + local objname=myobject + + setup_osds 9 || return 1 + + local poolname=pool-jerasure + create_erasure_coded_pool $poolname 4 4 || return 1 + + rados_put $dir $poolname $objname || return 1 + inject_eio ec data $poolname $objname $dir 0 || return 1 + # first read will try shards 0,1,2 when 0 gets EIO, shard 3 gets + # tried as well. Make that fail to test multiple-EIO handling. + inject_eio ec data $poolname $objname $dir 3 || return 1 + inject_eio ec data $poolname $objname $dir 4 || return 1 + + local -a initial_osds=($(get_osds $poolname $objname)) + local last_osd=${initial_osds[-1]} + # Kill OSD + kill_daemons $dir TERM osd.${last_osd} >&2 < /dev/null || return 1 + ceph osd down ${last_osd} || return 1 + ceph osd out ${last_osd} || return 1 + + # Cluster should recover this object + wait_for_clean || return 1 + + rados_get $dir $poolname myobject || return 1 + + delete_erasure_coded_pool $poolname +} + +# Test recovery when there's only one shard to recover, but multiple +# objects recovering in one RecoveryOp +function TEST_ec_recovery_multiple_objects() { + local dir=$1 + local objname=myobject + + ORIG_ARGS=$CEPH_ARGS + CEPH_ARGS+=' --osd-recovery-max-single-start 3 --osd-recovery-max-active 3 ' + setup_osds 7 || return 1 + CEPH_ARGS=$ORIG_ARGS + + local poolname=pool-jerasure + create_erasure_coded_pool $poolname 3 2 || return 1 + + rados_put $dir $poolname test1 + rados_put $dir $poolname test2 + rados_put $dir $poolname test3 + + ceph osd out 0 || return 1 + + # Cluster should recover these objects all at once + wait_for_clean || return 1 + + rados_get $dir $poolname test1 + rados_get $dir $poolname test2 + rados_get $dir $poolname test3 + + delete_erasure_coded_pool $poolname +} + +# test multi-object recovery when the one missing shard gets EIO +function TEST_ec_recovery_multiple_objects_eio() { + local dir=$1 + local objname=myobject + + ORIG_ARGS=$CEPH_ARGS + CEPH_ARGS+=' --osd-recovery-max-single-start 3 --osd-recovery-max-active 3 ' + setup_osds 7 || return 1 + CEPH_ARGS=$ORIG_ARGS + + local poolname=pool-jerasure + create_erasure_coded_pool $poolname 3 2 || return 1 + + rados_put $dir $poolname test1 + rados_put $dir $poolname test2 + rados_put $dir $poolname test3 + + # can't read from this shard anymore + inject_eio ec data $poolname $objname $dir 0 || return 1 + ceph osd out 0 || 
return 1 + + # Cluster should recover these objects all at once + wait_for_clean || return 1 + + rados_get $dir $poolname test1 + rados_get $dir $poolname test2 + rados_get $dir $poolname test3 + + delete_erasure_coded_pool $poolname +} + +# Test backfill with unfound object +function TEST_ec_backfill_unfound() { + local dir=$1 + local objname=myobject + local lastobj=300 + # Must be between 1 and $lastobj + local testobj=obj250 + + ORIG_ARGS=$CEPH_ARGS + CEPH_ARGS+=' --osd_min_pg_log_entries=5 --osd_max_pg_log_entries=10' + setup_osds 5 || return 1 + CEPH_ARGS=$ORIG_ARGS + + local poolname=pool-jerasure + create_erasure_coded_pool $poolname 3 2 || return 1 + + ceph pg dump pgs + + rados_put $dir $poolname $objname || return 1 + + local -a initial_osds=($(get_osds $poolname $objname)) + local last_osd=${initial_osds[-1]} + kill_daemons $dir TERM osd.${last_osd} 2>&2 < /dev/null || return 1 + ceph osd down ${last_osd} || return 1 + ceph osd out ${last_osd} || return 1 + + ceph pg dump pgs + + dd if=/dev/urandom of=${dir}/ORIGINAL bs=1024 count=4 + for i in $(seq 1 $lastobj) + do + rados --pool $poolname put obj${i} $dir/ORIGINAL || return 1 + done + + inject_eio ec data $poolname $testobj $dir 0 || return 1 + inject_eio ec data $poolname $testobj $dir 1 || return 1 + + activate_osd $dir ${last_osd} || return 1 + ceph osd in ${last_osd} || return 1 + + sleep 15 + + for tmp in $(seq 1 100); do + state=$(get_state 2.0) + echo $state | grep backfill_unfound + if [ "$?" = "0" ]; then + break + fi + echo $state + sleep 1 + done + + ceph pg dump pgs + ceph pg 2.0 list_unfound | grep -q $testobj || return 1 + + # Command should hang because object is unfound + timeout 5 rados -p $poolname get $testobj $dir/CHECK + test $? = "124" || return 1 + + ceph pg 2.0 mark_unfound_lost delete + + wait_for_clean || return 1 + + for i in $(seq 1 $lastobj) + do + if [ obj${i} = "$testobj" ]; then + # Doesn't exist anymore + ! rados -p $poolname get $testobj $dir/CHECK || return 1 + else + rados --pool $poolname get obj${i} $dir/CHECK || return 1 + diff -q $dir/ORIGINAL $dir/CHECK || return 1 + fi + done + + rm -f ${dir}/ORIGINAL ${dir}/CHECK + + delete_erasure_coded_pool $poolname +} + +# Test recovery with unfound object +function TEST_ec_recovery_unfound() { + local dir=$1 + local objname=myobject + local lastobj=100 + # Must be between 1 and $lastobj + local testobj=obj75 + + ORIG_ARGS=$CEPH_ARGS + CEPH_ARGS+=' --osd-recovery-max-single-start 3 --osd-recovery-max-active 3 ' + CEPH_ARGS+=' --osd_min_pg_log_entries=5 --osd_max_pg_log_entries=10' + setup_osds 5 || return 1 + CEPH_ARGS=$ORIG_ARGS + + local poolname=pool-jerasure + create_erasure_coded_pool $poolname 3 2 || return 1 + + ceph pg dump pgs + + rados_put $dir $poolname $objname || return 1 + + local -a initial_osds=($(get_osds $poolname $objname)) + local last_osd=${initial_osds[-1]} + kill_daemons $dir TERM osd.${last_osd} 2>&2 < /dev/null || return 1 + ceph osd down ${last_osd} || return 1 + ceph osd out ${last_osd} || return 1 + + ceph pg dump pgs + + dd if=/dev/urandom of=${dir}/ORIGINAL bs=1024 count=4 + for i in $(seq 1 $lastobj) + do + rados --pool $poolname put obj${i} $dir/ORIGINAL || return 1 + done + + inject_eio ec data $poolname $testobj $dir 0 || return 1 + inject_eio ec data $poolname $testobj $dir 1 || return 1 + + activate_osd $dir ${last_osd} || return 1 + ceph osd in ${last_osd} || return 1 + + sleep 15 + + for tmp in $(seq 1 100); do + state=$(get_state 2.0) + echo $state | grep recovery_unfound + if [ "$?" 
= "0" ]; then + break + fi + echo "$state " + sleep 1 + done + + ceph pg dump pgs + ceph pg 2.0 list_unfound | grep -q $testobj || return 1 + + # Command should hang because object is unfound + timeout 5 rados -p $poolname get $testobj $dir/CHECK + test $? = "124" || return 1 + + ceph pg 2.0 mark_unfound_lost delete + + wait_for_clean || return 1 + + for i in $(seq 1 $lastobj) + do + if [ obj${i} = "$testobj" ]; then + # Doesn't exist anymore + ! rados -p $poolname get $testobj $dir/CHECK || return 1 + else + rados --pool $poolname get obj${i} $dir/CHECK || return 1 + diff -q $dir/ORIGINAL $dir/CHECK || return 1 + fi + done + + rm -f ${dir}/ORIGINAL ${dir}/CHECK + + delete_erasure_coded_pool $poolname +} + +main test-erasure-eio "$@" + +# Local Variables: +# compile-command: "cd ../.. ; make -j4 && test/erasure-code/test-erasure-eio.sh" +# End: diff --git a/qa/standalone/mgr/balancer.sh b/qa/standalone/mgr/balancer.sh new file mode 100755 index 00000000..7e87cbf4 --- /dev/null +++ b/qa/standalone/mgr/balancer.sh @@ -0,0 +1,221 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2019 Red Hat +# +# Author: David Zafman +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7102" # git grep '\<7102\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + $func $dir || return 1 + done +} + +TEST_POOL1=test1 +TEST_POOL2=test2 + +function TEST_balancer() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + create_pool $TEST_POOL1 8 + create_pool $TEST_POOL2 8 + + wait_for_clean || return 1 + + ceph pg dump pgs + ceph osd set-require-min-compat-client luminous + ceph balancer status || return 1 + eval MODE=$(ceph balancer status | jq '.mode') + test $MODE = "none" || return 1 + ACTIVE=$(ceph balancer status | jq '.active') + test $ACTIVE = "false" || return 1 + + ceph balancer ls || return 1 + PLANS=$(ceph balancer ls) + test "$PLANS" = "[]" || return 1 + ceph balancer eval || return 1 + EVAL="$(ceph balancer eval)" + test "$EVAL" = "current cluster score 0.000000 (lower is better)" + ceph balancer eval-verbose || return 1 + + ceph balancer pool add $TEST_POOL1 || return 1 + ceph balancer pool add $TEST_POOL2 || return 1 + ceph balancer pool ls || return 1 + eval POOL=$(ceph balancer pool ls | jq 'sort | .[0]') + test "$POOL" = "$TEST_POOL1" || return 1 + eval POOL=$(ceph balancer pool ls | jq 'sort | .[1]') + test "$POOL" = "$TEST_POOL2" || return 1 + ceph balancer pool rm $TEST_POOL1 || return 1 + ceph balancer pool rm $TEST_POOL2 || return 1 + ceph balancer pool ls || return 1 + ceph balancer pool add $TEST_POOL1 || return 1 + + ceph balancer mode crush-compat || return 1 + ceph balancer status || return 1 + eval MODE=$(ceph balancer 
status | jq '.mode') + test $MODE = "crush-compat" || return 1 + ! ceph balancer optimize plan_crush $TEST_POOL1 || return 1 + ceph balancer status || return 1 + eval RESULT=$(ceph balancer status | jq '.optimize_result') + test "$RESULT" = "Distribution is already perfect" || return 1 + + ceph balancer on || return 1 + ACTIVE=$(ceph balancer status | jq '.active') + test $ACTIVE = "true" || return 1 + sleep 2 + ceph balancer status || return 1 + ceph balancer off || return 1 + ACTIVE=$(ceph balancer status | jq '.active') + test $ACTIVE = "false" || return 1 + sleep 2 + + ceph balancer reset || return 1 + + ceph balancer mode upmap || return 1 + ceph balancer status || return 1 + eval MODE=$(ceph balancer status | jq '.mode') + test $MODE = "upmap" || return 1 + ! ceph balancer optimize plan_upmap $TEST_POOL || return 1 + ceph balancer status || return 1 + eval RESULT=$(ceph balancer status | jq '.optimize_result') + test "$RESULT" = "Unable to find further optimization, or pool(s) pg_num is decreasing, or distribution is already perfect" || return 1 + + ceph balancer on || return 1 + ACTIVE=$(ceph balancer status | jq '.active') + test $ACTIVE = "true" || return 1 + sleep 2 + ceph balancer status || return 1 + ceph balancer off || return 1 + ACTIVE=$(ceph balancer status | jq '.active') + test $ACTIVE = "false" || return 1 + + teardown $dir || return 1 +} + +function TEST_balancer2() { + local dir=$1 + TEST_PGS1=118 + TEST_PGS2=132 + TOTAL_PGS=$(expr $TEST_PGS1 + $TEST_PGS2) + OSDS=5 + DEFAULT_REPLICAS=3 + # Integer average of PGS per OSD (70.8), so each OSD >= this + FINAL_PER_OSD1=$(expr \( $TEST_PGS1 \* $DEFAULT_REPLICAS \) / $OSDS) + # Integer average of PGS per OSD (150) + FINAL_PER_OSD2=$(expr \( \( $TEST_PGS1 + $TEST_PGS2 \) \* $DEFAULT_REPLICAS \) / $OSDS) + + CEPH_ARGS+="--osd_pool_default_pg_autoscale_mode=off " + CEPH_ARGS+="--debug_osd=20 " + setup $dir || return 1 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + for i in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $i || return 1 + done + + ceph osd set-require-min-compat-client luminous + ceph config set mgr mgr/balancer/upmap_max_deviation 1 + ceph balancer mode upmap || return 1 + ceph balancer on || return 1 + ceph config set mgr mgr/balancer/sleep_interval 5 + + create_pool $TEST_POOL1 $TEST_PGS1 + + wait_for_clean || return 1 + + # Wait up to 2 minutes + OK=no + for i in $(seq 1 25) + do + sleep 5 + if grep -q "Optimization plan is almost perfect" $dir/mgr.x.log + then + OK=yes + break + fi + done + test $OK = "yes" || return 1 + # Plan is found, but PGs still need to move + sleep 30 + ceph osd df + + PGS=$(ceph osd df --format=json-pretty | jq '.nodes[0].pgs') + test $PGS -ge $FINAL_PER_OSD1 || return 1 + PGS=$(ceph osd df --format=json-pretty | jq '.nodes[1].pgs') + test $PGS -ge $FINAL_PER_OSD1 || return 1 + PGS=$(ceph osd df --format=json-pretty | jq '.nodes[2].pgs') + test $PGS -ge $FINAL_PER_OSD1 || return 1 + PGS=$(ceph osd df --format=json-pretty | jq '.nodes[3].pgs') + test $PGS -ge $FINAL_PER_OSD1 || return 1 + PGS=$(ceph osd df --format=json-pretty | jq '.nodes[4].pgs') + test $PGS -ge $FINAL_PER_OSD1 || return 1 + + create_pool $TEST_POOL2 $TEST_PGS2 + + # Wait up to 2 minutes + OK=no + for i in $(seq 1 25) + do + sleep 5 + COUNT=$(grep "Optimization plan is almost perfect" $dir/mgr.x.log | wc -l) + if test $COUNT = "2" + then + OK=yes + break + fi + done + test $OK = "yes" || return 1 + # Plan is found, but PGs still need to move + sleep 30 + ceph osd df + + # We should be with plue or minus 1 
of FINAL_PER_OSD2 + # This is because here each pool is balanced independently + MIN=$(expr $FINAL_PER_OSD2 - 1) + MAX=$(expr $FINAL_PER_OSD2 + 1) + PGS=$(ceph osd df --format=json-pretty | jq '.nodes[0].pgs') + test $PGS -ge $MIN -a $PGS -le $MAX || return 1 + PGS=$(ceph osd df --format=json-pretty | jq '.nodes[1].pgs') + test $PGS -ge $MIN -a $PGS -le $MAX || return 1 + PGS=$(ceph osd df --format=json-pretty | jq '.nodes[2].pgs') + test $PGS -ge $MIN -a $PGS -le $MAX || return 1 + PGS=$(ceph osd df --format=json-pretty | jq '.nodes[3].pgs') + test $PGS -ge $MIN -a $PGS -le $MAX || return 1 + PGS=$(ceph osd df --format=json-pretty | jq '.nodes[4].pgs') + test $PGS -ge $MIN -a $PGS -le $MAX || return 1 + + teardown $dir || return 1 +} + +main balancer "$@" + +# Local Variables: +# compile-command: "make -j4 && ../qa/run-standalone.sh balancer.sh" +# End: diff --git a/qa/standalone/misc/network-ping.sh b/qa/standalone/misc/network-ping.sh new file mode 100755 index 00000000..b2b299d6 --- /dev/null +++ b/qa/standalone/misc/network-ping.sh @@ -0,0 +1,145 @@ +#!/usr/bin/env bash + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7146" # git grep '\<7146\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + CEPH_ARGS+="--debug_disable_randomized_ping=true " + CEPH_ARGS+="--debug_heartbeat_testing_span=5 " + CEPH_ARGS+="--osd_heartbeat_interval=1 " + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_network_ping_test1() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + + sleep 5 + + create_pool foo 16 + + # write some objects + timeout 20 rados bench -p foo 10 write -b 4096 --no-cleanup || return 1 + + # Get 1 cycle worth of ping data "1 minute" + sleep 10 + flush_pg_stats + + CEPH_ARGS='' ceph daemon $(get_asok_path osd.0) dump_osd_network | tee $dir/json + test "$(cat $dir/json | jq '.entries | length')" = "0" || return 1 + test "$(cat $dir/json | jq '.threshold')" = "1000" || return 1 + + CEPH_ARGS='' ceph daemon $(get_asok_path mgr.x) dump_osd_network | tee $dir/json + test "$(cat $dir/json | jq '.entries | length')" = "0" || return 1 + test "$(cat $dir/json | jq '.threshold')" = "1000" || return 1 + + CEPH_ARGS='' ceph daemon $(get_asok_path osd.0) dump_osd_network 0 | tee $dir/json + test "$(cat $dir/json | jq '.entries | length')" = "4" || return 1 + test "$(cat $dir/json | jq '.threshold')" = "0" || return 1 + + CEPH_ARGS='' ceph daemon $(get_asok_path mgr.x) dump_osd_network 0 | tee $dir/json + test "$(cat $dir/json | jq '.entries | length')" = "12" || return 1 + test "$(cat $dir/json | jq '.threshold')" = "0" || return 1 + + # Wait another 4 cycles to get "5 minute interval" + sleep 20 + flush_pg_stats + CEPH_ARGS='' ceph daemon $(get_asok_path osd.0) dump_osd_network | tee $dir/json + test "$(cat $dir/json | jq '.entries | length')" = "0" || return 1 + test "$(cat $dir/json | jq '.threshold')" = "1000" || return 1 + + CEPH_ARGS='' ceph daemon $(get_asok_path mgr.x) dump_osd_network | tee $dir/json + test "$(cat $dir/json | jq '.entries | length')" = "0" || return 1 + test "$(cat $dir/json | jq '.threshold')" = "1000" || return 1 + + CEPH_ARGS='' ceph daemon 
$(get_asok_path osd.0) dump_osd_network 0 | tee $dir/json + test "$(cat $dir/json | jq '.entries | length')" = "4" || return 1 + test "$(cat $dir/json | jq '.threshold')" = "0" || return 1 + + CEPH_ARGS='' ceph daemon $(get_asok_path mgr.x) dump_osd_network 0 | tee $dir/json + test "$(cat $dir/json | jq '.entries | length')" = "12" || return 1 + test "$(cat $dir/json | jq '.threshold')" = "0" || return 1 + + + # Wait another 10 cycles to get "15 minute interval" + sleep 50 + flush_pg_stats + CEPH_ARGS='' ceph daemon $(get_asok_path osd.0) dump_osd_network | tee $dir/json + test "$(cat $dir/json | jq '.entries | length')" = "0" || return 1 + test "$(cat $dir/json | jq '.threshold')" = "1000" || return 1 + + CEPH_ARGS='' ceph daemon $(get_asok_path mgr.x) dump_osd_network | tee $dir/json + test "$(cat $dir/json | jq '.entries | length')" = "0" || return 1 + test "$(cat $dir/json | jq '.threshold')" = "1000" || return 1 + + CEPH_ARGS='' ceph daemon $(get_asok_path osd.0) dump_osd_network 0 | tee $dir/json + test "$(cat $dir/json | jq '.entries | length')" = "4" || return 1 + test "$(cat $dir/json | jq '.threshold')" = "0" || return 1 + + CEPH_ARGS='' ceph daemon $(get_asok_path mgr.x) dump_osd_network 0 | tee $dir/json + test "$(cat $dir/json | jq '.entries | length')" = "12" || return 1 + test "$(cat $dir/json | jq '.threshold')" = "0" || return 1 + + # Just check the threshold output matches the input + CEPH_ARGS='' ceph daemon $(get_asok_path mgr.x) dump_osd_network 99 | tee $dir/json + test "$(cat $dir/json | jq '.threshold')" = "99" || return 1 + CEPH_ARGS='' ceph daemon $(get_asok_path osd.0) dump_osd_network 98 | tee $dir/json + test "$(cat $dir/json | jq '.threshold')" = "98" || return 1 + + rm -f $dir/json +} + +# Test setting of mon_warn_on_slow_ping_time very low to +# get health warning +function TEST_network_ping_test2() { + local dir=$1 + + export CEPH_ARGS + export EXTRA_OPTS=" --mon_warn_on_slow_ping_time=0.001" + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + + sleep 5 + + create_pool foo 16 + + # write some objects + timeout 20 rados bench -p foo 10 write -b 4096 --no-cleanup || return 1 + + # Get at least 1 cycle of ping data (this test runs with 5 second cycles of 1 second pings) + sleep 10 + flush_pg_stats + + ceph health | tee $dir/health + grep -q "Long heartbeat" $dir/health || return 1 + + ceph health detail | tee $dir/health + grep -q "OSD_SLOW_PING_TIME_BACK" $dir/health || return 1 + grep -q "OSD_SLOW_PING_TIME_FRONT" $dir/health || return 1 + rm -f $dir/health +} + +main network-ping "$@" + +# Local Variables: +# compile-command: "cd ../.. 
; make -j4 && ../qa/run-standalone.sh network-ping.sh" +# End: diff --git a/qa/standalone/misc/ok-to-stop.sh b/qa/standalone/misc/ok-to-stop.sh new file mode 100755 index 00000000..5465939e --- /dev/null +++ b/qa/standalone/misc/ok-to-stop.sh @@ -0,0 +1,289 @@ +#!/usr/bin/env bash + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON_A="127.0.0.1:7150" # git grep '\<7150\>' : there must be only one + export CEPH_MON_B="127.0.0.1:7151" # git grep '\<7151\>' : there must be only one + export CEPH_MON_C="127.0.0.1:7152" # git grep '\<7152\>' : there must be only one + export CEPH_MON_D="127.0.0.1:7153" # git grep '\<7153\>' : there must be only one + export CEPH_MON_E="127.0.0.1:7154" # git grep '\<7154\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + export ORIG_CEPH_ARGS="$CEPH_ARGS" + + local funcs=${@:-$(set | ${SED} -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + kill_daemons $dir KILL || return 1 + teardown $dir || return 1 + done +} + +function TEST_1_mon_checks() { + local dir=$1 + + CEPH_ARGS="$ORIG_CEPH_ARGS --mon-host=$CEPH_MON_A " + + run_mon $dir a --public-addr=$CEPH_MON_A || return 1 + + ceph mon ok-to-stop dne || return 1 + ! ceph mon ok-to-stop a || return 1 + + ! ceph mon ok-to-add-offline || return 1 + + ! ceph mon ok-to-rm a || return 1 + ceph mon ok-to-rm dne || return 1 +} + +function TEST_2_mons_checks() { + local dir=$1 + + CEPH_ARGS="$ORIG_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B " + + run_mon $dir a --public-addr=$CEPH_MON_A || return 1 + run_mon $dir b --public-addr=$CEPH_MON_B || return 1 + + ceph mon ok-to-stop dne || return 1 + ! ceph mon ok-to-stop a || return 1 + ! ceph mon ok-to-stop b || return 1 + ! ceph mon ok-to-stop a b || return 1 + + ceph mon ok-to-add-offline || return 1 + + ceph mon ok-to-rm a || return 1 + ceph mon ok-to-rm b || return 1 + ceph mon ok-to-rm dne || return 1 +} + +function TEST_3_mons_checks() { + local dir=$1 + + CEPH_ARGS="$ORIG_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B,$CEPH_MON_C " + + run_mon $dir a --public-addr=$CEPH_MON_A || return 1 + run_mon $dir b --public-addr=$CEPH_MON_B || return 1 + run_mon $dir c --public-addr=$CEPH_MON_C || return 1 + wait_for_quorum 60 3 + + ceph mon ok-to-stop dne || return 1 + ceph mon ok-to-stop a || return 1 + ceph mon ok-to-stop b || return 1 + ceph mon ok-to-stop c || return 1 + ! ceph mon ok-to-stop a b || return 1 + ! ceph mon ok-to-stop b c || return 1 + ! ceph mon ok-to-stop a b c || return 1 + + ceph mon ok-to-add-offline || return 1 + + ceph mon ok-to-rm a || return 1 + ceph mon ok-to-rm b || return 1 + ceph mon ok-to-rm c || return 1 + + kill_daemons $dir KILL mon.b + wait_for_quorum 60 2 + + ! ceph mon ok-to-stop a || return 1 + ceph mon ok-to-stop b || return 1 + ! ceph mon ok-to-stop c || return 1 + + ! ceph mon ok-to-add-offline || return 1 + + ! ceph mon ok-to-rm a || return 1 + ceph mon ok-to-rm b || return 1 + ! 
ceph mon ok-to-rm c || return 1 +} + +function TEST_4_mons_checks() { + local dir=$1 + + CEPH_ARGS="$ORIG_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B,$CEPH_MON_C,$CEPH_MON_D " + + run_mon $dir a --public-addr=$CEPH_MON_A || return 1 + run_mon $dir b --public-addr=$CEPH_MON_B || return 1 + run_mon $dir c --public-addr=$CEPH_MON_C || return 1 + run_mon $dir d --public-addr=$CEPH_MON_D || return 1 + wait_for_quorum 60 4 + + ceph mon ok-to-stop dne || return 1 + ceph mon ok-to-stop a || return 1 + ceph mon ok-to-stop b || return 1 + ceph mon ok-to-stop c || return 1 + ceph mon ok-to-stop d || return 1 + ! ceph mon ok-to-stop a b || return 1 + ! ceph mon ok-to-stop c d || return 1 + + ceph mon ok-to-add-offline || return 1 + + ceph mon ok-to-rm a || return 1 + ceph mon ok-to-rm b || return 1 + ceph mon ok-to-rm c || return 1 + + kill_daemons $dir KILL mon.a + wait_for_quorum 60 3 + + ceph mon ok-to-stop a || return 1 + ! ceph mon ok-to-stop b || return 1 + ! ceph mon ok-to-stop c || return 1 + ! ceph mon ok-to-stop d || return 1 + + ceph mon ok-to-add-offline || return 1 + + ceph mon ok-to-rm a || return 1 + ceph mon ok-to-rm b || return 1 + ceph mon ok-to-rm c || return 1 + ceph mon ok-to-rm d || return 1 +} + +function TEST_5_mons_checks() { + local dir=$1 + + CEPH_ARGS="$ORIG_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B,$CEPH_MON_C,$CEPH_MON_D,$CEPH_MON_E " + + run_mon $dir a --public-addr=$CEPH_MON_A || return 1 + run_mon $dir b --public-addr=$CEPH_MON_B || return 1 + run_mon $dir c --public-addr=$CEPH_MON_C || return 1 + run_mon $dir d --public-addr=$CEPH_MON_D || return 1 + run_mon $dir e --public-addr=$CEPH_MON_E || return 1 + wait_for_quorum 60 5 + + ceph mon ok-to-stop dne || return 1 + ceph mon ok-to-stop a || return 1 + ceph mon ok-to-stop b || return 1 + ceph mon ok-to-stop c || return 1 + ceph mon ok-to-stop d || return 1 + ceph mon ok-to-stop e || return 1 + ceph mon ok-to-stop a b || return 1 + ceph mon ok-to-stop c d || return 1 + ! ceph mon ok-to-stop a b c || return 1 + + ceph mon ok-to-add-offline || return 1 + + ceph mon ok-to-rm a || return 1 + ceph mon ok-to-rm b || return 1 + ceph mon ok-to-rm c || return 1 + ceph mon ok-to-rm d || return 1 + ceph mon ok-to-rm e || return 1 + + kill_daemons $dir KILL mon.a + wait_for_quorum 60 4 + + ceph mon ok-to-stop a || return 1 + ceph mon ok-to-stop b || return 1 + ceph mon ok-to-stop c || return 1 + ceph mon ok-to-stop d || return 1 + ceph mon ok-to-stop e || return 1 + + ceph mon ok-to-add-offline || return 1 + + ceph mon ok-to-rm a || return 1 + ceph mon ok-to-rm b || return 1 + ceph mon ok-to-rm c || return 1 + ceph mon ok-to-rm d || return 1 + ceph mon ok-to-rm e || return 1 + + kill_daemons $dir KILL mon.e + wait_for_quorum 60 3 + + ceph mon ok-to-stop a || return 1 + ! ceph mon ok-to-stop b || return 1 + ! ceph mon ok-to-stop c || return 1 + ! ceph mon ok-to-stop d || return 1 + ceph mon ok-to-stop e || return 1 + + ! ceph mon ok-to-add-offline || return 1 + + ceph mon ok-to-rm a || return 1 + ! ceph mon ok-to-rm b || return 1 + ! ceph mon ok-to-rm c || return 1 + ! ceph mon ok-to-rm d || return 1 + ceph mon ok-to-rm e || return 1 +} + +function TEST_0_mds() { + local dir=$1 + + CEPH_ARGS="$ORIG_CEPH_ARGS --mon-host=$CEPH_MON_A " + + run_mon $dir a --public-addr=$CEPH_MON_A || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_mds $dir a || return 1 + + ceph osd pool create meta 1 || return 1 + ceph osd pool create data 1 || return 1 + ceph fs new myfs meta data || return 1 + sleep 5 + + ! 
ceph mds ok-to-stop a || return 1 + ! ceph mds ok-to-stop a dne || return 1 + ceph mds ok-to-stop dne || return 1 + + run_mds $dir b || return 1 + sleep 5 + + ceph mds ok-to-stop a || return 1 + ceph mds ok-to-stop b || return 1 + ! ceph mds ok-to-stop a b || return 1 + ceph mds ok-to-stop a dne1 dne2 || return 1 + ceph mds ok-to-stop b dne || return 1 + ! ceph mds ok-to-stop a b dne || return 1 + ceph mds ok-to-stop dne1 dne2 || return 1 + + kill_daemons $dir KILL mds.a +} + +function TEST_0_osd() { + local dir=$1 + + CEPH_ARGS="$ORIG_CEPH_ARGS --mon-host=$CEPH_MON_A " + + run_mon $dir a --public-addr=$CEPH_MON_A || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + run_osd $dir 3 || return 1 + + ceph osd erasure-code-profile set ec-profile m=2 k=2 crush-failure-domain=osd || return 1 + ceph osd pool create ec 8 erasure ec-profile || return 1 + + wait_for_clean || return 1 + + # with min_size 3, we can stop only 1 osd + ceph osd pool set ec min_size 3 || return 1 + wait_for_clean || return 1 + + ceph osd ok-to-stop 0 || return 1 + ceph osd ok-to-stop 1 || return 1 + ceph osd ok-to-stop 2 || return 1 + ceph osd ok-to-stop 3 || return 1 + ! ceph osd ok-to-stop 0 1 || return 1 + ! ceph osd ok-to-stop 2 3 || return 1 + + # with min_size 2 we can stop 1 osds + ceph osd pool set ec min_size 2 || return 1 + wait_for_clean || return 1 + + ceph osd ok-to-stop 0 1 || return 1 + ceph osd ok-to-stop 2 3 || return 1 + ! ceph osd ok-to-stop 0 1 2 || return 1 + ! ceph osd ok-to-stop 1 2 3 || return 1 + + # we should get the same result with one of the osds already down + kill_daemons $dir TERM osd.0 || return 1 + ceph osd down 0 || return 1 + wait_for_peered || return 1 + + ceph osd ok-to-stop 0 || return 1 + ceph osd ok-to-stop 0 1 || return 1 + ! ceph osd ok-to-stop 0 1 2 || return 1 + ! ceph osd ok-to-stop 1 2 3 || return 1 +} + + +main ok-to-stop "$@" diff --git a/qa/standalone/misc/rados-striper.sh b/qa/standalone/misc/rados-striper.sh new file mode 100755 index 00000000..be6349b8 --- /dev/null +++ b/qa/standalone/misc/rados-striper.sh @@ -0,0 +1,101 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2014 Red Hat +# +# Author: Sebastien Ponce +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. 
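+#
+# The test below drives the rados striper CLI: an object written with
+# "rados --striper put" is stored as one or more striped pieces whose
+# names carry a 16-hex-digit suffix (the first piece being
+# <object>.0000000000000000), and the same data can be read back either
+# through the striper or directly from the pieces. A small sketch of the
+# round trip, assuming a hypothetical scratch file ./payload:
+#
+#   rados --pool rbd --striper put toy ./payload
+#   rados --pool rbd --striper stat toy
+#   rados --pool rbd stat toy.0000000000000000
+#   rados --pool rbd --striper get toy ./copy && diff ./payload ./copy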
+# +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7116" # git grep '\<7116\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + # setup + setup $dir || return 1 + + # create a cluster with one monitor and three osds + run_mon $dir a || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + create_rbd_pool || return 1 + + # create toyfile + dd if=/dev/urandom of=$dir/toyfile bs=1234 count=1 + + # put a striped object + rados --pool rbd --striper put toyfile $dir/toyfile || return 1 + + # stat it, with and without striping + rados --pool rbd --striper stat toyfile | cut -d ',' -f 2 > $dir/stripedStat || return 1 + rados --pool rbd stat toyfile.0000000000000000 | cut -d ',' -f 2 > $dir/stat || return 1 + echo ' size 1234' > $dir/refstat + diff -w $dir/stripedStat $dir/refstat || return 1 + diff -w $dir/stat $dir/refstat || return 1 + rados --pool rbd stat toyfile >& $dir/staterror + grep -q 'No such file or directory' $dir/staterror || return 1 + + # get the file back with and without striping + rados --pool rbd --striper get toyfile $dir/stripedGroup || return 1 + diff -w $dir/toyfile $dir/stripedGroup || return 1 + rados --pool rbd get toyfile.0000000000000000 $dir/nonSTripedGroup || return 1 + diff -w $dir/toyfile $dir/nonSTripedGroup || return 1 + + # test truncate + rados --pool rbd --striper truncate toyfile 12 + rados --pool rbd --striper stat toyfile | cut -d ',' -f 2 > $dir/stripedStat || return 1 + rados --pool rbd stat toyfile.0000000000000000 | cut -d ',' -f 2 > $dir/stat || return 1 + echo ' size 12' > $dir/reftrunc + diff -w $dir/stripedStat $dir/reftrunc || return 1 + diff -w $dir/stat $dir/reftrunc || return 1 + + # test xattrs + + rados --pool rbd --striper setxattr toyfile somexattr somevalue || return 1 + rados --pool rbd --striper getxattr toyfile somexattr > $dir/xattrvalue || return 1 + rados --pool rbd getxattr toyfile.0000000000000000 somexattr > $dir/xattrvalue2 || return 1 + echo 'somevalue' > $dir/refvalue + diff -w $dir/xattrvalue $dir/refvalue || return 1 + diff -w $dir/xattrvalue2 $dir/refvalue || return 1 + rados --pool rbd --striper listxattr toyfile > $dir/xattrlist || return 1 + echo 'somexattr' > $dir/reflist + diff -w $dir/xattrlist $dir/reflist || return 1 + rados --pool rbd listxattr toyfile.0000000000000000 | grep -v striper > $dir/xattrlist2 || return 1 + diff -w $dir/xattrlist2 $dir/reflist || return 1 + rados --pool rbd --striper rmxattr toyfile somexattr || return 1 + + local attr_not_found_str="No data available" + [ `uname` = FreeBSD ] && \ + attr_not_found_str="Attribute not found" + expect_failure $dir "$attr_not_found_str" \ + rados --pool rbd --striper getxattr toyfile somexattr || return 1 + expect_failure $dir "$attr_not_found_str" \ + rados --pool rbd getxattr toyfile.0000000000000000 somexattr || return 1 + + # test rm + rados --pool rbd --striper rm toyfile || return 1 + expect_failure $dir 'No such file or directory' \ + rados --pool rbd --striper stat toyfile || return 1 + expect_failure $dir 'No such file or directory' \ + rados --pool rbd stat toyfile.0000000000000000 || return 1 + + # cleanup + teardown $dir || return 1 +} + +main rados-striper "$@" diff --git a/qa/standalone/misc/test-ceph-helpers.sh b/qa/standalone/misc/test-ceph-helpers.sh new file mode 100755 index 00000000..e7805858 --- /dev/null +++ 
b/qa/standalone/misc/test-ceph-helpers.sh @@ -0,0 +1,21 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2013,2014 Cloudwatt +# Copyright (C) 2014 Red Hat +# Copyright (C) 2014 Federico Gimenez +# +# Author: Loic Dachary +# Author: Federico Gimenez +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# + +$CEPH_ROOT/qa/standalone/ceph-helpers.sh TESTS "$@" diff --git a/qa/standalone/mon/misc.sh b/qa/standalone/mon/misc.sh new file mode 100755 index 00000000..3e70de85 --- /dev/null +++ b/qa/standalone/mon/misc.sh @@ -0,0 +1,262 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2014 Cloudwatt +# Copyright (C) 2014, 2015 Red Hat +# +# Author: Loic Dachary +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7102" # git grep '\<7102\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + $func $dir || return 1 + done +} + +TEST_POOL=rbd + +function TEST_osd_pool_get_set() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a || return 1 + create_pool $TEST_POOL 8 + + local flag + for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do + ceph osd pool set $TEST_POOL $flag 0 || return 1 + ! ceph osd dump | grep 'pool ' | grep $flag || return 1 + ceph osd pool set $TEST_POOL $flag 1 || return 1 + ceph osd dump | grep 'pool ' | grep $flag || return 1 + ceph osd pool set $TEST_POOL $flag false || return 1 + ! ceph osd dump | grep 'pool ' | grep $flag || return 1 + ceph osd pool set $TEST_POOL $flag false || return 1 + # check that setting false twice does not toggle to true (bug) + ! 
ceph osd dump | grep 'pool ' | grep $flag || return 1 + ceph osd pool set $TEST_POOL $flag true || return 1 + ceph osd dump | grep 'pool ' | grep $flag || return 1 + # cleanup + ceph osd pool set $TEST_POOL $flag 0 || return 1 + done + + local size=$(ceph osd pool get $TEST_POOL size|awk '{print $2}') + local min_size=$(ceph osd pool get $TEST_POOL min_size|awk '{print $2}') + local expected_min_size=$(expr $size - $size / 2) + if [ $min_size -ne $expected_min_size ]; then + echo "default min_size is wrong: expected $expected_min_size, got $min_size" + return 1 + fi + + ceph osd pool set $TEST_POOL scrub_min_interval 123456 || return 1 + ceph osd dump | grep 'pool ' | grep 'scrub_min_interval 123456' || return 1 + ceph osd pool set $TEST_POOL scrub_min_interval 0 || return 1 + ceph osd dump | grep 'pool ' | grep 'scrub_min_interval' && return 1 + ceph osd pool set $TEST_POOL scrub_max_interval 123456 || return 1 + ceph osd dump | grep 'pool ' | grep 'scrub_max_interval 123456' || return 1 + ceph osd pool set $TEST_POOL scrub_max_interval 0 || return 1 + ceph osd dump | grep 'pool ' | grep 'scrub_max_interval' && return 1 + ceph osd pool set $TEST_POOL deep_scrub_interval 123456 || return 1 + ceph osd dump | grep 'pool ' | grep 'deep_scrub_interval 123456' || return 1 + ceph osd pool set $TEST_POOL deep_scrub_interval 0 || return 1 + ceph osd dump | grep 'pool ' | grep 'deep_scrub_interval' && return 1 + + # replicated pool size must be between 1 and 10 + ! ceph osd pool set $TEST_POOL size 11 || return 1 + # replicated pool min_size must be between 1 and size + ! ceph osd pool set $TEST_POOL min_size $(expr $size + 1) || return 1 + ! ceph osd pool set $TEST_POOL min_size 0 || return 1 + + local ecpool=erasepool + create_pool $ecpool 12 12 erasure default || return 1 + # erasure pool size=k+m, min_size=k + local size=$(ceph osd pool get $ecpool size|awk '{print $2}') + local min_size=$(ceph osd pool get $ecpool min_size|awk '{print $2}') + local k=$(expr $min_size - 1) # default min_size=k+1 + # erasure pool size can't change + ! ceph osd pool set $ecpool size $(expr $size + 1) || return 1 + # erasure pool min_size must be between k and size + ceph osd pool set $ecpool min_size $(expr $k + 1) || return 1 + ! ceph osd pool set $ecpool min_size $(expr $k - 1) || return 1 + !
ceph osd pool set $ecpool min_size $(expr $size + 1) || return 1 + + teardown $dir || return 1 +} + +function TEST_mon_add_to_single_mon() { + local dir=$1 + + fsid=$(uuidgen) + MONA=127.0.0.1:7117 # git grep '\<7117\>' : there must be only one + MONB=127.0.0.1:7118 # git grep '\<7118\>' : there must be only one + CEPH_ARGS_orig=$CEPH_ARGS + CEPH_ARGS="--fsid=$fsid --auth-supported=none " + CEPH_ARGS+="--mon-initial-members=a " + CEPH_ARGS+="--mon-host=$MONA " + + setup $dir || return 1 + run_mon $dir a --public-addr $MONA || return 1 + # wait for the quorum + timeout 120 ceph -s > /dev/null || return 1 + run_mon $dir b --public-addr $MONB || return 1 + teardown $dir || return 1 + + setup $dir || return 1 + run_mon $dir a --public-addr $MONA || return 1 + # without the fix of #5454, mon.a will assert failure at seeing the MMonJoin + # from mon.b + run_mon $dir b --public-addr $MONB || return 1 + # make sure mon.b get's it's join request in first, then + sleep 2 + # wait for the quorum + timeout 120 ceph -s > /dev/null || return 1 + ceph mon dump + ceph mon dump -f json-pretty + local num_mons + num_mons=$(ceph mon dump --format=json 2>/dev/null | jq ".mons | length") || return 1 + [ $num_mons == 2 ] || return 1 + # no reason to take more than 120 secs to get this submitted + timeout 120 ceph mon add b $MONB || return 1 + teardown $dir || return 1 +} + +function TEST_no_segfault_for_bad_keyring() { + local dir=$1 + setup $dir || return 1 + # create a client.admin key and add it to ceph.mon.keyring + ceph-authtool --create-keyring $dir/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *' + ceph-authtool --create-keyring $dir/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' + ceph-authtool $dir/ceph.mon.keyring --import-keyring $dir/ceph.client.admin.keyring + CEPH_ARGS_TMP="--fsid=$(uuidgen) --mon-host=127.0.0.1:7102 --auth-supported=cephx " + CEPH_ARGS_orig=$CEPH_ARGS + CEPH_ARGS="$CEPH_ARGS_TMP --keyring=$dir/ceph.mon.keyring " + run_mon $dir a + # create a bad keyring and make sure no segfault occurs when using the bad keyring + echo -e "[client.admin]\nkey = BQAUlgtWoFePIxAAQ9YLzJSVgJX5V1lh5gyctg==" > $dir/bad.keyring + CEPH_ARGS="$CEPH_ARGS_TMP --keyring=$dir/bad.keyring" + ceph osd dump 2> /dev/null + # 139(11|128) means segfault and core dumped + [ $? -eq 139 ] && return 1 + CEPH_ARGS=$CEPH_ARGS_orig + teardown $dir || return 1 +} + +function TEST_mon_features() { + local dir=$1 + setup $dir || return 1 + + fsid=$(uuidgen) + MONA=127.0.0.1:7127 # git grep '\<7127\>' ; there must be only one + MONB=127.0.0.1:7128 # git grep '\<7128\>' ; there must be only one + MONC=127.0.0.1:7129 # git grep '\<7129\>' ; there must be only one + CEPH_ARGS_orig=$CEPH_ARGS + CEPH_ARGS="--fsid=$fsid --auth-supported=none " + CEPH_ARGS+="--mon-initial-members=a,b,c " + CEPH_ARGS+="--mon-host=$MONA,$MONB,$MONC " + CEPH_ARGS+="--mon-debug-no-initial-persistent-features " + CEPH_ARGS+="--mon-debug-no-require-nautilus " + + run_mon $dir a --public-addr $MONA || return 1 + run_mon $dir b --public-addr $MONB || return 1 + timeout 120 ceph -s > /dev/null || return 1 + + # expect monmap to contain 3 monitors (a, b, and c) + jqinput="$(ceph mon_status --format=json 2>/dev/null)" + jq_success "$jqinput" '.monmap.mons | length == 3' || return 1 + # quorum contains two monitors + jq_success "$jqinput" '.quorum | length == 2' || return 1 + # quorum's monitor features contain kraken, luminous, mimic, and nautilus + jqfilter='.features.quorum_mon[]|select(. 
== "kraken")' + jq_success "$jqinput" "$jqfilter" "kraken" || return 1 + jqfilter='.features.quorum_mon[]|select(. == "luminous")' + jq_success "$jqinput" "$jqfilter" "luminous" || return 1 + jqfilter='.features.quorum_mon[]|select(. == "mimic")' + jq_success "$jqinput" "$jqfilter" "mimic" || return 1 + jqfilter='.features.quorum_mon[]|select(. == "nautilus")' + jq_success "$jqinput" "$jqfilter" "nautilus" || return 1 + + # monmap must have no persistent features set, because we + # don't currently have a quorum made out of all the monitors + # in the monmap. + jqfilter='.monmap.features.persistent | length == 0' + jq_success "$jqinput" "$jqfilter" || return 1 + + # nor do we have any optional features, for that matter. + jqfilter='.monmap.features.optional | length == 0' + jq_success "$jqinput" "$jqfilter" || return 1 + + # validate 'mon feature ls' + + jqinput="$(ceph mon feature ls --format=json 2>/dev/null)" + # k l m n are supported + jqfilter='.all.supported[] | select(. == "kraken")' + jq_success "$jqinput" "$jqfilter" "kraken" || return 1 + jqfilter='.all.supported[] | select(. == "luminous")' + jq_success "$jqinput" "$jqfilter" "luminous" || return 1 + jqfilter='.all.supported[] | select(. == "mimic")' + jq_success "$jqinput" "$jqfilter" "mimic" || return 1 + jqfilter='.all.supported[] | select(. == "nautilus")' + jq_success "$jqinput" "$jqfilter" "nautilus" || return 1 + + # start third monitor + run_mon $dir c --public-addr $MONC || return 1 + + wait_for_quorum 300 3 || return 1 + + timeout 300 ceph -s > /dev/null || return 1 + + jqinput="$(ceph mon_status --format=json 2>/dev/null)" + # expect quorum to have all three monitors + jqfilter='.quorum | length == 3' + jq_success "$jqinput" "$jqfilter" || return 1 + # quorum's monitor features contain k and l and m + jqfilter='.features.quorum_mon[]|select(. == "kraken")' + jq_success "$jqinput" "$jqfilter" "kraken" || return 1 + jqfilter='.features.quorum_mon[]|select(. == "luminous")' + jq_success "$jqinput" "$jqfilter" "luminous" || return 1 + jqfilter='.features.quorum_mon[]|select(. == "mimic")' + jq_success "$jqinput" "$jqfilter" "mimic" || return 1 + + # monmap must have not all k l m persistent + # features set. + jqfilter='.monmap.features.persistent | length == 5' + jq_success "$jqinput" "$jqfilter" || return 1 + jqfilter='.monmap.features.persistent[]|select(. == "kraken")' + jq_success "$jqinput" "$jqfilter" "kraken" || return 1 + jqfilter='.monmap.features.persistent[]|select(. == "luminous")' + jq_success "$jqinput" "$jqfilter" "luminous" || return 1 + jqfilter='.monmap.features.persistent[]|select(. == "mimic")' + jq_success "$jqinput" "$jqfilter" "mimic" || return 1 + jqfilter='.monmap.features.persistent[]|select(. == "osdmap-prune")' + jq_success "$jqinput" "$jqfilter" "osdmap-prune" || return 1 + jqfilter='.monmap.features.persistent[]|select(. == "nautilus")' + jq_success "$jqinput" "$jqfilter" "nautilus" || return 1 + + CEPH_ARGS=$CEPH_ARGS_orig + # that's all folks. thank you for tuning in. + teardown $dir || return 1 +} + +main misc "$@" + +# Local Variables: +# compile-command: "cd ../.. 
; make -j4 && test/mon/misc.sh" +# End: diff --git a/qa/standalone/mon/mkfs.sh b/qa/standalone/mon/mkfs.sh new file mode 100755 index 00000000..6650bdb4 --- /dev/null +++ b/qa/standalone/mon/mkfs.sh @@ -0,0 +1,193 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2013 Cloudwatt +# Copyright (C) 2014 Red Hat +# +# Author: Loic Dachary +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# +set -xe +PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: ' + + +DIR=mkfs +export CEPH_CONF=/dev/null +unset CEPH_ARGS +MON_ID=a +MON_DIR=$DIR/$MON_ID +CEPH_MON=127.0.0.1:7110 # git grep '\<7110\>' : there must be only one +TIMEOUT=360 + +EXTRAOPTS="" + +function setup() { + teardown + mkdir $DIR +} + +function teardown() { + kill_daemons + rm -fr $DIR +} + +function mon_mkfs() { + local fsid=$(uuidgen) + + ceph-mon \ + --id $MON_ID \ + --fsid $fsid \ + $EXTRAOPTS \ + --mkfs \ + --mon-data=$MON_DIR \ + --mon-initial-members=$MON_ID \ + --mon-host=$CEPH_MON \ + "$@" +} + +function mon_run() { + ceph-mon \ + --id $MON_ID \ + --chdir= \ + --mon-osd-full-ratio=.99 \ + --mon-data-avail-crit=1 \ + $EXTRAOPTS \ + --mon-data=$MON_DIR \ + --log-file=$MON_DIR/log \ + --mon-cluster-log-file=$MON_DIR/log \ + --run-dir=$MON_DIR \ + --pid-file=$MON_DIR/pidfile \ + --public-addr $CEPH_MON \ + "$@" +} + +function kill_daemons() { + for pidfile in $(find $DIR -name pidfile) ; do + pid=$(cat $pidfile) + for try in 0 1 1 1 2 3 ; do + kill $pid || break + sleep $try + done + done +} + +function auth_none() { + mon_mkfs --auth-supported=none + + ceph-mon \ + --id $MON_ID \ + --mon-osd-full-ratio=.99 \ + --mon-data-avail-crit=1 \ + $EXTRAOPTS \ + --mon-data=$MON_DIR \ + --extract-monmap $MON_DIR/monmap + + [ -f $MON_DIR/monmap ] || return 1 + + [ ! -f $MON_DIR/keyring ] || return 1 + + mon_run --auth-supported=none + + timeout $TIMEOUT ceph --mon-host $CEPH_MON mon stat || return 1 +} + +function auth_cephx_keyring() { + cat > $DIR/keyring <&1 | tee $DIR/makedir.log + grep 'toodeep.*No such file' $DIR/makedir.log > /dev/null + rm $DIR/makedir.log + + # an empty directory does not mean the mon exists + mkdir $MON_DIR + mon_mkfs --auth-supported=none 2>&1 | tee $DIR/makedir.log + ! grep "$MON_DIR already exists" $DIR/makedir.log || return 1 +} + +function idempotent() { + mon_mkfs --auth-supported=none + mon_mkfs --auth-supported=none 2>&1 | tee $DIR/makedir.log + grep "'$MON_DIR' already exists" $DIR/makedir.log > /dev/null || return 1 +} + +function run() { + local actions + actions+="makedir " + actions+="idempotent " + actions+="auth_cephx_key " + actions+="auth_cephx_keyring " + actions+="auth_none " + for action in $actions ; do + setup + $action || return 1 + teardown + done +} + +run + +# Local Variables: +# compile-command: "cd ../.. ; make TESTS=test/mon/mkfs.sh check" +# End: diff --git a/qa/standalone/mon/mon-bind.sh b/qa/standalone/mon/mon-bind.sh new file mode 100755 index 00000000..49b0079a --- /dev/null +++ b/qa/standalone/mon/mon-bind.sh @@ -0,0 +1,147 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2017 Quantum Corp. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +SOCAT_PIDS=() + +function port_forward() { + local source_port=$1 + local target_port=$2 + + socat TCP-LISTEN:${source_port},fork,reuseaddr TCP:localhost:${target_port} & + SOCAT_PIDS+=( $! ) +} + +function cleanup() { + for p in "${SOCAT_PIDS[@]}"; do + kill $p + done + SOCAT_PIDS=() +} + +trap cleanup SIGTERM SIGKILL SIGQUIT SIGINT + +function run() { + local dir=$1 + shift + + export MON_IP=127.0.0.1 + export MONA_PUBLIC=7132 # git grep '\<7132\>' ; there must be only one + export MONB_PUBLIC=7133 # git grep '\<7133\>' ; there must be only one + export MONC_PUBLIC=7134 # git grep '\<7134\>' ; there must be only one + export MONA_BIND=7135 # git grep '\<7135\>' ; there must be only one + export MONB_BIND=7136 # git grep '\<7136\>' ; there must be only one + export MONC_BIND=7137 # git grep '\<7137\>' ; there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir && cleanup || { cleanup; return 1; } + teardown $dir + done +} + +function TEST_mon_client_connect_fails() { + local dir=$1 + + # start the mon with a public-bind-addr that is different + # from the public-addr. + CEPH_ARGS+="--mon-initial-members=a " + CEPH_ARGS+="--mon-host=${MON_IP}:${MONA_PUBLIC} " + run_mon $dir a --mon-host=${MON_IP}:${MONA_PUBLIC} --public-bind-addr=${MON_IP}:${MONA_BIND} || return 1 + + # now attempt to ping it that should fail. + timeout 3 ceph ping mon.a || return 0 + return 1 +} + +function TEST_mon_client_connect() { + local dir=$1 + + # start the mon with a public-bind-addr that is different + # from the public-addr. + CEPH_ARGS+="--mon-initial-members=a " + CEPH_ARGS+="--mon-host=${MON_IP}:${MONA_PUBLIC} " + run_mon $dir a --mon-host=${MON_IP}:${MONA_PUBLIC} --public-bind-addr=${MON_IP}:${MONA_BIND} || return 1 + + # now forward the public port to the bind port. + port_forward ${MONA_PUBLIC} ${MONA_BIND} + + # attempt to connect. we expect that to work + ceph ping mon.a || return 1 +} + +function TEST_mon_quorum() { + local dir=$1 + + # start the mon with a public-bind-addr that is different + # from the public-addr. + CEPH_ARGS+="--mon-initial-members=a,b,c " + CEPH_ARGS+="--mon-host=${MON_IP}:${MONA_PUBLIC},${MON_IP}:${MONB_PUBLIC},${MON_IP}:${MONC_PUBLIC} " + run_mon $dir a --public-addr=${MON_IP}:${MONA_PUBLIC} --public-bind-addr=${MON_IP}:${MONA_BIND} || return 1 + run_mon $dir b --public-addr=${MON_IP}:${MONB_PUBLIC} --public-bind-addr=${MON_IP}:${MONB_BIND} || return 1 + run_mon $dir c --public-addr=${MON_IP}:${MONC_PUBLIC} --public-bind-addr=${MON_IP}:${MONC_BIND} || return 1 + + # now forward the public port to the bind port. 
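+ # (clients only know the public addresses, so each of the three monitors needs its own forwarder before the quorum checks below can reach it)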
+ port_forward ${MONA_PUBLIC} ${MONA_BIND} + port_forward ${MONB_PUBLIC} ${MONB_BIND} + port_forward ${MONC_PUBLIC} ${MONC_BIND} + + # expect monmap to contain 3 monitors (a, b, and c) + jqinput="$(ceph mon_status --format=json 2>/dev/null)" + jq_success "$jqinput" '.monmap.mons | length == 3' || return 1 + + # quorum should form + wait_for_quorum 300 3 || return 1 + # expect quorum to have all three monitors + jqfilter='.quorum | length == 3' + jq_success "$jqinput" "$jqfilter" || return 1 +} + +function TEST_put_get() { + local dir=$1 + + # start the mon with a public-bind-addr that is different + # from the public-addr. + CEPH_ARGS+="--mon-initial-members=a,b,c " + CEPH_ARGS+="--mon-host=${MON_IP}:${MONA_PUBLIC},${MON_IP}:${MONB_PUBLIC},${MON_IP}:${MONC_PUBLIC} " + run_mon $dir a --public-addr=${MON_IP}:${MONA_PUBLIC} --public-bind-addr=${MON_IP}:${MONA_BIND} || return 1 + run_mon $dir b --public-addr=${MON_IP}:${MONB_PUBLIC} --public-bind-addr=${MON_IP}:${MONB_BIND} || return 1 + run_mon $dir c --public-addr=${MON_IP}:${MONC_PUBLIC} --public-bind-addr=${MON_IP}:${MONC_BIND} || return 1 + + # now forward the public port to the bind port. + port_forward ${MONA_PUBLIC} ${MONA_BIND} + port_forward ${MONB_PUBLIC} ${MONB_BIND} + port_forward ${MONC_PUBLIC} ${MONC_BIND} + + # quorum should form + wait_for_quorum 300 3 || return 1 + + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + + create_pool hello 8 || return 1 + + echo "hello world" > $dir/hello + rados --pool hello put foo $dir/hello || return 1 + rados --pool hello get foo $dir/hello2 || return 1 + diff $dir/hello $dir/hello2 || return 1 +} + +main mon-bind "$@" diff --git a/qa/standalone/mon/mon-created-time.sh b/qa/standalone/mon/mon-created-time.sh new file mode 100755 index 00000000..4b844605 --- /dev/null +++ b/qa/standalone/mon/mon-created-time.sh @@ -0,0 +1,54 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2015 SUSE LINUX GmbH +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7125" # git grep '\<7125\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_mon_created_time() { + local dir=$1 + + run_mon $dir a || return 1 + + ceph mon dump || return 1 + + if test "$(ceph mon dump 2>/dev/null | sed -n '/created/p' | awk '{print $NF}')"x = ""x ; then + return 1 + fi + + if test "$(ceph mon dump 2>/dev/null | sed -n '/created/p' | awk '{print $NF}')"x = "0.000000"x ; then + return 1 + fi +} + +main mon-created-time "$@" + +# Local Variables: +# compile-command: "cd ../.. 
; make -j4 && test/mon/mon-created-time.sh" +# End: diff --git a/qa/standalone/mon/mon-handle-forward.sh b/qa/standalone/mon/mon-handle-forward.sh new file mode 100755 index 00000000..8633959d --- /dev/null +++ b/qa/standalone/mon/mon-handle-forward.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2013 Cloudwatt +# Copyright (C) 2014,2015 Red Hat +# +# Author: Loic Dachary +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + + setup $dir || return 1 + + MONA=127.0.0.1:7300 + MONB=127.0.0.1:7301 + ( + FSID=$(uuidgen) + export CEPH_ARGS + CEPH_ARGS+="--fsid=$FSID --auth-supported=none " + CEPH_ARGS+="--mon-initial-members=a,b --mon-host=$MONA,$MONB " + run_mon $dir a --public-addr $MONA || return 1 + run_mon $dir b --public-addr $MONB || return 1 + ) + + timeout 360 ceph --mon-host-override $MONA mon stat || return 1 + # check that MONB is indeed a peon + ceph --admin-daemon $(get_asok_path mon.b) mon_status | + grep '"peon"' || return 1 + # when the leader ( MONA ) is used, there is no message forwarding + ceph --mon-host-override $MONA osd pool create POOL1 12 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1 + grep 'mon_command(.*"POOL1"' $dir/mon.a.log || return 1 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.b) log flush || return 1 + grep 'mon_command(.*"POOL1"' $dir/mon.b.log && return 1 + # when the peon ( MONB ) is used, the message is forwarded to the leader + ceph --mon-host-override $MONB osd pool create POOL2 12 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.b) log flush || return 1 + grep 'forward_request.*mon_command(.*"POOL2"' $dir/mon.b.log || return 1 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1 + grep ' forward(mon_command(.*"POOL2"' $dir/mon.a.log || return 1 + # forwarded messages must retain features from the original connection + features=$(sed -n -e 's|.*127.0.0.1:0.*accept features \([0-9][0-9]*\)|\1|p' < \ + $dir/mon.b.log) + grep ' forward(mon_command(.*"POOL2".*con_features '$features $dir/mon.a.log || return 1 + + teardown $dir || return 1 +} + +main mon-handle-forward "$@" + +# Local Variables: +# compile-command: "cd ../.. 
; make -j4 TESTS=test/mon/mon-handle-forward.sh check" +# End: diff --git a/qa/standalone/mon/mon-last-epoch-clean.sh b/qa/standalone/mon/mon-last-epoch-clean.sh new file mode 100755 index 00000000..172642e8 --- /dev/null +++ b/qa/standalone/mon/mon-last-epoch-clean.sh @@ -0,0 +1,307 @@ +#!/usr/bin/env bash + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7302" # git grep '\<7302\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + + +function check_lec_equals_pools() { + + local pool_id=$1 + + report=$(ceph report) + lec=$(echo $report | \ + jq '.osdmap_clean_epochs.min_last_epoch_clean') + + if [[ -z "$pool_id" ]]; then + pools=($(echo $report | \ + jq \ + ".osdmap_clean_epochs.last_epoch_clean.per_pool[] |" \ + " select(.floor == $lec) | .poolid")) + + [[ ${#pools[*]} -eq 2 ]] || ( echo $report ; return 1 ) + else + floor=($(echo $report | \ + jq \ + ".osdmap_clean_epochs.last_epoch_clean.per_pool[] |" \ + " select(.poolid == $pool_id) | .floor")) + + [[ $lec -eq $floor ]] || ( echo $report ; return 1 ) + fi + return 0 +} + +function check_lec_lower_than_pool() { + + local pool_id=$1 + [[ -z "$pool_id" ]] && ( echo "expected pool_id as parameter" ; exit 1 ) + + report=$(ceph report) + lec=$(echo $report | \ + jq '.osdmap_clean_epochs.min_last_epoch_clean') + + floor=($(echo $report | \ + jq \ + ".osdmap_clean_epochs.last_epoch_clean.per_pool[] |" \ + " select(.poolid == $pool_id) | .floor")) + + [[ $lec -lt $floor ]] || ( echo $report ; return 1 ) + return 0 +} + +function check_floor_pool_greater_than_pool() { + + local pool_a=$1 + local pool_b=$2 + [[ -z "$pool_a" ]] && ( echo "expected id as first parameter" ; exit 1 ) + [[ -z "$pool_b" ]] && ( echo "expected id as second parameter" ; exit 1 ) + + report=$(ceph report) + + floor_a=($(echo $report | \ + jq \ + ".osdmap_clean_epochs.last_epoch_clean.per_pool[] |" \ + " select(.poolid == $pool_a) | .floor")) + + floor_b=($(echo $report | \ + jq \ + ".osdmap_clean_epochs.last_epoch_clean.per_pool[] |" \ + " select(.poolid == $pool_b) | .floor")) + + [[ $floor_a -gt $floor_b ]] || ( echo $report ; return 1 ) + return 0 +} + +function check_lec_honours_osd() { + + local osd=$1 + + report=$(ceph report) + lec=$(echo $report | \ + jq '.osdmap_clean_epochs.min_last_epoch_clean') + + if [[ -z "$osd" ]]; then + osds=($(echo $report | \ + jq \ + ".osdmap_clean_epochs.osd_epochs[] |" \ + " select(.epoch >= $lec) | .id")) + + [[ ${#osds[*]} -eq 3 ]] || ( echo $report ; return 1 ) + else + epoch=($(echo $report | \ + jq \ + ".osdmap_clean_epochs.osd_epochs[] |" \ + " select(.id == $osd) | .epoch")) + [[ ${#epoch[*]} -eq 1 ]] || ( echo $report ; return 1 ) + [[ ${epoch[0]} -ge $lec ]] || ( echo $report ; return 1 ) + fi + + return 0 +} + +function validate_fc() { + report=$(ceph report) + lec=$(echo $report | \ + jq '.osdmap_clean_epochs.min_last_epoch_clean') + osdm_fc=$(echo $report | \ + jq '.osdmap_first_committed') + + [[ $lec -eq $osdm_fc ]] || ( echo $report ; return 1 ) + return 0 +} + +function get_fc_lc_diff() { + report=$(ceph report) + osdm_fc=$(echo $report | \ + jq '.osdmap_first_committed') + osdm_lc=$(echo $report | \ + jq '.osdmap_last_committed') + + echo $((osdm_lc - osdm_fc)) +} +
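+# get_pool_id <pool-name>: echo the numeric id of the named pool as found in 'ceph report'; fails when the pool does not exist.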
+function get_pool_id() { + + local pn=$1 + [[ -z "$pn" ]] && ( echo "expected pool name as argument" ; exit 1 ) + + report=$(ceph report) + pool_id=$(echo $report | \ + jq ".osdmap.pools[] | select(.pool_name == \"$pn\") | .pool") + + [[ $pool_id -ge 0 ]] || \ + ( echo "unexpected pool id for pool \'$pn\': $pool_id" ; return -1 ) + + echo $pool_id + return 0 +} + +function wait_for_total_num_maps() { + # rip wait_for_health, becaue it's easier than deduplicating the code + local -a delays=($(get_timeout_delays $TIMEOUT .1)) + local -i loop=0 + local -i v_diff=$1 + + while [[ $(get_fc_lc_diff) -gt $v_diff ]]; do + if (( $loop >= ${#delays[*]} )) ; then + echo "maps were not trimmed" + return 1 + fi + sleep ${delays[$loop]} + loop+=1 + done +} + +function TEST_mon_last_clean_epoch() { + + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + osd_pid=$(cat $dir/osd.2.pid) + + sleep 5 + + ceph tell 'osd.*' injectargs '--osd-beacon-report-interval 10' || exit 1 + ceph tell 'mon.*' injectargs \ + '--mon-min-osdmap-epochs 2 --paxos-service-trim-min 1' || exit 1 + + create_pool foo 32 + create_pool bar 32 + + foo_id=$(get_pool_id "foo") + bar_id=$(get_pool_id "bar") + + [[ $foo_id -lt 0 ]] && ( echo "couldn't find pool 'foo' id" ; exit 1 ) + [[ $bar_id -lt 0 ]] && ( echo "couldn't find pool 'bar' id" ; exit 1 ) + + # no real clue why we are getting these warnings, but let's make them go + # away so we can be happy. + + ceph osd set-full-ratio 0.97 + ceph osd set-backfillfull-ratio 0.97 + + wait_for_health_ok || exit 1 + + pre_map_diff=$(get_fc_lc_diff) + wait_for_total_num_maps 2 + post_map_diff=$(get_fc_lc_diff) + + [[ $post_map_diff -le $pre_map_diff ]] || exit 1 + + pre_map_diff=$post_map_diff + + ceph osd pool set foo size 3 + ceph osd pool set bar size 3 + + wait_for_health_ok || exit 1 + + check_lec_equals_pools || exit 1 + check_lec_honours_osd || exit 1 + validate_fc || exit 1 + + # down osd.2; expected result (because all pools' size equals 3): + # - number of committed maps increase over 2 + # - lec equals fc + # - lec equals osd.2's epoch + # - all pools have floor equal to lec + + while kill $osd_pid ; do sleep 1 ; done + ceph osd out 2 + sleep 5 # seriously, just to make sure things settle; we may not need this. + + # generate some maps + for ((i=0; i <= 10; ++i)); do + ceph osd set noup + sleep 1 + ceph osd unset noup + sleep 1 + done + + post_map_diff=$(get_fc_lc_diff) + [[ $post_map_diff -gt 2 ]] || exit 1 + + validate_fc || exit 1 + check_lec_equals_pools || exit 1 + check_lec_honours_osd 2 || exit 1 + + # adjust pool 'bar' size to 2; expect: + # - number of committed maps still over 2 + # - lec equals fc + # - lec equals pool 'foo' floor + # - pool 'bar' floor greater than pool 'foo' + + ceph osd pool set bar size 2 + + diff_ver=$(get_fc_lc_diff) + [[ $diff_ver -gt 2 ]] || exit 1 + + validate_fc || exit 1 + + check_lec_equals_pools $foo_id || exit 1 + check_lec_lower_than_pool $bar_id || exit 1 + + check_floor_pool_greater_than_pool $bar_id $foo_id || exit 1 + + # set pool 'foo' size to 2; expect: + # - health_ok + # - lec equals pools + # - number of committed maps decreases + # - lec equals fc + + pre_map_diff=$(get_fc_lc_diff) + + ceph osd pool set foo size 2 || exit 1 + wait_for_clean || exit 1 + + check_lec_equals_pools || exit 1 + validate_fc || exit 1 + + if ! 
wait_for_total_num_maps 2 ; then + post_map_diff=$(get_fc_lc_diff) + # number of maps is decreasing though, right? + [[ $post_map_diff -lt $pre_map_diff ]] || exit 1 + fi + + # bring back osd.2; expect: + # - health_ok + # - lec equals fc + # - number of committed maps equals 2 + # - all pools have floor equal to lec + + pre_map_diff=$(get_fc_lc_diff) + + activate_osd $dir 2 || exit 1 + wait_for_health_ok || exit 1 + validate_fc || exit 1 + check_lec_equals_pools || exit 1 + + if ! wait_for_total_num_maps 2 ; then + post_map_diff=$(get_fc_lc_diff) + # number of maps is decreasing though, right? + [[ $post_map_diff -lt $pre_map_diff ]] || exit 1 + fi + + return 0 +} + +main mon-last-clean-epoch "$@" diff --git a/qa/standalone/mon/mon-osdmap-prune.sh b/qa/standalone/mon/mon-osdmap-prune.sh new file mode 100755 index 00000000..f8f7876b --- /dev/null +++ b/qa/standalone/mon/mon-osdmap-prune.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +base_test=$CEPH_ROOT/qa/workunits/mon/test_mon_osdmap_prune.sh + +function run() { + + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7115" + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none --mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_osdmap_prune() { + + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + + sleep 5 + + # we are getting OSD_OUT_OF_ORDER_FULL health errors, and it's not clear + # why. so, to make the health checks happy, mask those errors. + ceph osd set-full-ratio 0.97 + ceph osd set-backfillfull-ratio 0.97 + + ceph config set osd osd_beacon_report_interval 10 || return 1 + ceph config set mon mon_debug_extra_checks true || return 1 + + ceph config set mon mon_min_osdmap_epochs 100 || return 1 + ceph config set mon mon_osdmap_full_prune_enabled true || return 1 + ceph config set mon mon_osdmap_full_prune_min 200 || return 1 + ceph config set mon mon_osdmap_full_prune_interval 10 || return 1 + ceph config set mon mon_osdmap_full_prune_txsize 100 || return 1 + + + bash -x $base_test || return 1 + + return 0 +} + +main mon-osdmap-prune "$@" + diff --git a/qa/standalone/mon/mon-ping.sh b/qa/standalone/mon/mon-ping.sh new file mode 100755 index 00000000..1f5096be --- /dev/null +++ b/qa/standalone/mon/mon-ping.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2015 SUSE LINUX GmbH +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. 
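+# +# mon-ping.sh is a minimal smoke test: bring up a single monitor and verify that 'ceph ping mon.a' succeeds.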
+# +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7119" # git grep '\<7119\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_mon_ping() { + local dir=$1 + + run_mon $dir a || return 1 + + ceph ping mon.a || return 1 +} + +main mon-ping "$@" + +# Local Variables: +# compile-command: "cd ../.. ; make -j4 && test/mon/mon-ping.sh" +# End: diff --git a/qa/standalone/mon/mon-scrub.sh b/qa/standalone/mon/mon-scrub.sh new file mode 100755 index 00000000..158bd434 --- /dev/null +++ b/qa/standalone/mon/mon-scrub.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2014 Cloudwatt +# Copyright (C) 2014, 2015 Red Hat +# +# Author: Loic Dachary +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7120" # git grep '\<7120\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_mon_scrub() { + local dir=$1 + + run_mon $dir a || return 1 + + ceph mon scrub || return 1 +} + +main mon-scrub "$@" + +# Local Variables: +# compile-command: "cd ../.. 
; make -j4 && test/mon/mon-scrub.sh" +# End: diff --git a/qa/standalone/mon/mon-seesaw.sh b/qa/standalone/mon/mon-seesaw.sh new file mode 100755 index 00000000..1c97847b --- /dev/null +++ b/qa/standalone/mon/mon-seesaw.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON_A="127.0.0.1:7139" # git grep '\<7139\>' : there must be only one + export CEPH_MON_B="127.0.0.1:7141" # git grep '\<7141\>' : there must be only one + export CEPH_MON_C="127.0.0.1:7142" # git grep '\<7142\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + + export BASE_CEPH_ARGS=$CEPH_ARGS + CEPH_ARGS+="--mon-host=$CEPH_MON_A " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_mon_seesaw() { + local dir=$1 + + setup $dir || return + + # start with 1 mon + run_mon $dir aa --public-addr $CEPH_MON_A || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + + wait_for_quorum 300 1 || return 1 + + # add in a second + run_mon $dir bb --public-addr $CEPH_MON_B || return 1 + CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_A,$CEPH_MON_B" + wait_for_quorum 300 2 || return 1 + + # remove the first one + ceph mon rm aa || return 1 + CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_B" + sleep 5 + wait_for_quorum 300 1 || return 1 + + # do some stuff that requires the osds be able to communicate with the + # mons. (see http://tracker.ceph.com/issues/17558) + ceph osd pool create foo 8 + rados -p foo bench 1 write + wait_for_clean || return 1 + + # nuke monstore so that it will rejoin (otherwise we get + # "not in monmap and have been in a quorum before; must have been removed" + rm -rf $dir/aa + + # add a back in + # (use a different addr to avoid bind issues) + run_mon $dir aa --public-addr $CEPH_MON_C || return 1 + CEPH_ARGS="$BASE_CEPH_ARGS --mon-host=$CEPH_MON_C,$CEPH_MON_B" + wait_for_quorum 300 2 || return 1 +} + +main mon-seesaw "$@" + +# Local Variables: +# compile-command: "cd ../.. 
; make -j4 && test/mon/mon-ping.sh" +# End: diff --git a/qa/standalone/mon/msgr-v2-transition.sh b/qa/standalone/mon/msgr-v2-transition.sh new file mode 100755 index 00000000..b489d431 --- /dev/null +++ b/qa/standalone/mon/msgr-v2-transition.sh @@ -0,0 +1,82 @@ +#!/usr/bin/env bash +# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON_V1="v1:127.0.0.1:7148" # git grep '\<7148\>' : there must be only one + export CEPH_MON_V2="v2:127.0.0.1:7149" # git grep '\<7149\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + + local funcs=${@:-$(set | ${SED} -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_mon_v1_osd_addrs() { + local dir=$1 + + export CEPH_ARGS="$CEPH_ARGS --mon-host=$CEPH_MON_V1 --mon-debug-no-require-nautilus" + run_mon $dir a || return 1 + + ceph mon dump | grep mon.a | grep $CEPH_MON_V1 + + run_osd $dir 0 || return 1 + wait_for_osd up 0 || return 1 + ceph osd dump | grep osd.0 | grep v1: || return 1 + ceph osd dump | grep osd.0 | grep v2: && return 1 + + ceph osd require-osd-release nautilus + + ceph osd down 0 + wait_for_osd up 0 || return 1 + + # public should be v1, cluster v2 + ceph osd dump | grep osd.0 | grep v1: || return 1 + ceph osd dump -f json | jq '.osds[0].public_addrs.addrvec[0]' | grep v1 || return 1 + ceph osd dump -f json | jq '.osds[0].cluster_addrs.addrvec[0]' | grep v2 || return 1 + + # enable v2 port on mon + ceph mon set-addrs a "[$CEPH_MON_V2,$CEPH_MON_V1]" + + ceph osd down 0 + wait_for_osd up 0 || return 1 + + # both public and cluster should be v2+v1 + ceph osd dump | grep osd.0 | grep v1: || return 1 + ceph osd dump -f json | jq '.osds[0].public_addrs.addrvec[0]' | grep v2 || return 1 + ceph osd dump -f json | jq '.osds[0].cluster_addrs.addrvec[0]' | grep v2 || return 1 +} + +function TEST_mon_v2v1_osd_addrs() { + local dir=$1 + + export CEPH_ARGS="$CEPH_ARGS --mon-host=[$CEPH_MON_V2,$CEPH_MON_V1] --mon-debug-no-require-nautilus" + run_mon $dir a || return 1 + + ceph mon dump | grep mon.a | grep $CEPH_MON_V1 + + run_osd $dir 0 || return 1 + wait_for_osd up 0 || return 1 + ceph osd dump | grep osd.0 | grep v1: || return 1 + ceph osd dump | grep osd.0 | grep v2: && return 1 + + ceph osd require-osd-release nautilus + + ceph osd down 0 + wait_for_osd up 0 || return 1 + + # both public and cluster should be v2+v1 + ceph osd dump | grep osd.0 | grep v1: || return 1 + ceph osd dump -f json | jq '.osds[0].public_addrs.addrvec[0]' | grep v2 || return 1 + ceph osd dump -f json | jq '.osds[0].cluster_addrs.addrvec[0]' | grep v2 || return 1 +} + +main msgr-v2-transition "$@" diff --git a/qa/standalone/mon/osd-crush.sh b/qa/standalone/mon/osd-crush.sh new file mode 100755 index 00000000..6b0f95dc --- /dev/null +++ b/qa/standalone/mon/osd-crush.sh @@ -0,0 +1,239 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2014 Cloudwatt +# Copyright (C) 2014, 2015 Red Hat +# +# Author: Loic Dachary +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU Library Public License for more details. +# +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7104" # git grep '\<7104\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | ${SED} -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_crush_rule_create_simple() { + local dir=$1 + + run_mon $dir a || return 1 + + ceph --format xml osd crush rule dump replicated_rule | \ + egrep 'take[^<]+default' | \ + grep 'choose_firstn0osd' || return 1 + local ruleset=ruleset0 + local root=host1 + ceph osd crush add-bucket $root host + local failure_domain=osd + ceph osd crush rule create-simple $ruleset $root $failure_domain || return 1 + ceph osd crush rule create-simple $ruleset $root $failure_domain 2>&1 | \ + grep "$ruleset already exists" || return 1 + ceph --format xml osd crush rule dump $ruleset | \ + egrep 'take[^<]+'$root'' | \ + grep 'choose_firstn0'$failure_domain'' || return 1 + ceph osd crush rule rm $ruleset || return 1 +} + +function TEST_crush_rule_dump() { + local dir=$1 + + run_mon $dir a || return 1 + + local ruleset=ruleset1 + ceph osd crush rule create-erasure $ruleset || return 1 + test $(ceph --format json osd crush rule dump $ruleset | \ + jq ".rule_name == \"$ruleset\"") == true || return 1 + test $(ceph --format json osd crush rule dump | \ + jq "map(select(.rule_name == \"$ruleset\")) | length == 1") == true || return 1 + ! ceph osd crush rule dump non_existent_ruleset || return 1 + ceph osd crush rule rm $ruleset || return 1 +} + +function TEST_crush_rule_rm() { + local ruleset=erasure2 + + run_mon $dir a || return 1 + + ceph osd crush rule create-erasure $ruleset default || return 1 + ceph osd crush rule ls | grep $ruleset || return 1 + ceph osd crush rule rm $ruleset || return 1 + ! ceph osd crush rule ls | grep $ruleset || return 1 +} + +function TEST_crush_rule_create_erasure() { + local dir=$1 + + run_mon $dir a || return 1 + # should have at least one OSD + run_osd $dir 0 || return 1 + + local ruleset=ruleset3 + # + # create a new ruleset with the default profile, implicitly + # + ceph osd crush rule create-erasure $ruleset || return 1 + ceph osd crush rule create-erasure $ruleset 2>&1 | \ + grep "$ruleset already exists" || return 1 + ceph --format xml osd crush rule dump $ruleset | \ + egrep 'take[^<]+default' | \ + grep 'chooseleaf_indep0host' || return 1 + ceph osd crush rule rm $ruleset || return 1 + ! ceph osd crush rule ls | grep $ruleset || return 1 + # + # create a new ruleset with the default profile, explicitly + # + ceph osd crush rule create-erasure $ruleset default || return 1 + ceph osd crush rule ls | grep $ruleset || return 1 + ceph osd crush rule rm $ruleset || return 1 + ! ceph osd crush rule ls | grep $ruleset || return 1 + # + # create a new ruleset and the default profile, implicitly + # + ceph osd erasure-code-profile rm default || return 1 + ! ceph osd erasure-code-profile ls | grep default || return 1 + ceph osd crush rule create-erasure $ruleset || return 1 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path mon.a) log flush || return 1 + grep 'profile set default' $dir/mon.a.log || return 1 + ceph osd erasure-code-profile ls | grep default || return 1 + ceph osd crush rule rm $ruleset || return 1 + ! 
ceph osd crush rule ls | grep $ruleset || return 1 +} + +function check_ruleset_id_match_rule_id() { + local rule_name=$1 + rule_id=`ceph osd crush rule dump $rule_name | grep "\"rule_id\":" | awk -F ":|," '{print int($2)}'` + ruleset_id=`ceph osd crush rule dump $rule_name | grep "\"ruleset\":"| awk -F ":|," '{print int($2)}'` + test $ruleset_id = $rule_id || return 1 +} + +function generate_manipulated_rules() { + local dir=$1 + ceph osd crush add-bucket $root host + ceph osd crush rule create-simple test_rule1 $root osd firstn || return 1 + ceph osd crush rule create-simple test_rule2 $root osd firstn || return 1 + ceph osd getcrushmap -o $dir/original_map + crushtool -d $dir/original_map -o $dir/decoded_original_map + #manipulate the rulesets , to make the rule_id != ruleset_id + ${SED} -i 's/ruleset 0/ruleset 3/' $dir/decoded_original_map + ${SED} -i 's/ruleset 2/ruleset 0/' $dir/decoded_original_map + ${SED} -i 's/ruleset 1/ruleset 2/' $dir/decoded_original_map + + crushtool -c $dir/decoded_original_map -o $dir/new_map + ceph osd setcrushmap -i $dir/new_map + + ceph osd crush rule dump +} + +function TEST_crush_ruleset_match_rule_when_creating() { + local dir=$1 + + run_mon $dir a || return 1 + + local root=host1 + + generate_manipulated_rules $dir + + ceph osd crush rule create-simple special_rule_simple $root osd firstn || return 1 + + ceph osd crush rule dump + #show special_rule_simple has same rule_id and ruleset_id + check_ruleset_id_match_rule_id special_rule_simple || return 1 +} + +function TEST_add_ruleset_failed() { + local dir=$1 + + run_mon $dir a || return 1 + + local root=host1 + + ceph osd crush add-bucket $root host + ceph osd crush rule create-simple test_rule1 $root osd firstn || return 1 + ceph osd crush rule create-simple test_rule2 $root osd firstn || return 1 + ceph osd getcrushmap > $dir/crushmap || return 1 + crushtool --decompile $dir/crushmap > $dir/crushmap.txt || return 1 + for i in $(seq 3 255) + do + cat <> $dir/crushmap.txt + crushtool --compile $dir/crushmap.txt -o $dir/crushmap || return 1 + ceph osd setcrushmap -i $dir/crushmap || return 1 + ceph osd crush rule create-simple test_rule_nospace $root osd firstn 2>&1 | grep "Error ENOSPC" || return 1 + +} + +function TEST_crush_rename_bucket() { + local dir=$1 + + run_mon $dir a || return 1 + + ceph osd crush add-bucket host1 host + ceph osd tree + ! ceph osd tree | grep host2 || return 1 + ceph osd crush rename-bucket host1 host2 || return 1 + ceph osd tree + ceph osd tree | grep host2 || return 1 + ceph osd crush rename-bucket host1 host2 || return 1 # idempotency + ceph osd crush rename-bucket nonexistent something 2>&1 | grep "Error ENOENT" || return 1 +} + +function TEST_crush_ls_node() { + local dir=$1 + run_mon $dir a || return 1 + ceph osd crush add-bucket default1 root + ceph osd crush add-bucket host1 host + ceph osd crush move host1 root=default1 + ceph osd crush ls default1 | grep host1 || return 1 + ceph osd crush ls default2 2>&1 | grep "Error ENOENT" || return 1 +} + +function TEST_crush_reject_empty() { + local dir=$1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + # should have at least one OSD + run_osd $dir 0 || return 1 + create_rbd_pool || return 1 + + local empty_map=$dir/empty_map + :> $empty_map.txt + crushtool -c $empty_map.txt -o $empty_map.map || return 1 + expect_failure $dir "Error EINVAL" \ + ceph osd setcrushmap -i $empty_map.map || return 1 +} + +main osd-crush "$@" + +# Local Variables: +# compile-command: "cd ../.. 
; make -j4 && test/mon/osd-crush.sh" +# End: diff --git a/qa/standalone/mon/osd-erasure-code-profile.sh b/qa/standalone/mon/osd-erasure-code-profile.sh new file mode 100755 index 00000000..8de64316 --- /dev/null +++ b/qa/standalone/mon/osd-erasure-code-profile.sh @@ -0,0 +1,240 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2014 Cloudwatt +# Copyright (C) 2014, 2015 Red Hat +# +# Author: Loic Dachary +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7220" # git grep '\<7220\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_set() { + local dir=$1 + local id=$2 + + run_mon $dir a || return 1 + + local profile=myprofile + # + # no key=value pairs : use the default configuration + # + ceph osd erasure-code-profile set $profile 2>&1 || return 1 + ceph osd erasure-code-profile get $profile | \ + grep plugin=jerasure || return 1 + ceph osd erasure-code-profile rm $profile + # + # key=value pairs override the default + # + ceph osd erasure-code-profile set $profile \ + key=value plugin=isa || return 1 + ceph osd erasure-code-profile get $profile | \ + grep -e key=value -e plugin=isa || return 1 + # + # --force is required to override an existing profile + # + ! ceph osd erasure-code-profile set $profile > $dir/out 2>&1 || return 1 + grep 'will not override' $dir/out || return 1 + ceph osd erasure-code-profile set $profile key=other --force || return 1 + ceph osd erasure-code-profile get $profile | \ + grep key=other || return 1 + + ceph osd erasure-code-profile rm $profile # cleanup +} + +function TEST_ls() { + local dir=$1 + local id=$2 + + run_mon $dir a || return 1 + + local profile=myprofile + ! ceph osd erasure-code-profile ls | grep $profile || return 1 + ceph osd erasure-code-profile set $profile 2>&1 || return 1 + ceph osd erasure-code-profile ls | grep $profile || return 1 + ceph --format xml osd erasure-code-profile ls | \ + grep "$profile" || return 1 + + ceph osd erasure-code-profile rm $profile # cleanup +} + +function TEST_rm() { + local dir=$1 + local id=$2 + + run_mon $dir a || return 1 + + local profile=myprofile + ceph osd erasure-code-profile set $profile 2>&1 || return 1 + ceph osd erasure-code-profile ls | grep $profile || return 1 + ceph osd erasure-code-profile rm $profile || return 1 + ! ceph osd erasure-code-profile ls | grep $profile || return 1 + ceph osd erasure-code-profile rm WRONG 2>&1 | \ + grep "WRONG does not exist" || return 1 + + ceph osd erasure-code-profile set $profile || return 1 + create_pool poolname 12 12 erasure $profile || return 1 + ! 
ceph osd erasure-code-profile rm $profile > $dir/out 2>&1 || return 1 + grep "poolname.*using.*$profile" $dir/out || return 1 + ceph osd pool delete poolname poolname --yes-i-really-really-mean-it || return 1 + ceph osd erasure-code-profile rm $profile || return 1 + + ceph osd erasure-code-profile rm $profile # cleanup +} + +function TEST_get() { + local dir=$1 + local id=$2 + + run_mon $dir a || return 1 + + local default_profile=default + ceph osd erasure-code-profile get $default_profile | \ + grep plugin=jerasure || return 1 + ceph --format xml osd erasure-code-profile get $default_profile | \ + grep 'jerasure' || return 1 + ! ceph osd erasure-code-profile get WRONG > $dir/out 2>&1 || return 1 + grep -q "unknown erasure code profile 'WRONG'" $dir/out || return 1 +} + +function TEST_set_idempotent() { + local dir=$1 + local id=$2 + + run_mon $dir a || return 1 + # + # The default profile is set using a code path different from + # ceph osd erasure-code-profile set: verify that it is idempotent, + # as if it was using the same code path. + # + ceph osd erasure-code-profile set default k=2 m=1 2>&1 || return 1 + local profile + # + # Because plugin=jerasure is the default, it uses a slightly + # different code path where defaults (m=1 for instance) are added + # implicitly. + # + profile=profileidempotent1 + ! ceph osd erasure-code-profile ls | grep $profile || return 1 + ceph osd erasure-code-profile set $profile k=2 crush-failure-domain=osd 2>&1 || return 1 + ceph osd erasure-code-profile ls | grep $profile || return 1 + ceph osd erasure-code-profile set $profile k=2 crush-failure-domain=osd 2>&1 || return 1 + ceph osd erasure-code-profile rm $profile # cleanup + + # + # In the general case the profile is exactly what is on + # + profile=profileidempotent2 + ! ceph osd erasure-code-profile ls | grep $profile || return 1 + ceph osd erasure-code-profile set $profile plugin=lrc k=4 m=2 l=3 crush-failure-domain=osd 2>&1 || return 1 + ceph osd erasure-code-profile ls | grep $profile || return 1 + ceph osd erasure-code-profile set $profile plugin=lrc k=4 m=2 l=3 crush-failure-domain=osd 2>&1 || return 1 + ceph osd erasure-code-profile rm $profile # cleanup +} + +function TEST_format_invalid() { + local dir=$1 + + local profile=profile + # osd_pool_default_erasure-code-profile is + # valid JSON but not of the expected type + run_mon $dir a \ + --osd_pool_default_erasure-code-profile 1 || return 1 + ! 
ceph osd erasure-code-profile set $profile > $dir/out 2>&1 || return 1 + cat $dir/out + grep 'must be a JSON object' $dir/out || return 1 +} + +function TEST_format_json() { + local dir=$1 + + # osd_pool_default_erasure-code-profile is JSON + expected='"plugin":"isa"' + run_mon $dir a \ + --osd_pool_default_erasure-code-profile "{$expected}" || return 1 + ceph --format json osd erasure-code-profile get default | \ + grep "$expected" || return 1 +} + +function TEST_format_plain() { + local dir=$1 + + # osd_pool_default_erasure-code-profile is plain text + expected='"plugin":"isa"' + run_mon $dir a \ + --osd_pool_default_erasure-code-profile "plugin=isa" || return 1 + ceph --format json osd erasure-code-profile get default | \ + grep "$expected" || return 1 +} + +function TEST_profile_k_sanity() { + local dir=$1 + local profile=profile-sanity + + run_mon $dir a || return 1 + + expect_failure $dir 'k must be a multiple of (k + m) / l' \ + ceph osd erasure-code-profile set $profile \ + plugin=lrc \ + l=1 \ + k=1 \ + m=1 || return 1 + + if erasure_code_plugin_exists isa ; then + expect_failure $dir 'k=1 must be >= 2' \ + ceph osd erasure-code-profile set $profile \ + plugin=isa \ + k=1 \ + m=1 || return 1 + else + echo "SKIP because plugin isa has not been built" + fi + + expect_failure $dir 'k=1 must be >= 2' \ + ceph osd erasure-code-profile set $profile \ + plugin=jerasure \ + k=1 \ + m=1 || return 1 +} + +function TEST_invalid_crush_failure_domain() { + local dir=$1 + + run_mon $dir a || return 1 + + local profile=ec_profile + local crush_failure_domain=invalid_failure_domain + + ! ceph osd erasure-code-profile set $profile k=4 m=2 crush-failure-domain=$crush_failure_domain 2>&1 || return 1 +} + +main osd-erasure-code-profile "$@" + +# Local Variables: +# compile-command: "cd ../.. ; make -j4 && test/mon/osd-erasure-code-profile.sh" +# End: diff --git a/qa/standalone/mon/osd-pool-create.sh b/qa/standalone/mon/osd-pool-create.sh new file mode 100755 index 00000000..ecb94cb3 --- /dev/null +++ b/qa/standalone/mon/osd-pool-create.sh @@ -0,0 +1,328 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2013, 2014 Cloudwatt +# Copyright (C) 2014, 2015 Red Hat +# +# Author: Loic Dachary +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7105" # git grep '\<7105\>' : there must be only one + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + export CEPH_ARGS + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +# Before http://tracker.ceph.com/issues/8307 the invalid profile was created +function TEST_erasure_invalid_profile() { + local dir=$1 + run_mon $dir a || return 1 + local poolname=pool_erasure + local notaprofile=not-a-valid-erasure-code-profile + ! ceph osd pool create $poolname 12 12 erasure $notaprofile || return 1 + ! 
ceph osd erasure-code-profile ls | grep $notaprofile || return 1 +} + +function TEST_erasure_crush_rule() { + local dir=$1 + run_mon $dir a || return 1 + # + # choose the crush ruleset used with an erasure coded pool + # + local crush_ruleset=myruleset + ! ceph osd crush rule ls | grep $crush_ruleset || return 1 + ceph osd crush rule create-erasure $crush_ruleset + ceph osd crush rule ls | grep $crush_ruleset + local poolname + poolname=pool_erasure1 + ! ceph --format json osd dump | grep '"crush_rule":1' || return 1 + ceph osd pool create $poolname 12 12 erasure default $crush_ruleset + ceph --format json osd dump | grep '"crush_rule":1' || return 1 + # + # a crush ruleset by the name of the pool is implicitly created + # + poolname=pool_erasure2 + ceph osd erasure-code-profile set myprofile + ceph osd pool create $poolname 12 12 erasure myprofile + ceph osd crush rule ls | grep $poolname || return 1 + # + # a non existent crush ruleset given in argument is an error + # http://tracker.ceph.com/issues/9304 + # + poolname=pool_erasure3 + ! ceph osd pool create $poolname 12 12 erasure myprofile INVALIDRULESET || return 1 +} + +function TEST_erasure_code_profile_default() { + local dir=$1 + run_mon $dir a || return 1 + ceph osd erasure-code-profile rm default || return 1 + ! ceph osd erasure-code-profile ls | grep default || return 1 + ceph osd pool create $poolname 12 12 erasure default + ceph osd erasure-code-profile ls | grep default || return 1 +} + +function TEST_erasure_crush_stripe_unit() { + local dir=$1 + # the default stripe unit is used to initialize the pool + run_mon $dir a --public-addr $CEPH_MON + stripe_unit=$(ceph-conf --show-config-value osd_pool_erasure_code_stripe_unit) + eval local $(ceph osd erasure-code-profile get myprofile | grep k=) + stripe_width = $((stripe_unit * k)) + ceph osd pool create pool_erasure 12 12 erasure + ceph --format json osd dump | tee $dir/osd.json + grep '"stripe_width":'$stripe_width $dir/osd.json > /dev/null || return 1 +} + +function TEST_erasure_crush_stripe_unit_padded() { + local dir=$1 + # setting osd_pool_erasure_code_stripe_unit modifies the stripe_width + # and it is padded as required by the default plugin + profile+=" plugin=jerasure" + profile+=" technique=reed_sol_van" + k=4 + profile+=" k=$k" + profile+=" m=2" + actual_stripe_unit=2048 + desired_stripe_unit=$((actual_stripe_unit - 1)) + actual_stripe_width=$((actual_stripe_unit * k)) + run_mon $dir a \ + --osd_pool_erasure_code_stripe_unit $desired_stripe_unit \ + --osd_pool_default_erasure_code_profile "$profile" || return 1 + ceph osd pool create pool_erasure 12 12 erasure + ceph osd dump | tee $dir/osd.json + grep "stripe_width $actual_stripe_width" $dir/osd.json > /dev/null || return 1 +} + +function TEST_erasure_code_pool() { + local dir=$1 + run_mon $dir a || return 1 + ceph --format json osd dump > $dir/osd.json + local expected='"erasure_code_profile":"default"' + ! 
grep "$expected" $dir/osd.json || return 1 + ceph osd pool create erasurecodes 12 12 erasure + ceph --format json osd dump | tee $dir/osd.json + grep "$expected" $dir/osd.json > /dev/null || return 1 + + ceph osd pool create erasurecodes 12 12 erasure 2>&1 | \ + grep 'already exists' || return 1 + ceph osd pool create erasurecodes 12 12 2>&1 | \ + grep 'cannot change to type replicated' || return 1 +} + +function TEST_replicated_pool_with_ruleset() { + local dir=$1 + run_mon $dir a + local ruleset=ruleset0 + local root=host1 + ceph osd crush add-bucket $root host + local failure_domain=osd + local poolname=mypool + ceph osd crush rule create-simple $ruleset $root $failure_domain || return 1 + ceph osd crush rule ls | grep $ruleset + ceph osd pool create $poolname 12 12 replicated $ruleset || return 1 + rule_id=`ceph osd crush rule dump $ruleset | grep "rule_id" | awk -F[' ':,] '{print $4}'` + ceph osd pool get $poolname crush_rule 2>&1 | \ + grep "crush_rule: $rule_id" || return 1 + #non-existent crush ruleset + ceph osd pool create newpool 12 12 replicated non-existent 2>&1 | \ + grep "doesn't exist" || return 1 +} + +function TEST_erasure_code_pool_lrc() { + local dir=$1 + run_mon $dir a || return 1 + + ceph osd erasure-code-profile set LRCprofile \ + plugin=lrc \ + mapping=DD_ \ + layers='[ [ "DDc", "" ] ]' || return 1 + + ceph --format json osd dump > $dir/osd.json + local expected='"erasure_code_profile":"LRCprofile"' + local poolname=erasurecodes + ! grep "$expected" $dir/osd.json || return 1 + ceph osd pool create $poolname 12 12 erasure LRCprofile + ceph --format json osd dump | tee $dir/osd.json + grep "$expected" $dir/osd.json > /dev/null || return 1 + ceph osd crush rule ls | grep $poolname || return 1 +} + +function TEST_replicated_pool() { + local dir=$1 + run_mon $dir a || return 1 + ceph osd pool create replicated 12 12 replicated replicated_rule || return 1 + ceph osd pool create replicated 12 12 replicated replicated_rule 2>&1 | \ + grep 'already exists' || return 1 + # default is replicated + ceph osd pool create replicated1 12 12 || return 1 + # default is replicated, pgp_num = pg_num + ceph osd pool create replicated2 12 || return 1 + ceph osd pool create replicated 12 12 erasure 2>&1 | \ + grep 'cannot change to type erasure' || return 1 +} + +function TEST_no_pool_delete() { + local dir=$1 + run_mon $dir a || return 1 + ceph osd pool create foo 1 || return 1 + ceph tell mon.a injectargs -- --no-mon-allow-pool-delete || return 1 + ! ceph osd pool delete foo foo --yes-i-really-really-mean-it || return 1 + ceph tell mon.a injectargs -- --mon-allow-pool-delete || return 1 + ceph osd pool delete foo foo --yes-i-really-really-mean-it || return 1 +} + +function TEST_utf8_cli() { + local dir=$1 + run_mon $dir a || return 1 + # Hopefully it's safe to include literal UTF-8 characters to test + # the fix for http://tracker.ceph.com/issues/7387. 
If it turns out + # to not be OK (when is the default encoding *not* UTF-8?), maybe + # the character '黄' can be replaced with the escape $'\xe9\xbb\x84' + ceph osd pool create 黄 16 || return 1 + ceph osd lspools 2>&1 | \ + grep "黄" || return 1 + ceph -f json-pretty osd dump | \ + python -c "import json; import sys; json.load(sys.stdin)" || return 1 + ceph osd pool delete 黄 黄 --yes-i-really-really-mean-it +} + +function TEST_pool_create_rep_expected_num_objects() { + local dir=$1 + setup $dir || return 1 + + export CEPH_ARGS + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + # disable pg dir merge + run_osd_filestore $dir 0 || return 1 + + ceph osd pool create rep_expected_num_objects 64 64 replicated replicated_rule 100000 || return 1 + # wait for pg dir creating + sleep 30 + ceph pg ls + find ${dir}/0/current -ls + ret=$(find ${dir}/0/current/1.0_head/ | grep DIR | wc -l) + if [ "$ret" -le 2 ]; + then + return 1 + else + echo "TEST_pool_create_rep_expected_num_objects PASS" + fi +} + +function check_pool_priority() { + local dir=$1 + shift + local pools=$1 + shift + local spread="$1" + shift + local results="$1" + + setup $dir || return 1 + + EXTRA_OPTS="--debug_allow_any_pool_priority=true" + export EXTRA_OPTS + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + + # Add pool 0 too + for i in $(seq 0 $pools) + do + num=$(expr $i + 1) + ceph osd pool create test${num} 1 1 + done + + wait_for_clean || return 1 + for i in $(seq 0 $pools) + do + num=$(expr $i + 1) + ceph osd pool set test${num} recovery_priority $(expr $i \* $spread) + done + + #grep "recovery_priority.*pool set" out/mon.a.log + + bin/ceph osd dump + + # Restart everything so mon converts the priorities + kill_daemons + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + activate_osd $dir 0 || return 1 + activate_osd $dir 1 || return 1 + activate_osd $dir 2 || return 1 + sleep 5 + + grep convert $dir/mon.a.log + ceph osd dump + + pos=1 + for i in $(ceph osd dump | grep ^pool | sed 's/.*recovery_priority //' | awk '{ print $1 }') + do + result=$(echo $results | awk "{ print \$${pos} }") + # A value of 0 is an unset value so sed/awk gets "pool" + if test $result = "0" + then + result="pool" + fi + test "$result" = "$i" || return 1 + pos=$(expr $pos + 1) + done +} + +function TEST_pool_pos_only_prio() { + local dir=$1 + check_pool_priority $dir 20 5 "0 0 1 1 2 2 3 3 4 4 5 5 6 6 7 7 8 8 9 9 10" || return 1 +} + +function TEST_pool_neg_only_prio() { + local dir=$1 + check_pool_priority $dir 20 -5 "0 0 -1 -1 -2 -2 -3 -3 -4 -4 -5 -5 -6 -6 -7 -7 -8 -8 -9 -9 -10" || return 1 +} + +function TEST_pool_both_prio() { + local dir=$1 + check_pool_priority $dir 20 "5 - 50" "-10 -9 -8 -7 -6 -5 -4 -3 -2 -1 0 1 2 3 4 5 6 7 8 9 10" || return 1 +} + +function TEST_pool_both_prio_no_neg() { + local dir=$1 + check_pool_priority $dir 20 "2 - 4" "-4 -2 0 0 1 1 2 2 3 3 4 5 5 6 6 7 7 8 8 9 10" || return 1 +} + +function TEST_pool_both_prio_no_pos() { + local dir=$1 + check_pool_priority $dir 20 "2 - 36" "-10 -9 -8 -8 -7 -7 -6 -6 -5 -5 -4 -3 -3 -2 -2 -1 -1 0 0 2 4" || return 1 +} + + +main osd-pool-create "$@" + +# Local Variables: +# compile-command: "cd ../.. 
; make -j4 && test/mon/osd-pool-create.sh" +# End: diff --git a/qa/standalone/mon/osd-pool-df.sh b/qa/standalone/mon/osd-pool-df.sh new file mode 100755 index 00000000..3ed169d8 --- /dev/null +++ b/qa/standalone/mon/osd-pool-df.sh @@ -0,0 +1,75 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2017 Tencent +# +# Author: Chang Liu +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7113" # git grep '\<7113\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_ceph_df() { + local dir=$1 + setup $dir || return 1 + + run_mon $dir a || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + run_osd $dir 3 || return 1 + run_osd $dir 4 || return 1 + run_osd $dir 5 || return 1 + run_mgr $dir x || return 1 + + profile+=" plugin=jerasure" + profile+=" technique=reed_sol_van" + profile+=" k=4" + profile+=" m=2" + profile+=" crush-failure-domain=osd" + + ceph osd erasure-code-profile set ec42profile ${profile} + + local rep_poolname=testcephdf_replicate + local ec_poolname=testcephdf_erasurecode + create_pool $rep_poolname 6 6 replicated + create_pool $ec_poolname 6 6 erasure ec42profile + + local global_avail=`ceph df -f json | jq '.stats.total_avail_bytes'` + local rep_avail=`ceph df -f json | jq '.pools | map(select(.name == "$rep_poolname"))[0].stats.max_avail'` + local ec_avail=`ceph df -f json | jq '.pools | map(select(.name == "$ec_poolname"))[0].stats.max_avail'` + + echo "${global_avail} >= ${rep_avail}*3" | bc || return 1 + echo "${global_avail} >= ${ec_avail}*1.5" | bc || return 1 + + ceph osd pool delete $rep_poolname $rep_poolname --yes-i-really-really-mean-it + ceph osd pool delete $ec_poolname $ec_poolname --yes-i-really-really-mean-it + ceph osd erasure-code-profile rm ec42profile + teardown $dir || return 1 +} + +main osd-pool-df "$@" diff --git a/qa/standalone/mon/test_pool_quota.sh b/qa/standalone/mon/test_pool_quota.sh new file mode 100755 index 00000000..b87ec223 --- /dev/null +++ b/qa/standalone/mon/test_pool_quota.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash + +# +# Generic pool quota test +# + +# Includes + + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:17108" # git grep '\<17108\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + $func $dir || return 1 + done +} + +function TEST_pool_quota() { + local dir=$1 + setup $dir || return 1 + + run_mon $dir a || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 
1 + + local poolname=testquota + create_pool $poolname 20 + local objects=`ceph df detail | grep -w $poolname|awk '{print $3}'` + local bytes=`ceph df detail | grep -w $poolname|awk '{print $4}'` + + echo $objects + echo $bytes + if [ $objects != 'N/A' ] || [ $bytes != 'N/A' ] ; + then + return 1 + fi + + ceph osd pool set-quota $poolname max_objects 1000 + ceph osd pool set-quota $poolname max_bytes 1024 + + objects=`ceph df detail | grep -w $poolname|awk '{print $3}'` + bytes=`ceph df detail | grep -w $poolname|awk '{print $4}'` + + if [ $objects != '1000' ] || [ $bytes != '1K' ] ; + then + return 1 + fi + + ceph osd pool delete $poolname $poolname --yes-i-really-really-mean-it + teardown $dir || return 1 +} + +main testpoolquota diff --git a/qa/standalone/osd/bad-inc-map.sh b/qa/standalone/osd/bad-inc-map.sh new file mode 100755 index 00000000..cc3cf27c --- /dev/null +++ b/qa/standalone/osd/bad-inc-map.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +mon_port=$(get_unused_port) + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:$mon_port" + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + set -e + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_bad_inc_map() { + local dir=$1 + + run_mon $dir a + run_mgr $dir x + run_osd $dir 0 + run_osd $dir 1 + run_osd $dir 2 + + ceph config set osd.2 osd_inject_bad_map_crc_probability 1 + + # osd map churn + create_pool foo 8 + ceph osd pool set foo min_size 1 + ceph osd pool set foo min_size 2 + + sleep 5 + + # make sure all the OSDs are still up + TIMEOUT=10 wait_for_osd up 0 + TIMEOUT=10 wait_for_osd up 1 + TIMEOUT=10 wait_for_osd up 2 + + # check for the signature in the log + grep "injecting map crc failure" $dir/osd.2.log || return 1 + grep "bailing because last" $dir/osd.2.log || return 1 + + echo success + + delete_pool foo + kill_daemons $dir || return 1 +} + +main bad-inc-map "$@" + +# Local Variables: +# compile-command: "make -j4 && ../qa/run-standalone.sh bad-inc-map.sh" +# End: diff --git a/qa/standalone/osd/divergent-priors.sh b/qa/standalone/osd/divergent-priors.sh new file mode 100755 index 00000000..dec0e7ad --- /dev/null +++ b/qa/standalone/osd/divergent-priors.sh @@ -0,0 +1,840 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2019 Red Hat +# +# Author: David Zafman +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + # This should multiple of 6 + export loglen=12 + export divisor=3 + export trim=$(expr $loglen / 2) + export DIVERGENT_WRITE=$(expr $trim / $divisor) + export DIVERGENT_REMOVE=$(expr $trim / $divisor) + export DIVERGENT_CREATE=$(expr $trim / $divisor) + export poolname=test + export testobjects=100 + # Fix port???? 
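+    # (each standalone test reserves its own unique mon port, presumably so
+    # concurrently running scripts do not collide; hence the
+    # "git grep ... there must be only one" note on the next line)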
+ export CEPH_MON="127.0.0.1:7115" # git grep '\<7115\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + # so we will not force auth_log_shard to be acting_primary + CEPH_ARGS+="--osd_force_auth_primary_missing_objects=1000000 " + CEPH_ARGS+="--osd_debug_pg_log_writeout=true " + CEPH_ARGS+="--osd_min_pg_log_entries=$loglen --osd_max_pg_log_entries=$loglen --osd_pg_log_trim_min=$trim " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + + +# Special case divergence test +# Test handling of divergent entries with prior_version +# prior to log_tail +# based on qa/tasks/divergent_prior.py +function TEST_divergent() { + local dir=$1 + + # something that is always there + local dummyfile='/etc/fstab' + local dummyfile2='/etc/resolv.conf' + + local num_osds=3 + local osds="$(seq 0 $(expr $num_osds - 1))" + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + for i in $osds + do + run_osd $dir $i || return 1 + done + + ceph osd set noout + ceph osd set noin + ceph osd set nodown + create_pool $poolname 1 1 + ceph osd pool set $poolname size 3 + ceph osd pool set $poolname min_size 2 + + flush_pg_stats || return 1 + wait_for_clean || return 1 + + # determine primary + local divergent="$(ceph pg dump pgs --format=json | jq '.pg_stats[0].up_primary')" + echo "primary and soon to be divergent is $divergent" + ceph pg dump pgs + local non_divergent="" + for i in $osds + do + if [ "$i" = "$divergent" ]; then + continue + fi + non_divergent="$non_divergent $i" + done + + echo "writing initial objects" + # write a bunch of objects + for i in $(seq 1 $testobjects) + do + rados -p $poolname put existing_$i $dummyfile + done + + WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean + + local pgid=$(get_pg $poolname existing_1) + + # blackhole non_divergent + echo "blackholing osds $non_divergent" + ceph pg dump pgs + for i in $non_divergent + do + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) config set objectstore_blackhole 1 + done + + local case5=$testobjects + local case3=$(expr $testobjects - 1) + # Write some soon to be divergent + echo 'writing divergent object' + rados -p $poolname put existing_$case5 $dummyfile & + echo 'create missing divergent object' + inject_eio rep data $poolname existing_$case3 $dir 0 || return 1 + rados -p $poolname get existing_$case3 $dir/existing & + sleep 10 + killall -9 rados + + # kill all the osds but leave divergent in + echo 'killing all the osds' + ceph pg dump pgs + kill_daemons $dir KILL osd || return 1 + for i in $osds + do + ceph osd down osd.$i + done + for i in $non_divergent + do + ceph osd out osd.$i + done + + # bring up non-divergent + echo "bringing up non_divergent $non_divergent" + ceph pg dump pgs + for i in $non_divergent + do + activate_osd $dir $i || return 1 + done + for i in $non_divergent + do + ceph osd in osd.$i + done + + WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean + + # write 1 non-divergent object (ensure that old divergent one is divergent) + objname="existing_$(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE)" + echo "writing non-divergent object $objname" + ceph pg dump pgs + rados -p $poolname put $objname $dummyfile2 + + # ensure no recovery of up osds first + echo 'delay recovery' + ceph pg dump pgs + for i in $non_divergent + do + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) set_recovery_delay 100000 
+ done + + # bring in our divergent friend + echo "revive divergent $divergent" + ceph pg dump pgs + ceph osd set noup + activate_osd $dir $divergent + sleep 5 + + echo 'delay recovery divergent' + ceph pg dump pgs + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${divergent}) set_recovery_delay 100000 + + ceph osd unset noup + + wait_for_osd up 0 + wait_for_osd up 1 + wait_for_osd up 2 + + ceph pg dump pgs + echo 'wait for peering' + ceph pg dump pgs + rados -p $poolname put foo $dummyfile + + echo "killing divergent $divergent" + ceph pg dump pgs + kill_daemons $dir KILL osd.$divergent + #_objectstore_tool_nodown $dir $divergent --op log --pgid $pgid + echo "reviving divergent $divergent" + ceph pg dump pgs + activate_osd $dir $divergent + + sleep 20 + + echo "allowing recovery" + ceph pg dump pgs + # Set osd_recovery_delay_start back to 0 and kick the queue + for i in $osds + do + ceph tell osd.$i debug kick_recovery_wq 0 + done + + echo 'reading divergent objects' + ceph pg dump pgs + for i in $(seq 1 $(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE)) + do + rados -p $poolname get existing_$i $dir/existing || return 1 + done + rm -f $dir/existing + + grep _merge_object_divergent_entries $(find $dir -name '*osd*log') + # Check for _merge_object_divergent_entries for case #5 + if ! grep -q "_merge_object_divergent_entries.*cannot roll back, removing and adding to missing" $(find $dir -name '*osd*log') + then + echo failure + return 1 + fi + echo "success" + + delete_pool $poolname + kill_daemons $dir || return 1 +} + +function TEST_divergent_ec() { + local dir=$1 + + # something that is always there + local dummyfile='/etc/fstab' + local dummyfile2='/etc/resolv.conf' + + local num_osds=3 + local osds="$(seq 0 $(expr $num_osds - 1))" + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + for i in $osds + do + run_osd $dir $i || return 1 + done + + ceph osd set noout + ceph osd set noin + ceph osd set nodown + create_ec_pool $poolname true k=2 m=1 || return 1 + + flush_pg_stats || return 1 + wait_for_clean || return 1 + + # determine primary + local divergent="$(ceph pg dump pgs --format=json | jq '.pg_stats[0].up_primary')" + echo "primary and soon to be divergent is $divergent" + ceph pg dump pgs + local non_divergent="" + for i in $osds + do + if [ "$i" = "$divergent" ]; then + continue + fi + non_divergent="$non_divergent $i" + done + + echo "writing initial objects" + # write a bunch of objects + for i in $(seq 1 $testobjects) + do + rados -p $poolname put existing_$i $dummyfile + done + + WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean + + local pgid=$(get_pg $poolname existing_1) + + # blackhole non_divergent + echo "blackholing osds $non_divergent" + ceph pg dump pgs + for i in $non_divergent + do + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) config set objectstore_blackhole 1 + done + + # Write some soon to be divergent + echo 'writing divergent object' + rados -p $poolname put existing_$testobjects $dummyfile2 & + sleep 1 + rados -p $poolname put existing_$testobjects $dummyfile & + rados -p $poolname mksnap snap1 + rados -p $poolname put existing_$(expr $testobjects - 1) $dummyfile & + sleep 10 + killall -9 rados + + # kill all the osds but leave divergent in + echo 'killing all the osds' + ceph pg dump pgs + kill_daemons $dir KILL osd || return 1 + for i in $osds + do + ceph osd down osd.$i + done + for i in $non_divergent + do + ceph osd out osd.$i + done + + # bring up non-divergent + echo "bringing up non_divergent $non_divergent" + ceph pg dump pgs + for i in 
$non_divergent + do + activate_osd $dir $i || return 1 + done + for i in $non_divergent + do + ceph osd in osd.$i + done + + sleep 5 + #WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean + + # write 1 non-divergent object (ensure that old divergent one is divergent) + objname="existing_$(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE)" + echo "writing non-divergent object $objname" + ceph pg dump pgs + rados -p $poolname put $objname $dummyfile2 + + WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean + + # Dump logs + for i in $non_divergent + do + kill_daemons $dir KILL osd.$i || return 1 + _objectstore_tool_nodown $dir $i --op log --pgid $pgid + activate_osd $dir $i || return 1 + done + _objectstore_tool_nodown $dir $divergent --op log --pgid $pgid + + WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean + + # ensure no recovery of up osds first + echo 'delay recovery' + ceph pg dump pgs + for i in $non_divergent + do + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) set_recovery_delay 100000 + done + + # bring in our divergent friend + echo "revive divergent $divergent" + ceph pg dump pgs + ceph osd set noup + activate_osd $dir $divergent + sleep 5 + + echo 'delay recovery divergent' + ceph pg dump pgs + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${divergent}) set_recovery_delay 100000 + + ceph osd unset noup + + wait_for_osd up 0 + wait_for_osd up 1 + wait_for_osd up 2 + + ceph pg dump pgs + echo 'wait for peering' + ceph pg dump pgs + rados -p $poolname put foo $dummyfile + + echo "killing divergent $divergent" + ceph pg dump pgs + kill_daemons $dir KILL osd.$divergent + #_objectstore_tool_nodown $dir $divergent --op log --pgid $pgid + echo "reviving divergent $divergent" + ceph pg dump pgs + activate_osd $dir $divergent + + sleep 20 + + echo "allowing recovery" + ceph pg dump pgs + # Set osd_recovery_delay_start back to 0 and kick the queue + for i in $osds + do + ceph tell osd.$i debug kick_recovery_wq 0 + done + + echo 'reading divergent objects' + ceph pg dump pgs + for i in $(seq 1 $(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE)) + do + rados -p $poolname get existing_$i $dir/existing || return 1 + done + rm -f $dir/existing + + grep _merge_object_divergent_entries $(find $dir -name '*osd*log') + # Check for _merge_object_divergent_entries for case #3 + # XXX: Not reproducing this case +# if ! grep -q "_merge_object_divergent_entries.* missing, .* adjusting" $(find $dir -name '*osd*log') +# then +# echo failure +# return 1 +# fi + # Check for _merge_object_divergent_entries for case #4 + if ! 
grep -q "_merge_object_divergent_entries.*rolled back" $(find $dir -name '*osd*log') + then + echo failure + return 1 + fi + echo "success" + + delete_pool $poolname + kill_daemons $dir || return 1 +} + +# Special case divergence test with ceph-objectstore-tool export/remove/import +# Test handling of divergent entries with prior_version +# prior to log_tail and a ceph-objectstore-tool export/import +# based on qa/tasks/divergent_prior2.py +function TEST_divergent_2() { + local dir=$1 + + # something that is always there + local dummyfile='/etc/fstab' + local dummyfile2='/etc/resolv.conf' + + local num_osds=3 + local osds="$(seq 0 $(expr $num_osds - 1))" + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + for i in $osds + do + run_osd $dir $i || return 1 + done + + ceph osd set noout + ceph osd set noin + ceph osd set nodown + create_pool $poolname 1 1 + ceph osd pool set $poolname size 3 + ceph osd pool set $poolname min_size 2 + + flush_pg_stats || return 1 + wait_for_clean || return 1 + + # determine primary + local divergent="$(ceph pg dump pgs --format=json | jq '.pg_stats[0].up_primary')" + echo "primary and soon to be divergent is $divergent" + ceph pg dump pgs + local non_divergent="" + for i in $osds + do + if [ "$i" = "$divergent" ]; then + continue + fi + non_divergent="$non_divergent $i" + done + + echo "writing initial objects" + # write a bunch of objects + for i in $(seq 1 $testobjects) + do + rados -p $poolname put existing_$i $dummyfile + done + + WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean + + local pgid=$(get_pg $poolname existing_1) + + # blackhole non_divergent + echo "blackholing osds $non_divergent" + ceph pg dump pgs + for i in $non_divergent + do + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) config set objectstore_blackhole 1 + done + + # Do some creates to hit case 2 + echo 'create new divergent objects' + for i in $(seq 1 $DIVERGENT_CREATE) + do + rados -p $poolname create newobject_$i & + done + # Write some soon to be divergent + echo 'writing divergent objects' + for i in $(seq 1 $DIVERGENT_WRITE) + do + rados -p $poolname put existing_$i $dummyfile2 & + done + # Remove some soon to be divergent + echo 'remove divergent objects' + for i in $(seq 1 $DIVERGENT_REMOVE) + do + rmi=$(expr $i + $DIVERGENT_WRITE) + rados -p $poolname rm existing_$rmi & + done + sleep 10 + killall -9 rados + + # kill all the osds but leave divergent in + echo 'killing all the osds' + ceph pg dump pgs + kill_daemons $dir KILL osd || return 1 + for i in $osds + do + ceph osd down osd.$i + done + for i in $non_divergent + do + ceph osd out osd.$i + done + + # bring up non-divergent + echo "bringing up non_divergent $non_divergent" + ceph pg dump pgs + for i in $non_divergent + do + activate_osd $dir $i || return 1 + done + for i in $non_divergent + do + ceph osd in osd.$i + done + + WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean + + # write 1 non-divergent object (ensure that old divergent one is divergent) + objname="existing_$(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE)" + echo "writing non-divergent object $objname" + ceph pg dump pgs + rados -p $poolname put $objname $dummyfile2 + + WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean + + # ensure no recovery of up osds first + echo 'delay recovery' + ceph pg dump pgs + for i in $non_divergent + do + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) set_recovery_delay 100000 + done + + # bring in our divergent friend + echo "revive divergent $divergent" + ceph pg dump pgs + ceph osd set noup + activate_osd $dir $divergent + 
sleep 5 + + echo 'delay recovery divergent' + ceph pg dump pgs + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${divergent}) set_recovery_delay 100000 + + ceph osd unset noup + + wait_for_osd up 0 + wait_for_osd up 1 + wait_for_osd up 2 + + ceph pg dump pgs + echo 'wait for peering' + ceph pg dump pgs + rados -p $poolname put foo $dummyfile + + # At this point the divergent_priors should have been detected + + echo "killing divergent $divergent" + ceph pg dump pgs + kill_daemons $dir KILL osd.$divergent + + # export a pg + expfile=$dir/exp.$$.out + _objectstore_tool_nodown $dir $divergent --op export-remove --pgid $pgid --file $expfile + _objectstore_tool_nodown $dir $divergent --op import --file $expfile + + echo "reviving divergent $divergent" + ceph pg dump pgs + activate_osd $dir $divergent + wait_for_osd up $divergent + + sleep 20 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${divergent}) dump_ops_in_flight + + echo "allowing recovery" + ceph pg dump pgs + # Set osd_recovery_delay_start back to 0 and kick the queue + for i in $osds + do + ceph tell osd.$i debug kick_recovery_wq 0 + done + + echo 'reading divergent objects' + ceph pg dump pgs + for i in $(seq 1 $(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE)) + do + rados -p $poolname get existing_$i $dir/existing || return 1 + done + for i in $(seq 1 $DIVERGENT_CREATE) + do + rados -p $poolname get newobject_$i $dir/existing + done + rm -f $dir/existing + + grep _merge_object_divergent_entries $(find $dir -name '*osd*log') + # Check for _merge_object_divergent_entries for case #1 + if ! grep -q "_merge_object_divergent_entries: more recent entry found:" $(find $dir -name '*osd*log') + then + echo failure + return 1 + fi + # Check for _merge_object_divergent_entries for case #2 + if ! 
grep -q "_merge_object_divergent_entries.*prior_version or op type indicates creation" $(find $dir -name '*osd*log') + then + echo failure + return 1 + fi + echo "success" + + rm $dir/$expfile + + delete_pool $poolname + kill_daemons $dir || return 1 +} + +# this is the same as case _2 above, except we enable pg autoscaling in order +# to reproduce https://tracker.ceph.com/issues/41816 +function TEST_divergent_3() { + local dir=$1 + + # something that is always there + local dummyfile='/etc/fstab' + local dummyfile2='/etc/resolv.conf' + + local num_osds=3 + local osds="$(seq 0 $(expr $num_osds - 1))" + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + for i in $osds + do + run_osd $dir $i || return 1 + done + + ceph osd set noout + ceph osd set noin + ceph osd set nodown + create_pool $poolname 1 1 + ceph osd pool set $poolname size 3 + ceph osd pool set $poolname min_size 2 + + # reproduce https://tracker.ceph.com/issues/41816 + ceph osd pool set $poolname pg_autoscale_mode on + + flush_pg_stats || return 1 + wait_for_clean || return 1 + + # determine primary + local divergent="$(ceph pg dump pgs --format=json | jq '.pg_stats[0].up_primary')" + echo "primary and soon to be divergent is $divergent" + ceph pg dump pgs + local non_divergent="" + for i in $osds + do + if [ "$i" = "$divergent" ]; then + continue + fi + non_divergent="$non_divergent $i" + done + + echo "writing initial objects" + # write a bunch of objects + for i in $(seq 1 $testobjects) + do + rados -p $poolname put existing_$i $dummyfile + done + + WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean + + local pgid=$(get_pg $poolname existing_1) + + # blackhole non_divergent + echo "blackholing osds $non_divergent" + ceph pg dump pgs + for i in $non_divergent + do + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) config set objectstore_blackhole 1 + done + + # Do some creates to hit case 2 + echo 'create new divergent objects' + for i in $(seq 1 $DIVERGENT_CREATE) + do + rados -p $poolname create newobject_$i & + done + # Write some soon to be divergent + echo 'writing divergent objects' + for i in $(seq 1 $DIVERGENT_WRITE) + do + rados -p $poolname put existing_$i $dummyfile2 & + done + # Remove some soon to be divergent + echo 'remove divergent objects' + for i in $(seq 1 $DIVERGENT_REMOVE) + do + rmi=$(expr $i + $DIVERGENT_WRITE) + rados -p $poolname rm existing_$rmi & + done + sleep 10 + killall -9 rados + + # kill all the osds but leave divergent in + echo 'killing all the osds' + ceph pg dump pgs + kill_daemons $dir KILL osd || return 1 + for i in $osds + do + ceph osd down osd.$i + done + for i in $non_divergent + do + ceph osd out osd.$i + done + + # bring up non-divergent + echo "bringing up non_divergent $non_divergent" + ceph pg dump pgs + for i in $non_divergent + do + activate_osd $dir $i || return 1 + done + for i in $non_divergent + do + ceph osd in osd.$i + done + + WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean + + # write 1 non-divergent object (ensure that old divergent one is divergent) + objname="existing_$(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE)" + echo "writing non-divergent object $objname" + ceph pg dump pgs + rados -p $poolname put $objname $dummyfile2 + + WAIT_FOR_CLEAN_TIMEOUT=20 wait_for_clean + + # ensure no recovery of up osds first + echo 'delay recovery' + ceph pg dump pgs + for i in $non_divergent + do + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${i}) set_recovery_delay 100000 + done + + # bring in our divergent friend + echo "revive divergent $divergent" + ceph pg dump pgs + 
ceph osd set noup + activate_osd $dir $divergent + sleep 5 + + echo 'delay recovery divergent' + ceph pg dump pgs + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${divergent}) set_recovery_delay 100000 + + ceph osd unset noup + + wait_for_osd up 0 + wait_for_osd up 1 + wait_for_osd up 2 + + ceph pg dump pgs + echo 'wait for peering' + ceph pg dump pgs + rados -p $poolname put foo $dummyfile + + # At this point the divergent_priors should have been detected + + echo "killing divergent $divergent" + ceph pg dump pgs + kill_daemons $dir KILL osd.$divergent + + # export a pg + expfile=$dir/exp.$$.out + _objectstore_tool_nodown $dir $divergent --op export-remove --pgid $pgid --file $expfile + _objectstore_tool_nodown $dir $divergent --op import --file $expfile + + echo "reviving divergent $divergent" + ceph pg dump pgs + activate_osd $dir $divergent + wait_for_osd up $divergent + + sleep 20 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${divergent}) dump_ops_in_flight + + echo "allowing recovery" + ceph pg dump pgs + # Set osd_recovery_delay_start back to 0 and kick the queue + for i in $osds + do + ceph tell osd.$i debug kick_recovery_wq 0 + done + + echo 'reading divergent objects' + ceph pg dump pgs + for i in $(seq 1 $(expr $DIVERGENT_WRITE + $DIVERGENT_REMOVE)) + do + rados -p $poolname get existing_$i $dir/existing || return 1 + done + for i in $(seq 1 $DIVERGENT_CREATE) + do + rados -p $poolname get newobject_$i $dir/existing + done + rm -f $dir/existing + + grep _merge_object_divergent_entries $(find $dir -name '*osd*log') + # Check for _merge_object_divergent_entries for case #1 + if ! grep -q "_merge_object_divergent_entries: more recent entry found:" $(find $dir -name '*osd*log') + then + echo failure + return 1 + fi + # Check for _merge_object_divergent_entries for case #2 + if ! grep -q "_merge_object_divergent_entries.*prior_version or op type indicates creation" $(find $dir -name '*osd*log') + then + echo failure + return 1 + fi + echo "success" + + rm $dir/$expfile + + delete_pool $poolname + kill_daemons $dir || return 1 +} + + +main divergent-priors "$@" + +# Local Variables: +# compile-command: "make -j4 && ../qa/run-standalone.sh divergent-priors.sh" +# End: diff --git a/qa/standalone/osd/ec-error-rollforward.sh b/qa/standalone/osd/ec-error-rollforward.sh new file mode 100755 index 00000000..621e6b13 --- /dev/null +++ b/qa/standalone/osd/ec-error-rollforward.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + # Fix port???? + export CEPH_MON="127.0.0.1:7132" # git grep '\<7132\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + export margin=10 + export objects=200 + export poolname=test + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_ec_error_rollforward() { + local dir=$1 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + run_osd $dir 3 || return 1 + + ceph osd erasure-code-profile set ec-profile m=2 k=2 crush-failure-domain=osd + ceph osd pool create ec 1 1 erasure ec-profile + + rados -p ec put foo /etc/passwd + + kill -STOP $(cat $dir/osd.2.pid) + + rados -p ec rm foo & + pids="$!" + sleep 1 + rados -p ec rm a & + pids+=" $!" 
+ rados -p ec rm b & + pids+=" $!" + rados -p ec rm c & + pids+=" $!" + sleep 1 + # Use SIGKILL so stopped osd.2 will terminate + # and kill_daemons waits for daemons to die + kill_daemons $dir KILL osd + kill $pids + wait + + activate_osd $dir 0 || return 1 + activate_osd $dir 1 || return 1 + activate_osd $dir 2 || return 1 + activate_osd $dir 3 || return 1 + + wait_for_clean || return 1 +} + +main ec-error-rollforward "$@" diff --git a/qa/standalone/osd/osd-backfill-prio.sh b/qa/standalone/osd/osd-backfill-prio.sh new file mode 100755 index 00000000..a089696b --- /dev/null +++ b/qa/standalone/osd/osd-backfill-prio.sh @@ -0,0 +1,519 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2019 Red Hat +# +# Author: David Zafman +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + # Fix port???? + export CEPH_MON="127.0.0.1:7114" # git grep '\<7114\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON --osd_max_backfills=1 --debug_reserver=20 " + CEPH_ARGS+="--osd_min_pg_log_entries=5 --osd_max_pg_log_entries=10 " + export objects=50 + export poolprefix=test + export FORCE_PRIO="254" # See OSD_BACKFILL_PRIORITY_FORCED + export DEGRADED_PRIO="150" # See OSD_BACKFILL_DEGRADED_PRIORITY_BASE + 10 + export NORMAL_PRIO="110" # See OSD_BACKFILL_PRIORITY_BASE + 10 + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + + +function TEST_backfill_priority() { + local dir=$1 + local pools=10 + local OSDS=5 + # size 2 -> 1 means degraded by 1, so add 1 to base prio + local degraded_prio=$(expr $DEGRADED_PRIO + 1) + local max_tries=10 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + export CEPH_ARGS + + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + for p in $(seq 1 $pools) + do + create_pool "${poolprefix}$p" 1 1 + ceph osd pool set "${poolprefix}$p" size 2 + done + sleep 5 + + wait_for_clean || return 1 + + ceph pg dump pgs + + # Find 3 pools with a pg with the same primaries but second + # replica on another osd. 
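+    # PG1/PG2/PG3 (and their pool names) plus the chk_osd* variables below
+    # record the chosen placement groups and their acting OSDs for the
+    # priority checks that follow.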
+ local PG1 + local POOLNUM1 + local pool1 + local chk_osd1_1 + local chk_osd1_2 + + local PG2 + local POOLNUM2 + local pool2 + local chk_osd2 + + local PG3 + local POOLNUM3 + local pool3 + + for p in $(seq 1 $pools) + do + ceph pg map ${p}.0 --format=json | jq '.acting[]' > $dir/acting + local test_osd1=$(head -1 $dir/acting) + local test_osd2=$(tail -1 $dir/acting) + if [ -z "$PG1" ]; + then + PG1="${p}.0" + POOLNUM1=$p + pool1="${poolprefix}$p" + chk_osd1_1=$test_osd1 + chk_osd1_2=$test_osd2 + elif [ -z "$PG2" -a $chk_osd1_1 = $test_osd1 -a $chk_osd1_2 != $test_osd2 ]; + then + PG2="${p}.0" + POOLNUM2=$p + pool2="${poolprefix}$p" + chk_osd2=$test_osd2 + elif [ -n "$PG2" -a $chk_osd1_1 = $test_osd1 -a $chk_osd1_2 != $test_osd2 -a "$chk_osd2" != $test_osd2 ]; + then + PG3="${p}.0" + POOLNUM3=$p + pool3="${poolprefix}$p" + break + fi + done + rm -f $dir/acting + + if [ "$pool2" = "" -o "pool3" = "" ]; + then + echo "Failure to find appropirate PGs" + return 1 + fi + + for p in $(seq 1 $pools) + do + if [ $p != $POOLNUM1 -a $p != $POOLNUM2 -a $p != $POOLNUM3 ]; + then + delete_pool ${poolprefix}$p + fi + done + + ceph osd pool set $pool2 size 1 + ceph osd pool set $pool3 size 1 + wait_for_clean || return 1 + + dd if=/dev/urandom of=$dir/data bs=1M count=10 + p=1 + for pname in $pool1 $pool2 $pool3 + do + for i in $(seq 1 $objects) + do + rados -p ${pname} put obj${i}-p${p} $dir/data + done + p=$(expr $p + 1) + done + + local otherosd=$(get_not_primary $pool1 obj1-p1) + + ceph pg dump pgs + ERRORS=0 + + ceph osd set nobackfill + ceph osd set noout + + # Get a pg to want to backfill and quickly force it + # to be preempted. + ceph osd pool set $pool3 size 2 + sleep 2 + + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1 + + # 3. Item is in progress, adjust priority with no higher priority waiting + for i in $(seq 1 $max_tries) + do + if ! ceph pg force-backfill $PG3 2>&1 | grep -q "doesn't require backfilling"; then + break + fi + if [ "$i" = "$max_tries" ]; then + echo "ERROR: Didn't appear to be able to force-backfill" + ERRORS=$(expr $ERRORS + 1) + fi + sleep 2 + done + flush_pg_stats || return 1 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1 + + ceph osd out osd.$chk_osd1_2 + sleep 2 + flush_pg_stats || return 1 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1 + ceph pg dump pgs + + ceph osd pool set $pool2 size 2 + sleep 2 + flush_pg_stats || return 1 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1 + cat $dir/out + ceph pg dump pgs + + PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG1}\")).prio") + if [ "$PRIO" != "$NORMAL_PRIO" ]; + then + echo "The normal PG ${PG1} doesn't have prio $NORMAL_PRIO queued waiting" + ERRORS=$(expr $ERRORS + 1) + fi + + # Using eval will strip double-quotes from item + eval ITEM=$(cat $dir/out | jq '.local_reservations.in_progress[0].item') + if [ "$ITEM" != ${PG3} ]; + then + echo "The force-backfill PG $PG3 didn't become the in progress item" + ERRORS=$(expr $ERRORS + 1) + else + PRIO=$(cat $dir/out | jq '.local_reservations.in_progress[0].prio') + if [ "$PRIO" != $FORCE_PRIO ]; + then + echo "The force-backfill PG ${PG3} doesn't have prio $FORCE_PRIO" + ERRORS=$(expr $ERRORS + 1) + fi + fi + + # 1. 
Item is queued, re-queue with new priority + for i in $(seq 1 $max_tries) + do + if ! ceph pg force-backfill $PG2 2>&1 | grep -q "doesn't require backfilling"; then + break + fi + if [ "$i" = "$max_tries" ]; then + echo "ERROR: Didn't appear to be able to force-backfill" + ERRORS=$(expr $ERRORS + 1) + fi + sleep 2 + done + sleep 2 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1 + cat $dir/out + PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG2}\")).prio") + if [ "$PRIO" != "$FORCE_PRIO" ]; + then + echo "The second force-backfill PG ${PG2} doesn't have prio $FORCE_PRIO" + ERRORS=$(expr $ERRORS + 1) + fi + flush_pg_stats || return 1 + + # 4. Item is in progress, if higher priority items waiting prempt item + ceph pg cancel-force-backfill $PG3 || return 1 + sleep 2 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1 + cat $dir/out + PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG3}\")).prio") + if [ "$PRIO" != "$degraded_prio" ]; + then + echo "After cancel-force-backfill PG ${PG3} doesn't have prio $degraded_prio" + ERRORS=$(expr $ERRORS + 1) + fi + + eval ITEM=$(cat $dir/out | jq '.local_reservations.in_progress[0].item') + if [ "$ITEM" != ${PG2} ]; + then + echo "The force-recovery PG $PG2 didn't become the in progress item" + ERRORS=$(expr $ERRORS + 1) + else + PRIO=$(cat $dir/out | jq '.local_reservations.in_progress[0].prio') + if [ "$PRIO" != $FORCE_PRIO ]; + then + echo "The first force-recovery PG ${PG2} doesn't have prio $FORCE_PRIO" + ERRORS=$(expr $ERRORS + 1) + fi + fi + + ceph pg cancel-force-backfill $PG2 || return 1 + sleep 5 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1 + + # 2. 
Item is queued, re-queue and preempt because new priority higher than an in progress item + flush_pg_stats || return 1 + ceph pg force-backfill $PG3 || return 1 + sleep 2 + + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1 + cat $dir/out + PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG2}\")).prio") + if [ "$PRIO" != "$degraded_prio" ]; + then + echo "After cancel-force-backfill PG ${PG2} doesn't have prio $degraded_prio" + ERRORS=$(expr $ERRORS + 1) + fi + + eval ITEM=$(cat $dir/out | jq '.local_reservations.in_progress[0].item') + if [ "$ITEM" != ${PG3} ]; + then + echo "The force-backfill PG $PG3 didn't get promoted to an in progress item" + ERRORS=$(expr $ERRORS + 1) + else + PRIO=$(cat $dir/out | jq '.local_reservations.in_progress[0].prio') + if [ "$PRIO" != $FORCE_PRIO ]; + then + echo "The force-backfill PG ${PG2} doesn't have prio $FORCE_PRIO" + ERRORS=$(expr $ERRORS + 1) + fi + fi + + ceph osd unset noout + ceph osd unset nobackfill + + wait_for_clean "CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations" || return 1 + + ceph pg dump pgs + + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_pgstate_history + + if [ $ERRORS != "0" ]; + then + echo "$ERRORS error(s) found" + else + echo TEST PASSED + fi + + delete_pool $pool1 + delete_pool $pool2 + delete_pool $pool3 + kill_daemons $dir || return 1 + return $ERRORS +} + +# +# Show that pool recovery_priority is added to the backfill priority +# +# Create 2 pools with 2 OSDs with different primarys +# pool 1 with recovery_priority 1 +# pool 2 with recovery_priority 2 +# +# Start backfill by changing the pool sizes from 1 to 2 +# Use dump_recovery_reservations to verify priorities +function TEST_backfill_pool_priority() { + local dir=$1 + local pools=3 # Don't assume the first 2 pools are exact what we want + local OSDS=2 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + export CEPH_ARGS + + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + for p in $(seq 1 $pools) + do + create_pool "${poolprefix}$p" 1 1 + ceph osd pool set "${poolprefix}$p" size 2 + done + sleep 5 + + wait_for_clean || return 1 + + ceph pg dump pgs + + # Find 2 pools with different primaries which + # means the replica must be on another osd. 
+ local PG1 + local POOLNUM1 + local pool1 + local chk_osd1_1 + local chk_osd1_2 + + local PG2 + local POOLNUM2 + local pool2 + local chk_osd2_1 + local chk_osd2_2 + + for p in $(seq 1 $pools) + do + ceph pg map ${p}.0 --format=json | jq '.acting[]' > $dir/acting + local test_osd1=$(head -1 $dir/acting) + local test_osd2=$(tail -1 $dir/acting) + if [ -z "$PG1" ]; + then + PG1="${p}.0" + POOLNUM1=$p + pool1="${poolprefix}$p" + chk_osd1_1=$test_osd1 + chk_osd1_2=$test_osd2 + elif [ $chk_osd1_1 != $test_osd1 ]; + then + PG2="${p}.0" + POOLNUM2=$p + pool2="${poolprefix}$p" + chk_osd2_1=$test_osd1 + chk_osd2_2=$test_osd2 + break + fi + done + rm -f $dir/acting + + if [ "$pool2" = "" ]; + then + echo "Failure to find appropirate PGs" + return 1 + fi + + for p in $(seq 1 $pools) + do + if [ $p != $POOLNUM1 -a $p != $POOLNUM2 ]; + then + delete_pool ${poolprefix}$p + fi + done + + pool1_extra_prio=1 + pool2_extra_prio=2 + # size 2 -> 1 means degraded by 1, so add 1 to base prio + pool1_prio=$(expr $DEGRADED_PRIO + 1 + $pool1_extra_prio) + pool2_prio=$(expr $DEGRADED_PRIO + 1 + $pool2_extra_prio) + + ceph osd pool set $pool1 size 1 + ceph osd pool set $pool1 recovery_priority $pool1_extra_prio + ceph osd pool set $pool2 size 1 + ceph osd pool set $pool2 recovery_priority $pool2_extra_prio + wait_for_clean || return 1 + + dd if=/dev/urandom of=$dir/data bs=1M count=10 + p=1 + for pname in $pool1 $pool2 + do + for i in $(seq 1 $objects) + do + rados -p ${pname} put obj${i}-p${p} $dir/data + done + p=$(expr $p + 1) + done + + local otherosd=$(get_not_primary $pool1 obj1-p1) + + ceph pg dump pgs + ERRORS=0 + + ceph osd pool set $pool1 size 2 + ceph osd pool set $pool2 size 2 + sleep 5 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/dump.${chk_osd1_1}.out + echo osd.${chk_osd1_1} + cat $dir/dump.${chk_osd1_1}.out + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_2}) dump_recovery_reservations > $dir/dump.${chk_osd1_2}.out + echo osd.${chk_osd1_2} + cat $dir/dump.${chk_osd1_2}.out + + # Using eval will strip double-quotes from item + eval ITEM=$(cat $dir/dump.${chk_osd1_1}.out | jq '.local_reservations.in_progress[0].item') + if [ "$ITEM" != ${PG1} ]; + then + echo "The primary PG ${PG1} didn't become the in progress item" + ERRORS=$(expr $ERRORS + 1) + else + PRIO=$(cat $dir/dump.${chk_osd1_1}.out | jq '.local_reservations.in_progress[0].prio') + if [ "$PRIO" != $pool1_prio ]; + then + echo "The primary PG ${PG1} doesn't have prio $pool1_prio" + ERRORS=$(expr $ERRORS + 1) + fi + fi + + # Using eval will strip double-quotes from item + eval ITEM=$(cat $dir/dump.${chk_osd1_2}.out | jq '.remote_reservations.in_progress[0].item') + if [ "$ITEM" != ${PG1} ]; + then + echo "The primary PG ${PG1} didn't become the in progress item on remote" + ERRORS=$(expr $ERRORS + 1) + else + PRIO=$(cat $dir/dump.${chk_osd1_2}.out | jq '.remote_reservations.in_progress[0].prio') + if [ "$PRIO" != $pool1_prio ]; + then + echo "The primary PG ${PG1} doesn't have prio $pool1_prio on remote" + ERRORS=$(expr $ERRORS + 1) + fi + fi + + # Using eval will strip double-quotes from item + eval ITEM=$(cat $dir/dump.${chk_osd2_1}.out | jq '.local_reservations.in_progress[0].item') + if [ "$ITEM" != ${PG2} ]; + then + echo "The primary PG ${PG2} didn't become the in progress item" + ERRORS=$(expr $ERRORS + 1) + else + PRIO=$(cat $dir/dump.${chk_osd2_1}.out | jq '.local_reservations.in_progress[0].prio') + if [ "$PRIO" != $pool2_prio ]; + then + echo "The primary PG 
${PG2} doesn't have prio $pool2_prio" + ERRORS=$(expr $ERRORS + 1) + fi + fi + + # Using eval will strip double-quotes from item + eval ITEM=$(cat $dir/dump.${chk_osd2_2}.out | jq '.remote_reservations.in_progress[0].item') + if [ "$ITEM" != ${PG2} ]; + then + echo "The primary PG $PG2 didn't become the in progress item on remote" + ERRORS=$(expr $ERRORS + 1) + else + PRIO=$(cat $dir/dump.${chk_osd2_2}.out | jq '.remote_reservations.in_progress[0].prio') + if [ "$PRIO" != $pool2_prio ]; + then + echo "The primary PG ${PG2} doesn't have prio $pool2_prio on remote" + ERRORS=$(expr $ERRORS + 1) + fi + fi + + wait_for_clean || return 1 + + if [ $ERRORS != "0" ]; + then + echo "$ERRORS error(s) found" + else + echo TEST PASSED + fi + + delete_pool $pool1 + delete_pool $pool2 + kill_daemons $dir || return 1 + return $ERRORS +} + +main osd-backfill-prio "$@" + +# Local Variables: +# compile-command: "make -j4 && ../qa/run-standalone.sh osd-backfill-prio.sh" +# End: diff --git a/qa/standalone/osd/osd-backfill-recovery-log.sh b/qa/standalone/osd/osd-backfill-recovery-log.sh new file mode 100755 index 00000000..e55250e8 --- /dev/null +++ b/qa/standalone/osd/osd-backfill-recovery-log.sh @@ -0,0 +1,136 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2019 Red Hat +# +# Author: David Zafman +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + # Fix port???? 
+ export CEPH_MON="127.0.0.1:7129" # git grep '\<7129\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON --osd_max_backfills=1 --debug_reserver=20 " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + + +function _common_test() { + local dir=$1 + local extra_opts="$2" + local loglen="$3" + local dupslen="$4" + local objects="$5" + local moreobjects=${6:-0} + + local OSDS=6 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + export CEPH_ARGS + export EXTRA_OPTS=" $extra_opts" + + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + create_pool test 1 1 + + for j in $(seq 1 $objects) + do + rados -p test put obj-${j} /etc/passwd + done + + # Mark out all OSDs for this pool + ceph osd out $(ceph pg dump pgs --format=json | jq '.pg_stats[0].up[]') + if [ "$moreobjects" != "0" ]; then + for j in $(seq 1 $moreobjects) + do + rados -p test put obj-more-${j} /etc/passwd + done + fi + sleep 1 + wait_for_clean + + newprimary=$(ceph pg dump pgs --format=json | jq '.pg_stats[0].up_primary') + kill_daemons + + ERRORS=0 + _objectstore_tool_nodown $dir $newprimary --no-mon-config --pgid 1.0 --op log | tee $dir/result.log + LOGLEN=$(jq '.pg_log_t.log | length' $dir/result.log) + if [ $LOGLEN != "$loglen" ]; then + echo "FAILED: Wrong log length got $LOGLEN (expected $loglen)" + ERRORS=$(expr $ERRORS + 1) + fi + DUPSLEN=$(jq '.pg_log_t.dups | length' $dir/result.log) + if [ $DUPSLEN != "$dupslen" ]; then + echo "FAILED: Wrong dups length got $DUPSLEN (expected $dupslen)" + ERRORS=$(expr $ERRORS + 1) + fi + grep "copy_up_to\|copy_after" $dir/osd.*.log + rm -f $dir/result.log + if [ $ERRORS != "0" ]; then + echo TEST FAILED + return 1 + fi +} + + +# Cause copy_up_to() to only partially copy logs, copy additional dups, and trim dups +function TEST_backfill_log_1() { + local dir=$1 + + _common_test $dir "--osd_min_pg_log_entries=1 --osd_max_pg_log_entries=2 --osd_pg_log_dups_tracked=10" 1 9 150 +} + + +# Cause copy_up_to() to only partially copy logs, copy additional dups +function TEST_backfill_log_2() { + local dir=$1 + + _common_test $dir "--osd_min_pg_log_entries=1 --osd_max_pg_log_entries=2" 1 149 150 +} + + +# Cause copy_after() to only copy logs, no dups +function TEST_recovery_1() { + local dir=$1 + + _common_test $dir "--osd_min_pg_log_entries=50 --osd_max_pg_log_entries=50 --osd_pg_log_dups_tracked=60 --osd_pg_log_trim_min=10" 40 0 40 +} + + +# Cause copy_after() to copy logs with dups +function TEST_recovery_2() { + local dir=$1 + + _common_test $dir "--osd_min_pg_log_entries=150 --osd_max_pg_log_entries=150 --osd_pg_log_dups_tracked=3000 --osd_pg_log_trim_min=10" 151 10 141 20 +} + +main osd-backfill-recovery-log "$@" + +# Local Variables: +# compile-command: "make -j4 && ../qa/run-standalone.sh osd-backfill-recovery-log.sh" +# End: diff --git a/qa/standalone/osd/osd-backfill-space.sh b/qa/standalone/osd/osd-backfill-space.sh new file mode 100755 index 00000000..3978668e --- /dev/null +++ b/qa/standalone/osd/osd-backfill-space.sh @@ -0,0 +1,1175 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2018 Red Hat +# +# Author: David Zafman +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your 
option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7180" # git grep '\<7180\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + CEPH_ARGS+="--osd_min_pg_log_entries=5 --osd_max_pg_log_entries=10 " + CEPH_ARGS+="--fake_statfs_for_testing=3686400 " + CEPH_ARGS+="--osd_max_backfills=10 " + export objects=600 + export poolprefix=test + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + + +function get_num_in_state() { + local state=$1 + local expression + expression+="select(contains(\"${state}\"))" + ceph --format json pg dump pgs 2>/dev/null | \ + jq ".pg_stats | [.[] | .state | $expression] | length" +} + + +function wait_for_not_state() { + local state=$1 + local num_in_state=-1 + local cur_in_state + local -a delays=($(get_timeout_delays $2 5)) + local -i loop=0 + + flush_pg_stats || return 1 + while test $(get_num_pgs) == 0 ; do + sleep 1 + done + + while true ; do + cur_in_state=$(get_num_in_state ${state}) + test $cur_in_state = "0" && break + if test $cur_in_state != $num_in_state ; then + loop=0 + num_in_state=$cur_in_state + elif (( $loop >= ${#delays[*]} )) ; then + ceph pg dump pgs + return 1 + fi + sleep ${delays[$loop]} + loop+=1 + done + return 0 +} + + +function wait_for_not_backfilling() { + local timeout=$1 + wait_for_not_state backfilling $timeout +} + + +function wait_for_not_activating() { + local timeout=$1 + wait_for_not_state activating $timeout +} + +# All tests are created in an environment which has fake total space +# of 3600K (3686400) which can hold 600 6K replicated objects or +# 200 18K shards of erasure coded objects. For a k=3, m=2 EC pool +# we have a theoretical 54K object but with the chunk size of 4K +# and a rounding of 4K to account for the chunks is 36K max object +# which is ((36K / 3) + 4K) * 200 = 3200K which is 88% of +# 3600K for a shard. + +# Create 2 pools with size 1 +# Write enough data that only 1 pool pg can fit per osd +# Incresase the pool size to 2 +# On 3 OSDs this should result in 1 OSD with overlapping replicas, +# so both pools can't fit. We assume pgid 1.0 and 2.0 won't +# map to the same 2 OSDs. +# At least 1 pool shouldn't have room to backfill +# All other pools should go active+clean +function TEST_backfill_test_simple() { + local dir=$1 + local pools=2 + local OSDS=3 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + export CEPH_ARGS + + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + ceph osd set-backfillfull-ratio .85 + + for p in $(seq 1 $pools) + do + create_pool "${poolprefix}$p" 1 1 + ceph osd pool set "${poolprefix}$p" size 1 + done + + wait_for_clean || return 1 + + # This won't work is if the 2 pools primary and only osds + # are the same. 
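+    # One way to check that assumption by hand (illustrative only; it reuses
+    # the same jq query the sametarget test further down relies on):
+    #   ceph pg map 1.0 --format=json | jq '.acting[]'
+    #   ceph pg map 2.0 --format=json | jq '.acting[]'
+    # With size 1 each PG has a single acting OSD, so the test is only
+    # meaningful when the two values differ.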
+ + dd if=/dev/urandom of=$dir/datafile bs=1024 count=4 + for o in $(seq 1 $objects) + do + for p in $(seq 1 $pools) + do + rados -p "${poolprefix}$p" put obj$o $dir/datafile + done + done + + ceph pg dump pgs + + for p in $(seq 1 $pools) + do + ceph osd pool set "${poolprefix}$p" size 2 + done + sleep 30 + + wait_for_not_backfilling 240 || return 1 + wait_for_not_activating 60 || return 1 + + ERRORS=0 + if [ "$(ceph pg dump pgs | grep +backfill_toofull | wc -l)" != "1" ]; + then + echo "One pool should have been in backfill_toofull" + ERRORS="$(expr $ERRORS + 1)" + fi + + expected="$(expr $pools - 1)" + if [ "$(ceph pg dump pgs | grep active+clean | wc -l)" != "$expected" ]; + then + echo "$expected didn't finish backfill" + ERRORS="$(expr $ERRORS + 1)" + fi + + ceph pg dump pgs + + if [ $ERRORS != "0" ]; + then + return 1 + fi + + for i in $(seq 1 $pools) + do + delete_pool "${poolprefix}$i" + done + kill_daemons $dir || return 1 + ! grep -q "num_bytes mismatch" $dir/osd.*.log || return 1 +} + + +# Create 8 pools of size 1 on 20 OSDs +# Write 4K * 600 objects (only 1 pool pg can fit on any given osd) +# Increase pool size to 2 +# At least 1 pool shouldn't have room to backfill +# All other pools should go active+clean +function TEST_backfill_test_multi() { + local dir=$1 + local pools=8 + local OSDS=20 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + export CEPH_ARGS + + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + ceph osd set-backfillfull-ratio .85 + + for p in $(seq 1 $pools) + do + create_pool "${poolprefix}$p" 1 1 + ceph osd pool set "${poolprefix}$p" size 1 + done + + wait_for_clean || return 1 + + dd if=/dev/urandom of=$dir/datafile bs=1024 count=4 + for o in $(seq 1 $objects) + do + for p in $(seq 1 $pools) + do + rados -p "${poolprefix}$p" put obj$o $dir/datafile + done + done + + ceph pg dump pgs + + for p in $(seq 1 $pools) + do + ceph osd pool set "${poolprefix}$p" size 2 + done + sleep 30 + + wait_for_not_backfilling 240 || return 1 + wait_for_not_activating 60 || return 1 + + ERRORS=0 + full="$(ceph pg dump pgs | grep +backfill_toofull | wc -l)" + if [ "$full" -lt "1" ]; + then + echo "At least one pool should have been in backfill_toofull" + ERRORS="$(expr $ERRORS + 1)" + fi + + expected="$(expr $pools - $full)" + if [ "$(ceph pg dump pgs | grep active+clean | wc -l)" != "$expected" ]; + then + echo "$expected didn't finish backfill" + ERRORS="$(expr $ERRORS + 1)" + fi + + ceph pg dump pgs + ceph status + + ceph status --format=json-pretty > $dir/stat.json + + eval SEV=$(jq '.health.checks.PG_BACKFILL_FULL.severity' $dir/stat.json) + if [ "$SEV" != "HEALTH_WARN" ]; then + echo "PG_BACKFILL_FULL severity $SEV not HEALTH_WARN" + ERRORS="$(expr $ERRORS + 1)" + fi + eval MSG=$(jq '.health.checks.PG_BACKFILL_FULL.summary.message' $dir/stat.json) + if [ "$MSG" != "Low space hindering backfill (add storage if this doesn't resolve itself): 4 pgs backfill_toofull" ]; then + echo "PG_BACKFILL_FULL message '$MSG' mismatched" + ERRORS="$(expr $ERRORS + 1)" + fi + rm -f $dir/stat.json + + if [ $ERRORS != "0" ]; + then + return 1 + fi + + for i in $(seq 1 $pools) + do + delete_pool "${poolprefix}$i" + done + # Work around for http://tracker.ceph.com/issues/38195 + kill_daemons $dir #|| return 1 + ! grep -q "num_bytes mismatch" $dir/osd.*.log || return 1 +} + + +# To make sure that when 2 pg try to backfill at the same time to +# the same target. This might be covered by the simple test above +# but this makes sure we get it. 
+# +# Create 10 pools of size 2 and identify 2 that have the same +# non-primary osd. +# Delete all other pools +# Set size to 1 and write 4K * 600 to each pool +# Set size back to 2 +# The 2 pools should race to backfill. +# One pool goes active+clean +# The other goes acitve+...+backfill_toofull +function TEST_backfill_test_sametarget() { + local dir=$1 + local pools=10 + local OSDS=5 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + export CEPH_ARGS + + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + ceph osd set-backfillfull-ratio .85 + + for p in $(seq 1 $pools) + do + create_pool "${poolprefix}$p" 1 1 + ceph osd pool set "${poolprefix}$p" size 2 + done + sleep 5 + + wait_for_clean || return 1 + + ceph pg dump pgs + + # Find 2 pools with a pg that distinct primaries but second + # replica on the same osd. + local PG1 + local POOLNUM1 + local pool1 + local chk_osd1 + local chk_osd2 + + local PG2 + local POOLNUM2 + local pool2 + for p in $(seq 1 $pools) + do + ceph pg map ${p}.0 --format=json | jq '.acting[]' > $dir/acting + local test_osd1=$(head -1 $dir/acting) + local test_osd2=$(tail -1 $dir/acting) + if [ $p = "1" ]; + then + PG1="${p}.0" + POOLNUM1=$p + pool1="${poolprefix}$p" + chk_osd1=$test_osd1 + chk_osd2=$test_osd2 + elif [ $chk_osd1 != $test_osd1 -a $chk_osd2 = $test_osd2 ]; + then + PG2="${p}.0" + POOLNUM2=$p + pool2="${poolprefix}$p" + break + fi + done + rm -f $dir/acting + + if [ "$pool2" = "" ]; + then + echo "Failure to find appropirate PGs" + return 1 + fi + + for p in $(seq 1 $pools) + do + if [ $p != $POOLNUM1 -a $p != $POOLNUM2 ]; + then + delete_pool ${poolprefix}$p + fi + done + + ceph osd pool set $pool1 size 1 + ceph osd pool set $pool2 size 1 + + wait_for_clean || return 1 + + dd if=/dev/urandom of=$dir/datafile bs=1024 count=4 + for i in $(seq 1 $objects) + do + rados -p $pool1 put obj$i $dir/datafile + rados -p $pool2 put obj$i $dir/datafile + done + + ceph osd pool set $pool1 size 2 + ceph osd pool set $pool2 size 2 + sleep 30 + + wait_for_not_backfilling 240 || return 1 + wait_for_not_activating 60 || return 1 + + ERRORS=0 + if [ "$(ceph pg dump pgs | grep +backfill_toofull | wc -l)" != "1" ]; + then + echo "One pool should have been in backfill_toofull" + ERRORS="$(expr $ERRORS + 1)" + fi + + if [ "$(ceph pg dump pgs | grep active+clean | wc -l)" != "1" ]; + then + echo "One didn't finish backfill" + ERRORS="$(expr $ERRORS + 1)" + fi + + ceph pg dump pgs + + if [ $ERRORS != "0" ]; + then + return 1 + fi + + delete_pool $pool1 + delete_pool $pool2 + kill_daemons $dir || return 1 + ! 
grep -q "num_bytes mismatch" $dir/osd.*.log || return 1 +} + +# 2 pools can't both backfill to a target which has other data +# 1 of the pools has objects that increase from 1024 to 2611 bytes +# +# Write to fill pool which is size 1 +# Take fill pool osd down (other 2 pools must go to the remaining OSDs +# Save an export of data on fill OSD and restart it +# Write an intial 1K to pool1 which has pg 2.0 +# Export 2.0 from non-fillpool OSD don't wait for it to start-up +# Take down fillpool OSD +# Put 1K object version of 2.0 on fillpool OSD +# Put back fillpool data on fillpool OSD +# With fillpool down write 2611 byte objects +# Take down $osd and bring back $fillosd simultaneously +# Wait for backfilling +# One PG will be able to backfill its remaining data +# One PG must get backfill_toofull +function TEST_backfill_multi_partial() { + local dir=$1 + local EC=$2 + local pools=2 + local OSDS=3 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + export CEPH_ARGS + + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + ceph osd set-backfillfull-ratio .85 + + ceph osd set-require-min-compat-client luminous + create_pool fillpool 1 1 + ceph osd pool set fillpool size 1 + for p in $(seq 1 $pools) + do + create_pool "${poolprefix}$p" 1 1 + ceph osd pool set "${poolprefix}$p" size 2 + done + + wait_for_clean || return 1 + + # Partially fill an osd + # We have room for 600 6K replicated objects, if we create 2611 byte objects + # there is 3600K - (2611 * 600) = 2070K, so the fill pool and one + # replica from the other 2 is 85% of 3600K + + dd if=/dev/urandom of=$dir/datafile bs=2611 count=1 + for o in $(seq 1 $objects) + do + rados -p fillpool put obj-fill-${o} $dir/datafile + done + + local fillosd=$(get_primary fillpool obj-fill-1) + osd=$(expr $fillosd + 1) + if [ "$osd" = "$OSDS" ]; then + osd="0" + fi + + kill_daemon $dir/osd.$fillosd.pid TERM + ceph osd out osd.$fillosd + + _objectstore_tool_nodown $dir $fillosd --op export-remove --pgid 1.0 --file $dir/fillexport.out || return 1 + activate_osd $dir $fillosd || return 1 + + ceph pg dump pgs + + dd if=/dev/urandom of=$dir/datafile bs=1024 count=1 + for o in $(seq 1 $objects) + do + rados -p "${poolprefix}1" put obj-1-${o} $dir/datafile + done + + ceph pg dump pgs + # The $osd OSD is started, but we don't wait so we can kill $fillosd at the same time + _objectstore_tool_nowait $dir $osd --op export --pgid 2.0 --file $dir/export.out + kill_daemon $dir/osd.$fillosd.pid TERM + _objectstore_tool_nodown $dir $fillosd --force --op remove --pgid 2.0 + _objectstore_tool_nodown $dir $fillosd --op import --pgid 2.0 --file $dir/export.out || return 1 + _objectstore_tool_nodown $dir $fillosd --op import --pgid 1.0 --file $dir/fillexport.out || return 1 + ceph pg dump pgs + sleep 20 + ceph pg dump pgs + + # re-write everything + dd if=/dev/urandom of=$dir/datafile bs=2611 count=1 + for o in $(seq 1 $objects) + do + for p in $(seq 1 $pools) + do + rados -p "${poolprefix}$p" put obj-${p}-${o} $dir/datafile + done + done + + kill_daemon $dir/osd.$osd.pid TERM + ceph osd out osd.$osd + + activate_osd $dir $fillosd || return 1 + ceph osd in osd.$fillosd + sleep 30 + + wait_for_not_backfilling 240 || return 1 + wait_for_not_activating 60 || return 1 + + flush_pg_stats || return 1 + ceph pg dump pgs + + ERRORS=0 + if [ "$(get_num_in_state backfill_toofull)" != "1" ]; + then + echo "One PG should be in backfill_toofull" + ERRORS="$(expr $ERRORS + 1)" + fi + + if [ "$(get_num_in_state active+clean)" != "2" ]; + then + 
echo "Two PGs should be active+clean after one PG completed backfill" + ERRORS="$(expr $ERRORS + 1)" + fi + + if [ $ERRORS != "0" ]; + then + return 1 + fi + + delete_pool fillpool + for i in $(seq 1 $pools) + do + delete_pool "${poolprefix}$i" + done + kill_daemons $dir || return 1 + ! grep -q "num_bytes mismatch" $dir/osd.*.log || return 1 +} + +# Make sure that the amount of bytes already on the replica doesn't +# cause an out of space condition +# +# Create 1 pool and write 4K * 600 objects +# Remove 25% (150) of the objects with one OSD down (noout set) +# Increase the size of the remaining 75% (450) of the objects to 6K +# Bring back down OSD +# The pool should go active+clean +function TEST_backfill_grow() { + local dir=$1 + local poolname="test" + local OSDS=3 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + ceph osd set-backfillfull-ratio .85 + + create_pool $poolname 1 1 + ceph osd pool set $poolname size 3 + sleep 5 + + wait_for_clean || return 1 + + dd if=/dev/urandom of=${dir}/4kdata bs=1k count=4 + for i in $(seq 1 $objects) + do + rados -p $poolname put obj$i $dir/4kdata + done + + local PG=$(get_pg $poolname obj1) + # Remember primary during the backfill + local primary=$(get_primary $poolname obj1) + local otherosd=$(get_not_primary $poolname obj1) + + ceph osd set noout + kill_daemons $dir TERM $otherosd || return 1 + + rmobjects=$(expr $objects / 4) + for i in $(seq 1 $rmobjects) + do + rados -p $poolname rm obj$i + done + + dd if=/dev/urandom of=${dir}/6kdata bs=6k count=1 + for i in $(seq $(expr $rmobjects + 1) $objects) + do + rados -p $poolname put obj$i $dir/6kdata + done + + activate_osd $dir $otherosd || return 1 + + ceph tell osd.$primary debug kick_recovery_wq 0 + + sleep 2 + + wait_for_clean || return 1 + + delete_pool $poolname + kill_daemons $dir || return 1 + ! grep -q "num_bytes mismatch" $dir/osd.*.log || return 1 +} + +# Create a 5 shard EC pool on 6 OSD cluster +# Fill 1 OSD with 2600K of data take that osd down. +# Write the EC pool on 5 OSDs +# Take down 1 (must contain an EC shard) +# Bring up OSD with fill data +# Not enought room to backfill to partially full OSD +function TEST_ec_backfill_simple() { + local dir=$1 + local EC=$2 + local pools=1 + local OSDS=6 + local k=3 + local m=2 + local ecobjects=$(expr $objects / $k) + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + export CEPH_ARGS + + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + ceph osd set-backfillfull-ratio .85 + create_pool fillpool 1 1 + ceph osd pool set fillpool size 1 + + # Partially fill an osd + # We have room for 200 18K replicated objects, if we create 13K objects + # there is only 3600K - (13K * 200) = 1000K which won't hold + # a k=3 shard below ((18K / 3) + 4K) * 200 = 2000K + # Actual usage per shard is 8K * 200 = 1600K because 18K/3 is 6K which + # rounds to 8K. The 2000K is the ceiling on the 18K * 200 = 3600K logical + # bytes in the pool. 
+ dd if=/dev/urandom of=$dir/datafile bs=1024 count=13 + for o in $(seq 1 $ecobjects) + do + rados -p fillpool put obj$o $dir/datafile + done + + local fillosd=$(get_primary fillpool obj1) + osd=$(expr $fillosd + 1) + if [ "$osd" = "$OSDS" ]; then + osd="0" + fi + + sleep 5 + kill_daemon $dir/osd.$fillosd.pid TERM + ceph osd out osd.$fillosd + sleep 2 + ceph osd erasure-code-profile set ec-profile k=$k m=$m crush-failure-domain=osd technique=reed_sol_van plugin=jerasure || return 1 + + for p in $(seq 1 $pools) + do + ceph osd pool create "${poolprefix}$p" 1 1 erasure ec-profile + done + + # Can't wait for clean here because we created a stale pg + #wait_for_clean || return 1 + sleep 5 + + ceph pg dump pgs + + dd if=/dev/urandom of=$dir/datafile bs=1024 count=18 + for o in $(seq 1 $ecobjects) + do + for p in $(seq 1 $pools) + do + rados -p "${poolprefix}$p" put obj$o $dir/datafile + done + done + + kill_daemon $dir/osd.$osd.pid TERM + ceph osd out osd.$osd + + activate_osd $dir $fillosd || return 1 + ceph osd in osd.$fillosd + sleep 30 + + ceph pg dump pgs + + wait_for_not_backfilling 240 || return 1 + wait_for_not_activating 60 || return 1 + + ceph pg dump pgs + + ERRORS=0 + if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep +backfill_toofull | wc -l)" != "1" ]; then + echo "One pool should have been in backfill_toofull" + ERRORS="$(expr $ERRORS + 1)" + fi + + if [ $ERRORS != "0" ]; + then + return 1 + fi + + delete_pool fillpool + for i in $(seq 1 $pools) + do + delete_pool "${poolprefix}$i" + done + kill_daemons $dir || return 1 +} + +function osdlist() { + local OSDS=$1 + local excludeosd=$2 + + osds="" + for osd in $(seq 0 $(expr $OSDS - 1)) + do + if [ $osd = $excludeosd ]; + then + continue + fi + if [ -n "$osds" ]; then + osds="${osds} " + fi + osds="${osds}${osd}" + done + echo $osds +} + +# Create a pool with size 1 and fill with data so that only 1 EC shard can fit. +# Write data to 2 EC pools mapped to the same OSDs (excluding filled one) +# Remap the last OSD to partially full OSD on both pools +# The 2 pools should race to backfill. +# One pool goes active+clean +# The other goes acitve+...+backfill_toofull +function TEST_ec_backfill_multi() { + local dir=$1 + local EC=$2 + local pools=2 + local OSDS=6 + local k=3 + local m=2 + local ecobjects=$(expr $objects / $k) + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + export CEPH_ARGS + + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + # This test requires that shards from 2 different pools + # fit on a given OSD, but both will not fix. I'm using + # making the fillosd plus 1 shard use 75% of the space, + # leaving not enough to be under the 85% set here. + ceph osd set-backfillfull-ratio .85 + + ceph osd set-require-min-compat-client luminous + create_pool fillpool 1 1 + ceph osd pool set fillpool size 1 + + # Partially fill an osd + # We have room for 200 18K replicated objects, if we create 9K objects + # there is only 3600K - (9K * 200) = 1800K which will only hold + # one k=3 shard below ((12K / 3) + 4K) * 200 = 1600K + # The actual data will be (12K / 3) * 200 = 800K because the extra + # is the reservation padding for chunking. 
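+    # Recap of the arithmetic above for the two racing pools (illustrative):
+    #   fill data on the fill OSD:   9K * 200 = 1800K
+    #   space left:                  3600K - 1800K = 1800K
+    #   one shard of 12K objects:    ((12K / 3) + 4K) * 200 = 1600K  -> fits
+    #   two such shards:             2 * 1600K = 3200K               -> too much
+    # so exactly one of the two pools is expected to reach active+clean and
+    # the other backfill_toofull.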
+ dd if=/dev/urandom of=$dir/datafile bs=1024 count=9 + for o in $(seq 1 $ecobjects) + do + rados -p fillpool put obj$o $dir/datafile + done + + local fillosd=$(get_primary fillpool obj1) + ceph osd erasure-code-profile set ec-profile k=3 m=2 crush-failure-domain=osd technique=reed_sol_van plugin=jerasure || return 1 + + nonfillosds="$(osdlist $OSDS $fillosd)" + + for p in $(seq 1 $pools) + do + ceph osd pool create "${poolprefix}$p" 1 1 erasure ec-profile + ceph osd pg-upmap "$(expr $p + 1).0" $nonfillosds + done + + # Can't wait for clean here because we created a stale pg + #wait_for_clean || return 1 + sleep 15 + + ceph pg dump pgs + + dd if=/dev/urandom of=$dir/datafile bs=1024 count=12 + for o in $(seq 1 $ecobjects) + do + for p in $(seq 1 $pools) + do + rados -p "${poolprefix}$p" put obj$o-$p $dir/datafile + done + done + + ceph pg dump pgs + + for p in $(seq 1 $pools) + do + ceph osd pg-upmap $(expr $p + 1).0 ${nonfillosds% *} $fillosd + done + + sleep 30 + + wait_for_not_backfilling 240 || return 1 + wait_for_not_activating 60 || return 1 + + ceph pg dump pgs + + ERRORS=0 + if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep +backfill_toofull | wc -l)" != "1" ]; + then + echo "One pool should have been in backfill_toofull" + ERRORS="$(expr $ERRORS + 1)" + fi + + if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep active+clean | wc -l)" != "1" ]; + then + echo "One didn't finish backfill" + ERRORS="$(expr $ERRORS + 1)" + fi + + if [ $ERRORS != "0" ]; + then + return 1 + fi + + delete_pool fillpool + for i in $(seq 1 $pools) + do + delete_pool "${poolprefix}$i" + done + kill_daemons $dir || return 1 +} + +# Similar to TEST_ec_backfill_multi but one of the ec pools +# already had some data on the target OSD + +# Create a pool with size 1 and fill with data so that only 1 EC shard can fit. +# Write a small amount of data to 1 EC pool that still includes the filled one +# Take down fillosd with noout set +# Write data to 2 EC pools mapped to the same OSDs (excluding filled one) +# Remap the last OSD to partially full OSD on both pools +# The 2 pools should race to backfill. +# One pool goes active+clean +# The other goes acitve+...+backfill_toofull +function SKIP_TEST_ec_backfill_multi_partial() { + local dir=$1 + local EC=$2 + local pools=2 + local OSDS=5 + local k=3 + local m=2 + local ecobjects=$(expr $objects / $k) + local lastosd=$(expr $OSDS - 1) + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + export CEPH_ARGS + + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + # This test requires that shards from 2 different pools + # fit on a given OSD, but both will not fix. I'm using + # making the fillosd plus 1 shard use 75% of the space, + # leaving not enough to be under the 85% set here. + ceph osd set-backfillfull-ratio .85 + + ceph osd set-require-min-compat-client luminous + create_pool fillpool 1 1 + ceph osd pool set fillpool size 1 + # last osd + ceph osd pg-upmap 1.0 $lastosd + + # Partially fill an osd + # We have room for 200 18K replicated objects, if we create 9K objects + # there is only 3600K - (9K * 200) = 1800K which will only hold + # one k=3 shard below ((12K / 3) + 4K) * 200 = 1600K + # The actual data will be (12K / 3) * 200 = 800K because the extra + # is the reservation padding for chunking. 
+ dd if=/dev/urandom of=$dir/datafile bs=1024 count=9 + for o in $(seq 1 $ecobjects) + do + rados -p fillpool put obj$o $dir/datafile + done + + local fillosd=$(get_primary fillpool obj1) + ceph osd erasure-code-profile set ec-profile k=3 m=2 crush-failure-domain=osd technique=reed_sol_van plugin=jerasure || return 1 + + nonfillosds="$(osdlist $OSDS $fillosd)" + + for p in $(seq 1 $pools) + do + ceph osd pool create "${poolprefix}$p" 1 1 erasure ec-profile + ceph osd pg-upmap "$(expr $p + 1).0" $(seq 0 $lastosd) + done + + # Can't wait for clean here because we created a stale pg + #wait_for_clean || return 1 + sleep 15 + + ceph pg dump pgs + + dd if=/dev/urandom of=$dir/datafile bs=1024 count=1 + for o in $(seq 1 $ecobjects) + do + rados -p "${poolprefix}1" put obj$o-1 $dir/datafile + done + + for p in $(seq 1 $pools) + do + ceph osd pg-upmap "$(expr $p + 1).0" $(seq 0 $(expr $lastosd - 1)) + done + ceph pg dump pgs + + #ceph osd set noout + #kill_daemons $dir TERM osd.$lastosd || return 1 + + dd if=/dev/urandom of=$dir/datafile bs=1024 count=12 + for o in $(seq 1 $ecobjects) + do + for p in $(seq 1 $pools) + do + rados -p "${poolprefix}$p" put obj$o-$p $dir/datafile + done + done + + ceph pg dump pgs + + # Now backfill lastosd by adding back into the upmap + for p in $(seq 1 $pools) + do + ceph osd pg-upmap "$(expr $p + 1).0" $(seq 0 $lastosd) + done + #activate_osd $dir $lastosd || return 1 + #ceph tell osd.0 debug kick_recovery_wq 0 + + sleep 30 + ceph pg dump pgs + + wait_for_not_backfilling 240 || return 1 + wait_for_not_activating 60 || return 1 + + ceph pg dump pgs + + ERRORS=0 + if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep +backfill_toofull | wc -l)" != "1" ]; + then + echo "One pool should have been in backfill_toofull" + ERRORS="$(expr $ERRORS + 1)" + fi + + if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep active+clean | wc -l)" != "1" ]; + then + echo "One didn't finish backfill" + ERRORS="$(expr $ERRORS + 1)" + fi + + if [ $ERRORS != "0" ]; + then + return 1 + fi + + delete_pool fillpool + for i in $(seq 1 $pools) + do + delete_pool "${poolprefix}$i" + done + kill_daemons $dir || return 1 +} + +function SKIP_TEST_ec_backfill_multi_partial() { + local dir=$1 + local EC=$2 + local pools=2 + local OSDS=6 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + export CEPH_ARGS + + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + # Below we need to fit 3200K in 3600K which is 88% + # so set to 90% + ceph osd set-backfillfull-ratio .90 + + ceph osd set-require-min-compat-client luminous + create_pool fillpool 1 1 + ceph osd pool set fillpool size 1 + + # Partially fill an osd + # We have room for 200 48K ec objects, if we create 4k replicated objects + # there is 3600K - (4K * 200) = 2800K which won't hold 2 k=3 shard + # of 200 12K objects which takes ((12K / 3) + 4K) * 200 = 1600K each. + # On the other OSDs 2 * 1600K = 3200K which is 88% of 3600K. 
+ dd if=/dev/urandom of=$dir/datafile bs=1024 count=4 + for o in $(seq 1 $objects) + do + rados -p fillpool put obj$o $dir/datafile + done + + local fillosd=$(get_primary fillpool obj1) + osd=$(expr $fillosd + 1) + if [ "$osd" = "$OSDS" ]; then + osd="0" + fi + + sleep 5 + kill_daemon $dir/osd.$fillosd.pid TERM + ceph osd out osd.$fillosd + sleep 2 + ceph osd erasure-code-profile set ec-profile k=3 m=2 crush-failure-domain=osd technique=reed_sol_van plugin=jerasure || return 1 + + for p in $(seq 1 $pools) + do + ceph osd pool create "${poolprefix}$p" 1 1 erasure ec-profile + done + + # Can't wait for clean here because we created a stale pg + #wait_for_clean || return 1 + sleep 5 + + ceph pg dump pgs + + dd if=/dev/urandom of=$dir/datafile bs=1024 count=12 + for o in $(seq 1 $objects) + do + for p in $(seq 1 $pools) + do + rados -p "${poolprefix}$p" put obj$o $dir/datafile + done + done + + #ceph pg map 2.0 --format=json | jq '.' + kill_daemon $dir/osd.$osd.pid TERM + ceph osd out osd.$osd + + _objectstore_tool_nodown $dir $osd --op export --pgid 2.0 --file $dir/export.out + _objectstore_tool_nodown $dir $fillosd --op import --pgid 2.0 --file $dir/export.out + + activate_osd $dir $fillosd || return 1 + ceph osd in osd.$fillosd + sleep 30 + + wait_for_not_backfilling 240 || return 1 + wait_for_not_activating 60 || return 1 + + ERRORS=0 + if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep +backfill_toofull | wc -l)" != "1" ]; + then + echo "One pool should have been in backfill_toofull" + ERRORS="$(expr $ERRORS + 1)" + fi + + if [ "$(ceph pg dump pgs | grep -v "^1.0" | grep active+clean | wc -l)" != "1" ]; + then + echo "One didn't finish backfill" + ERRORS="$(expr $ERRORS + 1)" + fi + + ceph pg dump pgs + + if [ $ERRORS != "0" ]; + then + return 1 + fi + + delete_pool fillpool + for i in $(seq 1 $pools) + do + delete_pool "${poolprefix}$i" + done + kill_daemons $dir || return 1 +} + +# Create 1 EC pool +# Write 200 12K objects ((12K / 3) + 4K) *200) = 1600K +# Take 1 shard's OSD down (with noout set) +# Remove 50 objects ((12K / 3) + 4k) * 50) = 400K +# Write 150 36K objects (grow 150 objects) 2400K +# But there is already 1600K usage so backfill +# would be too full if it didn't account for existing data +# Bring back down OSD so it must backfill +# It should go active+clean taking into account data already there +function TEST_ec_backfill_grow() { + local dir=$1 + local poolname="test" + local OSDS=6 + local k=3 + local m=2 + local ecobjects=$(expr $objects / $k) + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + ceph osd set-backfillfull-ratio .85 + + ceph osd set-require-min-compat-client luminous + ceph osd erasure-code-profile set ec-profile k=$k m=$m crush-failure-domain=osd technique=reed_sol_van plugin=jerasure || return 1 + ceph osd pool create $poolname 1 1 erasure ec-profile + + wait_for_clean || return 1 + + dd if=/dev/urandom of=${dir}/12kdata bs=1k count=12 + for i in $(seq 1 $ecobjects) + do + rados -p $poolname put obj$i $dir/12kdata + done + + local PG=$(get_pg $poolname obj1) + # Remember primary during the backfill + local primary=$(get_primary $poolname obj1) + local otherosd=$(get_not_primary $poolname obj1) + + ceph osd set noout + kill_daemons $dir TERM $otherosd || return 1 + + rmobjects=$(expr $ecobjects / 4) + for i in $(seq 1 $rmobjects) + do + rados -p $poolname rm obj$i + done + + dd if=/dev/urandom of=${dir}/36kdata bs=1k count=36 + for i in $(seq $(expr $rmobjects 
+ 1) $ecobjects) + do + rados -p $poolname put obj$i $dir/36kdata + done + + activate_osd $dir $otherosd || return 1 + + ceph tell osd.$primary debug kick_recovery_wq 0 + + sleep 2 + + wait_for_clean || return 1 + + delete_pool $poolname + kill_daemons $dir || return 1 +} + +main osd-backfill-space "$@" + +# Local Variables: +# compile-command: "make -j4 && ../qa/run-standalone.sh osd-backfill-space.sh" +# End: diff --git a/qa/standalone/osd/osd-backfill-stats.sh b/qa/standalone/osd/osd-backfill-stats.sh new file mode 100755 index 00000000..104533e7 --- /dev/null +++ b/qa/standalone/osd/osd-backfill-stats.sh @@ -0,0 +1,753 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2017 Red Hat +# +# Author: David Zafman +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + # Fix port???? + export CEPH_MON="127.0.0.1:7114" # git grep '\<7114\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + CEPH_ARGS+="--osd_min_pg_log_entries=5 --osd_max_pg_log_entries=10 " + export margin=10 + export objects=200 + export poolname=test + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function below_margin() { + local -i check=$1 + shift + local -i target=$1 + + return $(( $check <= $target && $check >= $target - $margin ? 0 : 1 )) +} + +function above_margin() { + local -i check=$1 + shift + local -i target=$1 + + return $(( $check >= $target && $check <= $target + $margin ? 
0 : 1 )) +} + +FIND_UPACT='grep "pg[[]${PG}.*backfilling.*_update_calc_stats " $log | tail -1 | sed "s/.*[)] \([[][^ p]*\).*$/\1/"' +FIND_FIRST='grep "pg[[]${PG}.*backfilling.*_update_calc_stats $which " $log | grep -F " ${UPACT}${addp}" | grep -v est | head -1 | sed "s/.* \([0-9]*\)$/\1/"' +FIND_LAST='grep "pg[[]${PG}.*backfilling.*_update_calc_stats $which " $log | tail -1 | sed "s/.* \([0-9]*\)$/\1/"' + +function check() { + local dir=$1 + local PG=$2 + local primary=$3 + local type=$4 + local degraded_start=$5 + local degraded_end=$6 + local misplaced_start=$7 + local misplaced_end=$8 + local primary_start=${9:-} + local primary_end=${10:-} + local check_setup=${11:-true} + + local log=$(grep -l +backfilling $dir/osd.$primary.log) + if [ $check_setup = "true" ]; + then + local alllogs=$(grep -l +backfilling $dir/osd.*.log) + if [ "$(echo "$alllogs" | wc -w)" != "1" ]; + then + echo "Test setup failure, a single OSD should have performed backfill" + return 1 + fi + fi + + local addp=" " + if [ "$type" = "erasure" ]; + then + addp="p" + fi + + UPACT=$(eval $FIND_UPACT) + [ -n "$UPACT" ] || return 1 + + # Check 3rd line at start because of false recovery starts + local which="degraded" + FIRST=$(eval $FIND_FIRST) + [ -n "$FIRST" ] || return 1 + below_margin $FIRST $degraded_start || return 1 + LAST=$(eval $FIND_LAST) + [ -n "$LAST" ] || return 1 + above_margin $LAST $degraded_end || return 1 + + # Check 3rd line at start because of false recovery starts + which="misplaced" + FIRST=$(eval $FIND_FIRST) + [ -n "$FIRST" ] || return 1 + below_margin $FIRST $misplaced_start || return 1 + LAST=$(eval $FIND_LAST) + [ -n "$LAST" ] || return 1 + above_margin $LAST $misplaced_end || return 1 + + # This is the value of set into MISSING_ON_PRIMARY + if [ -n "$primary_start" ]; + then + which="shard $primary" + FIRST=$(eval $FIND_FIRST) + [ -n "$FIRST" ] || return 1 + below_margin $FIRST $primary_start || return 1 + LAST=$(eval $FIND_LAST) + [ -n "$LAST" ] || return 1 + above_margin $LAST $primary_end || return 1 + fi +} + +# [1] -> [1, 0, 2] +# degraded 1000 -> 0 +# state: active+undersized+degraded+remapped+backfilling + +# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP +# 1.0 500 0 1000 0 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-10-27 09:44:23.531466 22'500 26:617 [1,0,2] 1 [1] 1 0'0 2017-10-27 09:43:44.654882 0'0 2017-10-27 09:43:44.654882 +function TEST_backfill_sizeup() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + export CEPH_ARGS + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + run_osd $dir 3 || return 1 + run_osd $dir 4 || return 1 + run_osd $dir 5 || return 1 + + create_pool $poolname 1 1 + ceph osd pool set $poolname size 1 + + wait_for_clean || return 1 + + for i in $(seq 1 $objects) + do + rados -p $poolname put obj$i /dev/null + done + + ceph osd pool set $poolname size 3 + sleep 15 + + wait_for_clean || return 1 + + local primary=$(get_primary $poolname obj1) + local PG=$(get_pg $poolname obj1) + + local degraded=$(expr $objects \* 2) + check $dir $PG $primary replicated $degraded 0 0 0 || return 1 + + delete_pool $poolname + kill_daemons $dir || return 1 +} + + + +# [1] -> [0, 2, 4] +# degraded 1000 -> 0 +# misplaced 500 -> 0 +# state: active+undersized+degraded+remapped+backfilling + +# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED 
MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP +# 1.0 500 0 1000 500 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-10-27 09:48:53.326849 22'500 26:603 [0,2,4] 0 [1] 1 0'0 2017-10-27 09:48:13.236253 0'0 2017-10-27 09:48:13.236253 +function TEST_backfill_sizeup_out() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + run_osd $dir 3 || return 1 + run_osd $dir 4 || return 1 + run_osd $dir 5 || return 1 + + create_pool $poolname 1 1 + ceph osd pool set $poolname size 1 + + wait_for_clean || return 1 + + for i in $(seq 1 $objects) + do + rados -p $poolname put obj$i /dev/null + done + + local PG=$(get_pg $poolname obj1) + # Remember primary during the backfill + local primary=$(get_primary $poolname obj1) + + ceph osd out osd.$primary + ceph osd pool set $poolname size 3 + sleep 15 + + wait_for_clean || return 1 + + local degraded=$(expr $objects \* 2) + check $dir $PG $primary replicated $degraded 0 $objects 0 || return 1 + + delete_pool $poolname + kill_daemons $dir || return 1 +} + + +# [1 0] -> [1,2]/[1,0] +# misplaced 500 -> 0 +# state: active+remapped+backfilling + +# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP +# 1.0 500 0 0 500 0 0 100 100 active+remapped+backfilling 2017-10-27 09:51:18.800517 22'500 25:570 [1,2] 1 [1,0] 1 0'0 2017-10-27 09:50:40.441274 0'0 2017-10-27 09:50:40.441274 +function TEST_backfill_out() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + run_osd $dir 3 || return 1 + run_osd $dir 4 || return 1 + run_osd $dir 5 || return 1 + + create_pool $poolname 1 1 + ceph osd pool set $poolname size 2 + sleep 5 + + wait_for_clean || return 1 + + for i in $(seq 1 $objects) + do + rados -p $poolname put obj$i /dev/null + done + + local PG=$(get_pg $poolname obj1) + # Remember primary during the backfill + local primary=$(get_primary $poolname obj1) + + ceph osd out osd.$(get_not_primary $poolname obj1) + sleep 15 + + wait_for_clean || return 1 + + check $dir $PG $primary replicated 0 0 $objects 0 || return 1 + + delete_pool $poolname + kill_daemons $dir || return 1 +} + + +# [0, 1] -> [0, 2]/[0] +# osd 1 down/out +# degraded 500 -> 0 +# state: active+undersized+degraded+remapped+backfilling + +# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP +# 1.0 500 0 500 0 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-10-27 09:53:24.051091 22'500 27:719 [0,2] 0 [0] 0 0'0 2017-10-27 09:52:43.188368 0'0 2017-10-27 09:52:43.188368 +function TEST_backfill_down_out() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + run_osd $dir 3 || return 1 + run_osd $dir 4 || return 1 + run_osd $dir 5 || return 1 + + create_pool $poolname 1 1 + ceph osd pool set $poolname size 2 + sleep 5 + + wait_for_clean || return 1 + + for i in $(seq 1 $objects) + do + rados -p $poolname put obj$i /dev/null + 
done + + local PG=$(get_pg $poolname obj1) + # Remember primary during the backfill + local primary=$(get_primary $poolname obj1) + local otherosd=$(get_not_primary $poolname obj1) + + kill $(cat $dir/osd.${otherosd}.pid) + ceph osd down osd.${otherosd} + ceph osd out osd.${otherosd} + sleep 15 + + wait_for_clean || return 1 + + check $dir $PG $primary replicated $objects 0 0 0 || return 1 + + delete_pool $poolname + kill_daemons $dir || return 1 +} + + +# [1, 0] -> [2, 3, 4] +# degraded 500 -> 0 +# misplaced 1000 -> 0 +# state: active+undersized+degraded+remapped+backfilling + +# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP +# 1.0 500 0 500 1000 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-10-27 09:55:50.375722 23'500 27:553 [2,4,3] 2 [1,0] 1 0'0 2017-10-27 09:55:10.230919 0'0 2017-10-27 09:55:10.230919 +function TEST_backfill_out2() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + run_osd $dir 3 || return 1 + run_osd $dir 4 || return 1 + run_osd $dir 5 || return 1 + + create_pool $poolname 1 1 + ceph osd pool set $poolname size 2 + sleep 5 + + wait_for_clean || return 1 + + for i in $(seq 1 $objects) + do + rados -p $poolname put obj$i /dev/null + done + + local PG=$(get_pg $poolname obj1) + # Remember primary during the backfill + local primary=$(get_primary $poolname obj1) + local otherosd=$(get_not_primary $poolname obj1) + + ceph osd set nobackfill + ceph osd pool set $poolname size 3 + ceph osd out osd.${otherosd} + ceph osd out osd.${primary} + # Primary might change before backfill starts + sleep 2 + primary=$(get_primary $poolname obj1) + ceph osd unset nobackfill + ceph tell osd.$primary get_latest_osdmap + ceph tell osd.$primary debug kick_recovery_wq 0 + sleep 2 + + wait_for_clean || return 1 + + local misplaced=$(expr $objects \* 2) + + check $dir $PG $primary replicated $objects 0 $misplaced 0 || return 1 + + delete_pool $poolname + kill_daemons $dir || return 1 +} + + +# [0,1] -> [2,4,3]/[0,1] +# degraded 1000 -> 0 +# misplaced 1000 -> 500 +# state ends at active+clean+remapped [2,4,3]/[2,4,3,0] +# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP +# 1.0 500 0 1000 1000 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-10-30 18:21:45.995149 19'500 23:1817 [2,4,3] 2 [0,1] 0 0'0 2017-10-30 18:21:05.109904 0'0 2017-10-30 18:21:05.109904 +# ENDS: +# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP +# 1.0 500 0 0 500 0 0 5 5 active+clean+remapped 2017-10-30 18:22:42.293730 19'500 25:2557 [2,4,3] 2 [2,4,3,0] 2 0'0 2017-10-30 18:21:05.109904 0'0 2017-10-30 18:21:05.109904 +function TEST_backfill_sizeup4_allout() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + run_osd $dir 3 || return 1 + run_osd $dir 4 || return 1 + + create_pool $poolname 1 1 + ceph osd pool set $poolname size 2 + + wait_for_clean || return 1 + + for i in $(seq 1 $objects) + do 
+ rados -p $poolname put obj$i /dev/null + done + + local PG=$(get_pg $poolname obj1) + # Remember primary during the backfill + local primary=$(get_primary $poolname obj1) + local otherosd=$(get_not_primary $poolname obj1) + + ceph osd set nobackfill + ceph osd out osd.$otherosd + ceph osd out osd.$primary + ceph osd pool set $poolname size 4 + # Primary might change before backfill starts + sleep 2 + primary=$(get_primary $poolname obj1) + ceph osd unset nobackfill + ceph tell osd.$primary get_latest_osdmap + ceph tell osd.$primary debug kick_recovery_wq 0 + sleep 2 + + wait_for_clean || return 1 + + local misdeg=$(expr $objects \* 2) + check $dir $PG $primary replicated $misdeg 0 $misdeg $objects || return 1 + + delete_pool $poolname + kill_daemons $dir || return 1 +} + + +# [1,2,0] -> [3]/[1,2] +# misplaced 1000 -> 500 +# state ends at active+clean+remapped [3]/[3,1] +# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP +# 1.0 500 0 0 1000 0 0 100 100 active+remapped+backfilling 2017-11-28 19:13:56.092439 21'500 31:790 [3] 3 [1,2] 1 0'0 2017-11-28 19:13:28.698661 0'0 2017-11-28 19:13:28.698661 +function TEST_backfill_remapped() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + run_osd $dir 3 || return 1 + + create_pool $poolname 1 1 + ceph osd pool set $poolname size 3 + sleep 5 + + wait_for_clean || return 1 + + for i in $(seq 1 $objects) + do + rados -p $poolname put obj$i /dev/null + done + + local PG=$(get_pg $poolname obj1) + # Remember primary during the backfill + local primary=$(get_primary $poolname obj1) + local otherosd=$(get_not_primary $poolname obj1) + + ceph osd set nobackfill + ceph osd out osd.${otherosd} + for i in $(get_osds $poolname obj1) + do + if [ $i = $primary -o $i = $otherosd ]; + then + continue + fi + ceph osd out osd.$i + break + done + ceph osd out osd.${primary} + ceph osd pool set $poolname size 2 + sleep 2 + + # primary may change due to invalidating the old pg_temp, which was [1,2,0], + # but up_primary (3) chooses [0,1] for acting. 
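+    # The acting set at this point can be double-checked with the same query
+    # used elsewhere in these tests (illustrative):
+    #   ceph pg map $PG --format=json | jq '.acting[]'
+    # which is also why the primary is re-read just below rather than trusting
+    # the value captured before the out/size changes.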
+ primary=$(get_primary $poolname obj1) + + ceph osd unset nobackfill + ceph tell osd.$primary get_latest_osdmap + ceph tell osd.$primary debug kick_recovery_wq 0 + + sleep 2 + + wait_for_clean || return 1 + + local misplaced=$(expr $objects \* 2) + + check $dir $PG $primary replicated 0 0 $misplaced $objects "" "" false || return 1 + + delete_pool $poolname + kill_daemons $dir || return 1 +} + +# [1,0,2] -> [4,3,NONE]/[1,0,2] +# misplaced 1500 -> 500 +# state ends at active+clean+remapped [4,3,NONE]/[4,3,2] + +# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP +# 1.0 500 0 0 1500 0 0 100 100 active+degraded+remapped+backfilling 2017-10-31 16:53:39.467126 19'500 23:615 [4,3,NONE] 4 [1,0,2] 1 0'0 2017-10-31 16:52:59.624429 0'0 2017-10-31 16:52:59.624429 + + +# ENDS: + +# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP +# 1.0 500 0 0 500 0 0 5 5 active+clean+remapped 2017-10-31 16:48:34.414040 19'500 25:2049 [4,3,NONE] 4 [4,3,2] 4 0'0 2017-10-31 16:46:58.203440 0'0 2017-10-31 16:46:58.203440 +function TEST_backfill_ec_all_out() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + run_osd $dir 3 || return 1 + run_osd $dir 4 || return 1 + + ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd + create_pool $poolname 1 1 erasure myprofile + + wait_for_clean || return 1 + + for i in $(seq 1 $objects) + do + rados -p $poolname put obj$i /dev/null + done + + local PG=$(get_pg $poolname obj1) + # Remember primary during the backfill + local primary=$(get_primary $poolname obj1) + + ceph osd set nobackfill + for o in $(get_osds $poolname obj1) + do + ceph osd out osd.$o + done + # Primary might change before backfill starts + sleep 2 + primary=$(get_primary $poolname obj1) + ceph osd unset nobackfill + ceph tell osd.$primary get_latest_osdmap + ceph tell osd.$primary debug kick_recovery_wq 0 + sleep 2 + + wait_for_clean || return 1 + + local misplaced=$(expr $objects \* 3) + check $dir $PG $primary erasure 0 0 $misplaced $objects || return 1 + + delete_pool $poolname + kill_daemons $dir || return 1 +} + + +# [1,0,2] -> [4, 0, 2] +# misplaced 500 -> 0 +# active+remapped+backfilling +# +# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP +# 1.0 500 0 0 500 0 0 100 100 active+remapped+backfilling 2017-11-08 18:05:39.036420 24'500 27:742 [4,0,2] 4 [1,0,2] 1 0'0 2017-11-08 18:04:58.697315 0'0 2017-11-08 18:04:58.697315 +function TEST_backfill_ec_prim_out() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + run_osd $dir 3 || return 1 + run_osd $dir 4 || return 1 + + ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd + create_pool $poolname 1 1 erasure myprofile + + wait_for_clean || return 1 + + for i in $(seq 1 $objects) + do + rados -p $poolname put obj$i /dev/null + done + + local 
PG=$(get_pg $poolname obj1) + # Remember primary during the backfill + local primary=$(get_primary $poolname obj1) + + ceph osd set nobackfill + ceph osd out osd.$primary + # Primary might change before backfill starts + sleep 2 + primary=$(get_primary $poolname obj1) + ceph osd unset nobackfill + ceph tell osd.$primary get_latest_osdmap + ceph tell osd.$primary debug kick_recovery_wq 0 + sleep 2 + + wait_for_clean || return 1 + + local misplaced=$(expr $objects \* 3) + check $dir $PG $primary erasure 0 0 $objects 0 || return 1 + + delete_pool $poolname + kill_daemons $dir || return 1 +} + +# [1,0] -> [1,2] +# degraded 500 -> 0 +# misplaced 1000 -> 0 +# +# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP +# 1.0 500 0 500 1000 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-11-06 14:02:29.439105 24'500 29:1020 [4,3,5] 4 [1,NONE,2] 1 0'0 2017-11-06 14:01:46.509963 0'0 2017-11-06 14:01:46.509963 +function TEST_backfill_ec_down_all_out() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + run_osd $dir 3 || return 1 + run_osd $dir 4 || return 1 + run_osd $dir 5 || return 1 + + ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd + create_pool $poolname 1 1 erasure myprofile + ceph osd pool set $poolname min_size 2 + + wait_for_clean || return 1 + + for i in $(seq 1 $objects) + do + rados -p $poolname put obj$i /dev/null + done + + local PG=$(get_pg $poolname obj1) + # Remember primary during the backfill + local primary=$(get_primary $poolname obj1) + local otherosd=$(get_not_primary $poolname obj1) + local allosds=$(get_osds $poolname obj1) + + ceph osd set nobackfill + kill $(cat $dir/osd.${otherosd}.pid) + ceph osd down osd.${otherosd} + for o in $allosds + do + ceph osd out osd.$o + done + # Primary might change before backfill starts + sleep 2 + primary=$(get_primary $poolname obj1) + ceph osd unset nobackfill + ceph tell osd.$primary get_latest_osdmap + ceph tell osd.$primary debug kick_recovery_wq 0 + sleep 2 + flush_pg_stats + + # Wait for recovery to finish + # Can't use wait_for_clean() because state goes from active+undersized+degraded+remapped+backfilling + # to active+undersized+remapped + while(true) + do + if test "$(ceph --format json pg dump pgs | + jq '.pg_stats | [.[] | .state | select(. 
== "incomplete")] | length')" -ne "0" + then + sleep 2 + continue + fi + break + done + ceph pg dump pgs + for i in $(seq 1 60) + do + if ceph pg dump pgs | grep ^$PG | grep -qv backfilling + then + break + fi + if [ $i = "60" ]; + then + echo "Timeout waiting for recovery to finish" + return 1 + fi + sleep 1 + done + + ceph pg dump pgs + + local misplaced=$(expr $objects \* 2) + check $dir $PG $primary erasure $objects 0 $misplaced 0 || return 1 + + delete_pool $poolname + kill_daemons $dir || return 1 +} + + +# [1,0,2] -> [1,3,2] +# degraded 500 -> 0 +# active+backfilling+degraded +# +# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP +# 1.0 500 0 500 0 0 0 100 100 active+undersized+degraded+remapped+backfilling 2017-11-06 13:57:25.412322 22'500 28:794 [1,3,2] 1 [1,NONE,2] 1 0'0 2017-11-06 13:54:58.033906 0'0 2017-11-06 13:54:58.033906 +function TEST_backfill_ec_down_out() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + run_osd $dir 3 || return 1 + run_osd $dir 4 || return 1 + run_osd $dir 5 || return 1 + + ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd + create_pool $poolname 1 1 erasure myprofile + ceph osd pool set $poolname min_size 2 + + wait_for_clean || return 1 + + for i in $(seq 1 $objects) + do + rados -p $poolname put obj$i /dev/null + done + + local PG=$(get_pg $poolname obj1) + # Remember primary during the backfill + local primary=$(get_primary $poolname obj1) + local otherosd=$(get_not_primary $poolname obj1) + + ceph osd set nobackfill + kill $(cat $dir/osd.${otherosd}.pid) + ceph osd down osd.${otherosd} + ceph osd out osd.${otherosd} + # Primary might change before backfill starts + sleep 2 + primary=$(get_primary $poolname obj1) + ceph osd unset nobackfill + ceph tell osd.$primary get_latest_osdmap + ceph tell osd.$primary debug kick_recovery_wq 0 + sleep 2 + + wait_for_clean || return 1 + + local misplaced=$(expr $objects \* 2) + check $dir $PG $primary erasure $objects 0 0 0 || return 1 + + delete_pool $poolname + kill_daemons $dir || return 1 +} + + +main osd-backfill-stats "$@" + +# Local Variables: +# compile-command: "make -j4 && ../qa/run-standalone.sh osd-backfill-stats.sh" +# End: diff --git a/qa/standalone/osd/osd-bench.sh b/qa/standalone/osd/osd-bench.sh new file mode 100755 index 00000000..5bcbe377 --- /dev/null +++ b/qa/standalone/osd/osd-bench.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2014 Cloudwatt +# Copyright (C) 2014, 2015 Red Hat +# +# Author: Loic Dachary +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. 
+# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7106" # git grep '\<7106\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_bench() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + + local osd_bench_small_size_max_iops=$(CEPH_ARGS='' ceph-conf \ + --show-config-value osd_bench_small_size_max_iops) + local osd_bench_large_size_max_throughput=$(CEPH_ARGS='' ceph-conf \ + --show-config-value osd_bench_large_size_max_throughput) + local osd_bench_max_block_size=$(CEPH_ARGS='' ceph-conf \ + --show-config-value osd_bench_max_block_size) + local osd_bench_duration=$(CEPH_ARGS='' ceph-conf \ + --show-config-value osd_bench_duration) + + # + # block size too high + # + expect_failure $dir osd_bench_max_block_size \ + ceph tell osd.0 bench 1024 $((osd_bench_max_block_size + 1)) || return 1 + + # + # count too high for small (< 1MB) block sizes + # + local bsize=1024 + local max_count=$(($bsize * $osd_bench_duration * $osd_bench_small_size_max_iops)) + expect_failure $dir bench_small_size_max_iops \ + ceph tell osd.0 bench $(($max_count + 1)) $bsize || return 1 + + # + # count too high for large (>= 1MB) block sizes + # + local bsize=$((1024 * 1024 + 1)) + local max_count=$(($osd_bench_large_size_max_throughput * $osd_bench_duration)) + expect_failure $dir osd_bench_large_size_max_throughput \ + ceph tell osd.0 bench $(($max_count + 1)) $bsize || return 1 + + # + # default values should work + # + ceph tell osd.0 bench || return 1 + + # + # test object_size < block_size + ceph tell osd.0 bench 10 14456 4444 3 + # + + # + # test object_size < block_size & object_size = 0(default value) + # + ceph tell osd.0 bench 1 14456 +} + +main osd-bench "$@" + +# Local Variables: +# compile-command: "cd ../.. ; make -j4 && test/osd/osd-bench.sh" +# End: diff --git a/qa/standalone/osd/osd-bluefs-volume-ops.sh b/qa/standalone/osd/osd-bluefs-volume-ops.sh new file mode 100755 index 00000000..1c9c5cf2 --- /dev/null +++ b/qa/standalone/osd/osd-bluefs-volume-ops.sh @@ -0,0 +1,346 @@ +#!/usr/bin/env bash + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +[ `uname` = FreeBSD ] && exit 0 + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7146" # git grep '\<7146\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + CEPH_ARGS+="--bluestore_block_size=2147483648 " + CEPH_ARGS+="--bluestore_block_db_create=true " + CEPH_ARGS+="--bluestore_block_db_size=1073741824 " + CEPH_ARGS+="--bluestore_block_wal_size=536870912 " + CEPH_ARGS+="--bluestore_bluefs_min=536870912 " + CEPH_ARGS+="--bluestore_bluefs_min_free=536870912 " + CEPH_ARGS+="--bluestore_block_wal_create=true " + CEPH_ARGS+="--bluestore_fsck_on_mount=true " + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_bluestore() { + local dir=$1 + + local flimit=$(ulimit -n) + if [ $flimit -lt 1536 ]; then + echo "Low open file limit ($flimit), test may fail. 
Increase to 1536 or higher and retry if that happens." + fi + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + osd_pid0=$(cat $dir/osd.0.pid) + run_osd $dir 1 || return 1 + osd_pid1=$(cat $dir/osd.1.pid) + run_osd $dir 2 || return 1 + osd_pid2=$(cat $dir/osd.2.pid) + run_osd $dir 3 || return 1 + osd_pid3=$(cat $dir/osd.3.pid) + + sleep 5 + + create_pool foo 16 + + # write some objects + timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1 + + echo "after bench" + + # kill + while kill $osd_pid0; do sleep 1 ; done + ceph osd down 0 + while kill $osd_pid1; do sleep 1 ; done + ceph osd down 1 + while kill $osd_pid2; do sleep 1 ; done + ceph osd down 2 + while kill $osd_pid3; do sleep 1 ; done + ceph osd down 3 + + # expand slow devices + ceph-bluestore-tool --path $dir/0 fsck || return 1 + ceph-bluestore-tool --path $dir/1 fsck || return 1 + ceph-bluestore-tool --path $dir/2 fsck || return 1 + ceph-bluestore-tool --path $dir/3 fsck || return 1 + + truncate $dir/0/block -s 4294967296 # 4GB + ceph-bluestore-tool --path $dir/0 bluefs-bdev-expand || return 1 + truncate $dir/1/block -s 4311744512 # 4GB + 16MB + ceph-bluestore-tool --path $dir/1 bluefs-bdev-expand || return 1 + truncate $dir/2/block -s 4295099392 # 4GB + 129KB + ceph-bluestore-tool --path $dir/2 bluefs-bdev-expand || return 1 + truncate $dir/3/block -s 4293918720 # 4GB - 1MB + ceph-bluestore-tool --path $dir/3 bluefs-bdev-expand || return 1 + + # slow, DB, WAL -> slow, DB + ceph-bluestore-tool --path $dir/0 fsck || return 1 + ceph-bluestore-tool --path $dir/1 fsck || return 1 + ceph-bluestore-tool --path $dir/2 fsck || return 1 + ceph-bluestore-tool --path $dir/3 fsck || return 1 + + ceph-bluestore-tool --path $dir/0 bluefs-bdev-sizes + + ceph-bluestore-tool --path $dir/0 \ + --devs-source $dir/0/block.wal \ + --dev-target $dir/0/block.db \ + --command bluefs-bdev-migrate || return 1 + + ceph-bluestore-tool --path $dir/0 fsck || return 1 + + # slow, DB, WAL -> slow, WAL + ceph-bluestore-tool --path $dir/1 \ + --devs-source $dir/1/block.db \ + --dev-target $dir/1/block \ + --command bluefs-bdev-migrate || return 1 + + ceph-bluestore-tool --path $dir/1 fsck || return 1 + + # slow, DB, WAL -> slow + ceph-bluestore-tool --path $dir/2 \ + --devs-source $dir/2/block.wal \ + --devs-source $dir/2/block.db \ + --dev-target $dir/2/block \ + --command bluefs-bdev-migrate || return 1 + + ceph-bluestore-tool --path $dir/2 fsck || return 1 + + # slow, DB, WAL -> slow, WAL (negative case) + ceph-bluestore-tool --path $dir/3 \ + --devs-source $dir/3/block.db \ + --dev-target $dir/3/block.wal \ + --command bluefs-bdev-migrate + + # Migration to WAL is unsupported + if [ $? 
-eq 0 ]; then + return 1 + fi + ceph-bluestore-tool --path $dir/3 fsck || return 1 + + # slow, DB, WAL -> slow, DB (WAL to slow then slow to DB) + ceph-bluestore-tool --path $dir/3 \ + --devs-source $dir/3/block.wal \ + --dev-target $dir/3/block \ + --command bluefs-bdev-migrate || return 1 + + ceph-bluestore-tool --path $dir/3 fsck || return 1 + + ceph-bluestore-tool --path $dir/3 \ + --devs-source $dir/3/block \ + --dev-target $dir/3/block.db \ + --command bluefs-bdev-migrate || return 1 + + ceph-bluestore-tool --path $dir/3 fsck || return 1 + + activate_osd $dir 0 || return 1 + osd_pid0=$(cat $dir/osd.0.pid) + activate_osd $dir 1 || return 1 + osd_pid1=$(cat $dir/osd.1.pid) + activate_osd $dir 2 || return 1 + osd_pid2=$(cat $dir/osd.2.pid) + activate_osd $dir 3 || return 1 + osd_pid3=$(cat $dir/osd.3.pid) + + wait_for_clean || return 1 + + # write some objects + timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1 + + # kill + while kill $osd_pid0; do sleep 1 ; done + ceph osd down 0 + while kill $osd_pid1; do sleep 1 ; done + ceph osd down 1 + while kill $osd_pid2; do sleep 1 ; done + ceph osd down 2 + while kill $osd_pid3; do sleep 1 ; done + ceph osd down 3 + + # slow, DB -> slow, DB, WAL + ceph-bluestore-tool --path $dir/0 fsck || return 1 + + dd if=/dev/zero of=$dir/0/wal count=512 bs=1M + ceph-bluestore-tool --path $dir/0 \ + --dev-target $dir/0/wal \ + --command bluefs-bdev-new-wal || return 1 + + ceph-bluestore-tool --path $dir/0 fsck || return 1 + + # slow, WAL -> slow, DB, WAL + ceph-bluestore-tool --path $dir/1 fsck || return 1 + + dd if=/dev/zero of=$dir/1/db count=1024 bs=1M + ceph-bluestore-tool --path $dir/1 \ + --dev-target $dir/1/db \ + --command bluefs-bdev-new-db || return 1 + + ceph-bluestore-tool --path $dir/1 \ + --devs-source $dir/1/block \ + --dev-target $dir/1/block.db \ + --command bluefs-bdev-migrate || return 1 + + ceph-bluestore-tool --path $dir/1 fsck || return 1 + + # slow -> slow, DB, WAL + ceph-bluestore-tool --path $dir/2 fsck || return 1 + + ceph-bluestore-tool --path $dir/2 \ + --command bluefs-bdev-new-db || return 1 + + ceph-bluestore-tool --path $dir/2 \ + --command bluefs-bdev-new-wal || return 1 + + ceph-bluestore-tool --path $dir/2 \ + --devs-source $dir/2/block \ + --dev-target $dir/2/block.db \ + --command bluefs-bdev-migrate || return 1 + + ceph-bluestore-tool --path $dir/2 fsck || return 1 + + # slow, DB -> slow, WAL + ceph-bluestore-tool --path $dir/3 fsck || return 1 + + ceph-bluestore-tool --path $dir/3 \ + --command bluefs-bdev-new-wal || return 1 + + ceph-bluestore-tool --path $dir/3 \ + --devs-source $dir/3/block.db \ + --dev-target $dir/3/block \ + --command bluefs-bdev-migrate || return 1 + + ceph-bluestore-tool --path $dir/3 fsck || return 1 + + activate_osd $dir 0 || return 1 + osd_pid0=$(cat $dir/osd.0.pid) + activate_osd $dir 1 || return 1 + osd_pid1=$(cat $dir/osd.1.pid) + activate_osd $dir 2 || return 1 + osd_pid2=$(cat $dir/osd.2.pid) + activate_osd $dir 3 || return 1 + osd_pid3=$(cat $dir/osd.3.pid) + + # write some objects + timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1 + + # kill + while kill $osd_pid0; do sleep 1 ; done + ceph osd down 0 + while kill $osd_pid1; do sleep 1 ; done + ceph osd down 1 + while kill $osd_pid2; do sleep 1 ; done + ceph osd down 2 + while kill $osd_pid3; do sleep 1 ; done + ceph osd down 3 + + # slow, DB1, WAL -> slow, DB2, WAL + ceph-bluestore-tool --path $dir/0 fsck || return 1 + + dd if=/dev/zero of=$dir/0/db2 count=1024 bs=1M + ceph-bluestore-tool 
--path $dir/0 \ + --devs-source $dir/0/block.db \ + --dev-target $dir/0/db2 \ + --command bluefs-bdev-migrate || return 1 + + ceph-bluestore-tool --path $dir/0 fsck || return 1 + + # slow, DB, WAL1 -> slow, DB, WAL2 + + dd if=/dev/zero of=$dir/0/wal2 count=512 bs=1M + ceph-bluestore-tool --path $dir/0 \ + --devs-source $dir/0/block.wal \ + --dev-target $dir/0/wal2 \ + --command bluefs-bdev-migrate || return 1 + rm -rf $dir/0/wal + + ceph-bluestore-tool --path $dir/0 fsck || return 1 + + # slow, DB + WAL -> slow, DB2 -> slow + ceph-bluestore-tool --path $dir/1 fsck || return 1 + + dd if=/dev/zero of=$dir/1/db2 count=1024 bs=1M + ceph-bluestore-tool --path $dir/1 \ + --devs-source $dir/1/block.db \ + --devs-source $dir/1/block.wal \ + --dev-target $dir/1/db2 \ + --command bluefs-bdev-migrate || return 1 + + rm -rf $dir/1/db + + ceph-bluestore-tool --path $dir/1 fsck || return 1 + + ceph-bluestore-tool --path $dir/1 \ + --devs-source $dir/1/block.db \ + --dev-target $dir/1/block \ + --command bluefs-bdev-migrate || return 1 + + rm -rf $dir/1/db2 + + ceph-bluestore-tool --path $dir/1 fsck || return 1 + + # slow -> slow, DB (negative case) + ceph-objectstore-tool --type bluestore --data-path $dir/2 \ + --op fsck --no-mon-config || return 1 + + dd if=/dev/zero of=$dir/2/db2 count=1024 bs=1M + ceph-bluestore-tool --path $dir/2 \ + --devs-source $dir/2/block \ + --dev-target $dir/2/db2 \ + --command bluefs-bdev-migrate + + # Migration from slow-only to new device is unsupported + if [ $? -eq 0 ]; then + return 1 + fi + ceph-bluestore-tool --path $dir/2 fsck || return 1 + + # slow + DB + WAL -> slow, DB2 + dd if=/dev/zero of=$dir/2/db2 count=1024 bs=1M + + ceph-bluestore-tool --path $dir/2 \ + --devs-source $dir/2/block \ + --devs-source $dir/2/block.db \ + --devs-source $dir/2/block.wal \ + --dev-target $dir/2/db2 \ + --command bluefs-bdev-migrate || return 1 + + ceph-bluestore-tool --path $dir/2 fsck || return 1 + + # slow + WAL -> slow2, WAL2 + dd if=/dev/zero of=$dir/3/wal2 count=1024 bs=1M + + ceph-bluestore-tool --path $dir/3 \ + --devs-source $dir/3/block \ + --devs-source $dir/3/block.wal \ + --dev-target $dir/3/wal2 \ + --command bluefs-bdev-migrate || return 1 + + ceph-bluestore-tool --path $dir/3 fsck || return 1 + + activate_osd $dir 0 || return 1 + osd_pid0=$(cat $dir/osd.0.pid) + activate_osd $dir 1 || return 1 + osd_pid1=$(cat $dir/osd.1.pid) + activate_osd $dir 2 || return 1 + osd_pid2=$(cat $dir/osd.2.pid) + activate_osd $dir 3 || return 1 + osd_pid3=$(cat $dir/osd.3.pid) + + # write some objects + timeout 60 rados bench -p foo 30 write -b 4096 --no-cleanup #|| return 1 + + wait_for_clean || return 1 +} + +main osd-bluefs-volume-ops "$@" + +# Local Variables: +# compile-command: "cd ../.. ; make -j4 && test/osd/osd-bluefs-volume-ops.sh" +# End: diff --git a/qa/standalone/osd/osd-config.sh b/qa/standalone/osd/osd-config.sh new file mode 100755 index 00000000..126c2f7d --- /dev/null +++ b/qa/standalone/osd/osd-config.sh @@ -0,0 +1,97 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2014 Cloudwatt +# Copyright (C) 2014, 2015 Red Hat +# +# Author: Loic Dachary +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU Library Public License for more details. +# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7100" # git grep '\<7100\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_config_init() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + local stale=1000 + local cache=500 + run_osd $dir 0 \ + --osd-map-cache-size=$cache \ + --osd-pg-epoch-persisted-max-stale=$stale \ + || return 1 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1 + grep 'is not > osd_pg_epoch_persisted_max_stale' $dir/osd.0.log || return 1 +} + +function TEST_config_track() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + + local osd_map_cache_size=$(CEPH_ARGS='' ceph-conf \ + --show-config-value osd_map_cache_size) + local osd_pg_epoch_persisted_max_stale=$(CEPH_ARGS='' ceph-conf \ + --show-config-value osd_pg_epoch_persisted_max_stale) + + # + # increase the osd_pg_epoch_persisted_max_stale above the default cache_size + # + ! grep 'is not > osd_pg_epoch_persisted_max_stale' $dir/osd.0.log || return 1 + local stale=$(($osd_map_cache_size * 2)) + ceph tell osd.0 injectargs "--osd-pg-epoch-persisted-max-stale $stale" || return 1 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log flush || return 1 + grep 'is not > osd_pg_epoch_persisted_max_stale' $dir/osd.0.log || return 1 + rm $dir/osd.0.log + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.0) log reopen || return 1 +} + +function TEST_default_adjustment() { + a=$(ceph-osd --no-mon-config --show-config-value rgw_torrent_origin) + b=$(ceph-osd --no-mon-config --show-config-value rgw_torrent_origin --default-rgw-torrent-origin default) + c=$(ceph-osd --no-mon-config --show-config-value rgw_torrent_origin --default-rgw-torrent-origin arg) + [ "$a" != "default" ] || return 1 + [ "$b" = "default" ] || return 1 + [ "$c" = "arg" ] || return 1 + + a=$(ceph-osd --no-mon-config --show-config-value log_to_file) + b=$(ceph-osd --no-mon-config --show-config-value log_to_file --default-log-to-file=false) + c=$(ceph-osd --no-mon-config --show-config-value log_to_file --default-log-to-file=false --log-to-file) + [ "$a" = "true" ] || return 1 + [ "$b" = "false" ] || return 1 + [ "$c" = "true" ] || return 1 +} + +main osd-config "$@" + +# Local Variables: +# compile-command: "cd ../.. ; make -j4 && test/osd/osd-config.sh" +# End: diff --git a/qa/standalone/osd/osd-copy-from.sh b/qa/standalone/osd/osd-copy-from.sh new file mode 100755 index 00000000..8ac0ab54 --- /dev/null +++ b/qa/standalone/osd/osd-copy-from.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2014 Cloudwatt +# Copyright (C) 2014, 2015 Red Hat +# +# Author: Loic Dachary +# Author: Sage Weil +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7111" # git grep '\<7111\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_copy_from() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + create_rbd_pool || return 1 + + # success + rados -p rbd put foo $(which rados) + rados -p rbd cp foo foo2 + rados -p rbd stat foo2 + + # failure + ceph tell osd.\* injectargs -- --osd-debug-inject-copyfrom-error + ! rados -p rbd cp foo foo3 + ! rados -p rbd stat foo3 + + # success again + ceph tell osd.\* injectargs -- --no-osd-debug-inject-copyfrom-error + ! rados -p rbd cp foo foo3 + rados -p rbd stat foo3 +} + +main osd-copy-from "$@" + +# Local Variables: +# compile-command: "cd ../.. ; make -j4 && test/osd/osd-bench.sh" +# End: diff --git a/qa/standalone/osd/osd-dup.sh b/qa/standalone/osd/osd-dup.sh new file mode 100755 index 00000000..fdb2649c --- /dev/null +++ b/qa/standalone/osd/osd-dup.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +[ `uname` = FreeBSD ] && exit 0 + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7146" # git grep '\<7146\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + # avoid running out of fds in rados bench + CEPH_ARGS+="--filestore_wbthrottle_xfs_ios_hard_limit=900 " + CEPH_ARGS+="--filestore_wbthrottle_btrfs_ios_hard_limit=900 " + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_filestore_to_bluestore() { + local dir=$1 + + local flimit=$(ulimit -n) + if [ $flimit -lt 1536 ]; then + echo "Low open file limit ($flimit), test may fail. Increase to 1536 or higher and retry if that happens." + fi + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd_filestore $dir 0 || return 1 + osd_pid=$(cat $dir/osd.0.pid) + run_osd_filestore $dir 1 || return 1 + run_osd_filestore $dir 2 || return 1 + + sleep 5 + + create_pool foo 16 + + # write some objects + timeout 20 rados bench -p foo 10 write -b 4096 --no-cleanup || return 1 + + # kill + while kill $osd_pid; do sleep 1 ; done + ceph osd down 0 + + mv $dir/0 $dir/0.old || return 1 + mkdir $dir/0 || return 1 + ofsid=$(cat $dir/0.old/fsid) + echo "osd fsid $ofsid" + O=$CEPH_ARGS + CEPH_ARGS+="--log-file $dir/cot.log --log-max-recent 0 " + ceph-objectstore-tool --type bluestore --data-path $dir/0 --fsid $ofsid \ + --op mkfs --no-mon-config || return 1 + ceph-objectstore-tool --data-path $dir/0.old --target-data-path $dir/0 \ + --op dup || return 1 + CEPH_ARGS=$O + + activate_osd $dir 0 || return 1 + + while ! 
ceph osd stat | grep '3 up' ; do sleep 1 ; done + ceph osd metadata 0 | grep bluestore || return 1 + + ceph osd scrub 0 + + # give it some time + sleep 15 + # and make sure mon is sync'ed + flush_pg_stats + + wait_for_clean || return 1 +} + +main osd-dup "$@" + +# Local Variables: +# compile-command: "cd ../.. ; make -j4 && test/osd/osd-dup.sh" +# End: diff --git a/qa/standalone/osd/osd-fast-mark-down.sh b/qa/standalone/osd/osd-fast-mark-down.sh new file mode 100755 index 00000000..cf5851c2 --- /dev/null +++ b/qa/standalone/osd/osd-fast-mark-down.sh @@ -0,0 +1,116 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2016 Piotr Dałek +# Copyright (C) 2014, 2015 Red Hat +# +# Author: Piotr Dałek +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh +MAX_PROPAGATION_TIME=30 + +function run() { + local dir=$1 + shift + rm -f $dir/*.pid + export CEPH_MON="127.0.0.1:7126" # git grep '\<7126\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + + OLD_ARGS=$CEPH_ARGS + CEPH_ARGS+="--osd-fast-fail-on-connection-refused=false " + echo "Ensuring old behavior is there..." + test_fast_kill $dir && (echo "OSDs died too early! Old behavior doesn't work." ; return 1) + + CEPH_ARGS=$OLD_ARGS"--osd-fast-fail-on-connection-refused=true " + OLD_ARGS=$CEPH_ARGS + + # force v1 addr here for simple's benefit + CEPH_ARGS+="--ms_type=simple --mon-host=v1:$CEPH_MON" + echo "Testing simple msgr..." + test_fast_kill $dir || return 1 + + CEPH_ARGS=$OLD_ARGS"--ms_type=async --mon-host=$CEPH_MON" + echo "Testing async msgr..." + test_fast_kill $dir || return 1 + + return 0 + +} + +function test_fast_kill() { + # create cluster with 3 osds + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=3 || return 1 + run_mgr $dir x || return 1 + for oi in {0..2}; do + run_osd $dir $oi || return 1 + pids[$oi]=$(cat $dir/osd.$oi.pid) + done + + create_rbd_pool || return 1 + + # make some objects so osds to ensure connectivity between osds + timeout 20 rados -p rbd bench 10 write -b 4096 --max-objects 128 --no-cleanup || return 1 + sleep 1 + + killid=0 + previd=0 + + # kill random osd and see if after max MAX_PROPAGATION_TIME, the osd count decreased. + for i in {1..2}; do + while [ $killid -eq $previd ]; do + killid=${pids[$RANDOM%${#pids[@]}]} + done + previd=$killid + + kill -9 $killid + time_left=$MAX_PROPAGATION_TIME + down_osds=0 + + while [ $time_left -gt 0 ]; do + sleep 1 + time_left=$[$time_left - 1]; + + grep -m 1 -c -F "ms_handle_refused" $dir/osd.*.log > /dev/null + if [ $? -ne 0 ]; then + continue + fi + + down_osds=$(ceph osd tree | grep -c down) + if [ $down_osds -lt $i ]; then + # osds not marked down yet, try again in a second + continue + elif [ $down_osds -gt $i ]; then + echo Too many \($down_osds\) osds died! 
+ return 1 + else + break + fi + done + + if [ $down_osds -lt $i ]; then + echo Killed the OSD, yet it is not marked down + ceph osd tree + return 1 + fi + done + pkill -SIGTERM rados + teardown $dir || return 1 +} + +main osd-fast-mark-down "$@" + +# Local Variables: +# compile-command: "cd ../.. ; make -j4 && test/osd/osd-fast-mark-down.sh" +# End: diff --git a/qa/standalone/osd/osd-force-create-pg.sh b/qa/standalone/osd/osd-force-create-pg.sh new file mode 100755 index 00000000..f70caac1 --- /dev/null +++ b/qa/standalone/osd/osd-force-create-pg.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7145" # git grep '\<7145\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + $func $dir || return 1 + done +} + +function TEST_reuse_id() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + + ceph osd pool create foo 50 || return 1 + wait_for_clean || return 1 + + kill_daemons $dir TERM osd.0 + kill_daemons $dir TERM osd.1 + kill_daemons $dir TERM osd.2 + ceph-objectstore-tool --data-path $dir/0 --op remove --pgid 1.0 --force + ceph-objectstore-tool --data-path $dir/1 --op remove --pgid 1.0 --force + ceph-objectstore-tool --data-path $dir/2 --op remove --pgid 1.0 --force + activate_osd $dir 0 || return 1 + activate_osd $dir 1 || return 1 + activate_osd $dir 2 || return 1 + sleep 10 + ceph pg ls | grep 1.0 | grep stale || return 1 + + ceph osd force-create-pg 1.0 --yes-i-really-mean-it || return 1 + wait_for_clean || return 1 +} + +main osd-force-create-pg "$@" + +# Local Variables: +# compile-command: "cd ../.. ; make -j4 && test/osd/osd-force-create-pg.sh" +# End: diff --git a/qa/standalone/osd/osd-markdown.sh b/qa/standalone/osd/osd-markdown.sh new file mode 100755 index 00000000..6dc1f883 --- /dev/null +++ b/qa/standalone/osd/osd-markdown.sh @@ -0,0 +1,131 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2015 Intel +# Copyright (C) 2014, 2015 Red Hat +# +# Author: Xiaoxi Chen +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. 
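+#
+# The tests below exercise the osd_max_markdown_count / osd_max_markdown_period
+# pair: an OSD that is marked down more than count times within period seconds
+# gives up and stays down instead of booting again.  Rough timeline (a sketch
+# for the values used by TEST_markdown_exceed_maxdown_count: count=3,
+# period=300s, one "ceph osd down 0" every 10s):
+#   t=0s,10s,20s  3 markdowns -> osd.0 still comes back up each time
+#   t=30s         4th markdown within the 300s window -> osd.0 stays down,
+#                 and "ceph osd tree" keeps reporting it as down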
+# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7108" # git grep '\<7108\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function markdown_N_impl() { + markdown_times=$1 + total_time=$2 + sleeptime=$3 + for i in `seq 1 $markdown_times` + do + # check the OSD is UP + ceph osd tree + ceph osd tree | grep osd.0 |grep up || return 1 + # mark the OSD down. + # override any dup setting in the environment to ensure we do this + # exactly once (modulo messenger failures, at least; we can't *actually* + # provide exactly-once semantics for mon commands). + ( unset CEPH_CLI_TEST_DUP_COMMAND ; ceph osd down 0 ) + sleep $sleeptime + done +} + + +function TEST_markdown_exceed_maxdown_count() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + + create_rbd_pool || return 1 + + # 3+1 times within 300s, osd should stay dead on the 4th time + local count=3 + local sleeptime=10 + local period=300 + ceph tell osd.0 injectargs '--osd_max_markdown_count '$count'' || return 1 + ceph tell osd.0 injectargs '--osd_max_markdown_period '$period'' || return 1 + + markdown_N_impl $(($count+1)) $period $sleeptime + # down N+1 times ,the osd.0 should die + ceph osd tree | grep down | grep osd.0 || return 1 +} + +function TEST_markdown_boot() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + + create_rbd_pool || return 1 + + # 3 times within 120s, should stay up + local count=3 + local sleeptime=10 + local period=120 + ceph tell osd.0 injectargs '--osd_max_markdown_count '$count'' || return 1 + ceph tell osd.0 injectargs '--osd_max_markdown_period '$period'' || return 1 + + markdown_N_impl $count $period $sleeptime + #down N times, osd.0 should be up + sleep 15 # give osd plenty of time to notice and come back up + ceph osd tree | grep up | grep osd.0 || return 1 +} + +function TEST_markdown_boot_exceed_time() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + + create_rbd_pool || return 1 + + # 3+1 times, but over 40s, > 20s, so should stay up + local count=3 + local period=20 + local sleeptime=10 + ceph tell osd.0 injectargs '--osd_max_markdown_count '$count'' || return 1 + ceph tell osd.0 injectargs '--osd_max_markdown_period '$period'' || return 1 + + markdown_N_impl $(($count+1)) $period $sleeptime + sleep 15 # give osd plenty of time to notice and come back up + ceph osd tree | grep up | grep osd.0 || return 1 +} + +main osd-markdown "$@" + +# Local Variables: +# compile-command: "cd ../.. 
; make -j4 && test/osd/osd-bench.sh" +# End: diff --git a/qa/standalone/osd/osd-reactivate.sh b/qa/standalone/osd/osd-reactivate.sh new file mode 100755 index 00000000..6d643862 --- /dev/null +++ b/qa/standalone/osd/osd-reactivate.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash +# +# Author: Vicente Cheng +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7122" # git grep '\<7122\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_reactivate() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + + kill_daemons $dir TERM osd || return 1 + + ready_path=$dir"/0/ready" + activate_path=$dir"/0/active" + # trigger mkfs again + rm -rf $ready_path $activate_path + activate_osd $dir 0 || return 1 + +} + +main osd-reactivate "$@" + +# Local Variables: +# compile-command: "cd ../.. ; make -j4 && test/osd/osd-reactivate.sh" +# End: diff --git a/qa/standalone/osd/osd-recovery-prio.sh b/qa/standalone/osd/osd-recovery-prio.sh new file mode 100755 index 00000000..fb386e26 --- /dev/null +++ b/qa/standalone/osd/osd-recovery-prio.sh @@ -0,0 +1,515 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2019 Red Hat +# +# Author: David Zafman +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + # Fix port???? 
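+    # Priorities asserted by the tests in this file (see the FORCE_PRIO and
+    # NORMAL_PRIO exports below): force-recovery queues a PG at the forced
+    # priority 255, ordinary recovery at 190, and the pool's recovery_priority
+    # is added on top of that.  A sketch of the jq query the checks are built
+    # on (the osd id and pg id here are placeholders):
+    #   ceph --admin-daemon $(get_asok_path osd.0) dump_recovery_reservations |
+    #     jq '(.local_reservations.queues[].items[] | select(.item == "1.0")).prio'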
+ export CEPH_MON="127.0.0.1:7114" # git grep '\<7114\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON --osd_max_backfills=1 --debug_reserver=20" + export objects=200 + export poolprefix=test + export FORCE_PRIO="255" # See OSD_RECOVERY_PRIORITY_FORCED + export NORMAL_PRIO="190" # See OSD_RECOVERY_PRIORITY_BASE + 10 + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + + +function TEST_recovery_priority() { + local dir=$1 + local pools=10 + local OSDS=5 + local max_tries=10 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + export CEPH_ARGS + + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + for p in $(seq 1 $pools) + do + create_pool "${poolprefix}$p" 1 1 + ceph osd pool set "${poolprefix}$p" size 2 + done + sleep 5 + + wait_for_clean || return 1 + + ceph pg dump pgs + + # Find 3 pools with a pg with the same primaries but second + # replica on another osd. + local PG1 + local POOLNUM1 + local pool1 + local chk_osd1_1 + local chk_osd1_2 + + local PG2 + local POOLNUM2 + local pool2 + local chk_osd2 + + local PG3 + local POOLNUM3 + local pool3 + + for p in $(seq 1 $pools) + do + ceph pg map ${p}.0 --format=json | jq '.acting[]' > $dir/acting + local test_osd1=$(head -1 $dir/acting) + local test_osd2=$(tail -1 $dir/acting) + if [ -z "$PG1" ]; + then + PG1="${p}.0" + POOLNUM1=$p + pool1="${poolprefix}$p" + chk_osd1_1=$test_osd1 + chk_osd1_2=$test_osd2 + elif [ -z "$PG2" -a $chk_osd1_1 = $test_osd1 -a $chk_osd1_2 != $test_osd2 ]; + then + PG2="${p}.0" + POOLNUM2=$p + pool2="${poolprefix}$p" + chk_osd2=$test_osd2 + elif [ -n "$PG2" -a $chk_osd1_1 = $test_osd1 -a $chk_osd1_2 != $test_osd2 -a "$chk_osd2" != $test_osd2 ]; + then + PG3="${p}.0" + POOLNUM3=$p + pool3="${poolprefix}$p" + break + fi + done + rm -f $dir/acting + + if [ "$pool2" = "" -o "pool3" = "" ]; + then + echo "Failure to find appropirate PGs" + return 1 + fi + + for p in $(seq 1 $pools) + do + if [ $p != $POOLNUM1 -a $p != $POOLNUM2 -a $p != $POOLNUM3 ]; + then + delete_pool ${poolprefix}$p + fi + done + + ceph osd pool set $pool2 size 1 + ceph osd pool set $pool3 size 1 + wait_for_clean || return 1 + + dd if=/dev/urandom of=$dir/data bs=1M count=10 + p=1 + for pname in $pool1 $pool2 $pool3 + do + for i in $(seq 1 $objects) + do + rados -p ${pname} put obj${i}-p${p} $dir/data + done + p=$(expr $p + 1) + done + + local otherosd=$(get_not_primary $pool1 obj1-p1) + + ceph pg dump pgs + ERRORS=0 + + ceph osd set norecover + ceph osd set noout + + # Get a pg to want to recover and quickly force it + # to be preempted. + ceph osd pool set $pool3 size 2 + sleep 2 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1 + + # 3. Item is in progress, adjust priority with no higher priority waiting + for i in $(seq 1 $max_tries) + do + if ! 
ceph pg force-recovery $PG3 2>&1 | grep -q "doesn't require recovery"; then + break + fi + if [ "$i" = "$max_tries" ]; then + echo "ERROR: Didn't appear to be able to force-recovery" + ERRORS=$(expr $ERRORS + 1) + fi + sleep 2 + done + flush_pg_stats || return 1 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1 + + ceph osd out osd.$chk_osd1_2 + sleep 2 + flush_pg_stats || return 1 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1 + ceph pg dump pgs + + ceph osd pool set $pool2 size 2 + sleep 2 + flush_pg_stats || return 1 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1 + cat $dir/out + ceph pg dump pgs + + PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG1}\")).prio") + if [ "$PRIO" != "$NORMAL_PRIO" ]; + then + echo "The normal PG ${PG1} doesn't have prio $NORMAL_PRIO queued waiting" + ERRORS=$(expr $ERRORS + 1) + fi + + # Using eval will strip double-quotes from item + eval ITEM=$(cat $dir/out | jq '.local_reservations.in_progress[0].item') + if [ "$ITEM" != ${PG3} ]; + then + echo "The first force-recovery PG $PG3 didn't become the in progress item" + ERRORS=$(expr $ERRORS + 1) + else + PRIO=$(cat $dir/out | jq '.local_reservations.in_progress[0].prio') + if [ "$PRIO" != $FORCE_PRIO ]; + then + echo "The first force-recovery PG ${PG3} doesn't have prio $FORCE_PRIO" + ERRORS=$(expr $ERRORS + 1) + fi + fi + + # 1. Item is queued, re-queue with new priority + for i in $(seq 1 $max_tries) + do + if ! ceph pg force-recovery $PG2 2>&1 | grep -q "doesn't require recovery"; then + break + fi + if [ "$i" = "$max_tries" ]; then + echo "ERROR: Didn't appear to be able to force-recovery" + ERRORS=$(expr $ERRORS + 1) + fi + sleep 2 + done + sleep 2 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1 + cat $dir/out + PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG2}\")).prio") + if [ "$PRIO" != "$FORCE_PRIO" ]; + then + echo "The second force-recovery PG ${PG2} doesn't have prio $FORCE_PRIO" + ERRORS=$(expr $ERRORS + 1) + fi + flush_pg_stats || return 1 + + # 4. Item is in progress, if higher priority items waiting prempt item + #ceph osd unset norecover + ceph pg cancel-force-recovery $PG3 || return 1 + sleep 2 + #ceph osd set norecover + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1 + cat $dir/out + PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG3}\")).prio") + if [ "$PRIO" != "$NORMAL_PRIO" ]; + then + echo "After cancel-recovery PG ${PG3} doesn't have prio $NORMAL_PRIO" + ERRORS=$(expr $ERRORS + 1) + fi + + eval ITEM=$(cat $dir/out | jq '.local_reservations.in_progress[0].item') + if [ "$ITEM" != ${PG2} ]; + then + echo "The force-recovery PG $PG2 didn't become the in progress item" + ERRORS=$(expr $ERRORS + 1) + else + PRIO=$(cat $dir/out | jq '.local_reservations.in_progress[0].prio') + if [ "$PRIO" != $FORCE_PRIO ]; + then + echo "The first force-recovery PG ${PG2} doesn't have prio $FORCE_PRIO" + ERRORS=$(expr $ERRORS + 1) + fi + fi + + ceph pg cancel-force-recovery $PG2 || return 1 + sleep 5 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations || return 1 + + # 2. 
Item is queued, re-queue and preempt because new priority higher than an in progress item + flush_pg_stats || return 1 + ceph pg force-recovery $PG3 || return 1 + sleep 2 + + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/out || return 1 + cat $dir/out + PRIO=$(cat $dir/out | jq "(.local_reservations.queues[].items[] | select(.item == \"${PG2}\")).prio") + if [ "$PRIO" != "$NORMAL_PRIO" ]; + then + echo "After cancel-force-recovery PG ${PG3} doesn't have prio $NORMAL_PRIO" + ERRORS=$(expr $ERRORS + 1) + fi + + eval ITEM=$(cat $dir/out | jq '.local_reservations.in_progress[0].item') + if [ "$ITEM" != ${PG3} ]; + then + echo "The force-recovery PG $PG3 didn't get promoted to an in progress item" + ERRORS=$(expr $ERRORS + 1) + else + PRIO=$(cat $dir/out | jq '.local_reservations.in_progress[0].prio') + if [ "$PRIO" != $FORCE_PRIO ]; + then + echo "The force-recovery PG ${PG2} doesn't have prio $FORCE_PRIO" + ERRORS=$(expr $ERRORS + 1) + fi + fi + + ceph osd unset noout + ceph osd unset norecover + + wait_for_clean "CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations" || return 1 + + ceph pg dump pgs + + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_pgstate_history + + if [ $ERRORS != "0" ]; + then + echo "$ERRORS error(s) found" + else + echo TEST PASSED + fi + + delete_pool $pool1 + delete_pool $pool2 + delete_pool $pool3 + kill_daemons $dir || return 1 + return $ERRORS +} + +# +# Show that pool recovery_priority is added to recovery priority +# +# Create 2 pools with 2 OSDs with different primarys +# pool 1 with recovery_priority 1 +# pool 2 with recovery_priority 2 +# +# Start recovery by changing the pool sizes from 1 to 2 +# Use dump_recovery_reservations to verify priorities +function TEST_recovery_pool_priority() { + local dir=$1 + local pools=3 # Don't assume the first 2 pools are exact what we want + local OSDS=2 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + export CEPH_ARGS + + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + for p in $(seq 1 $pools) + do + create_pool "${poolprefix}$p" 1 1 + ceph osd pool set "${poolprefix}$p" size 2 + done + sleep 5 + + wait_for_clean || return 1 + + ceph pg dump pgs + + # Find 2 pools with different primaries which + # means the replica must be on another osd. 
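+    # Expected numbers for the checks further down (worked from this test's
+    # own settings): with NORMAL_PRIO=190, recovery_priority=1 on pool1 and 2
+    # on pool2, dump_recovery_reservations should report prio 190+1=191 for
+    # pool1's PG and 190+2=192 for pool2's PG, on both the local (primary)
+    # and the remote (replica) reservation queues.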
+ local PG1 + local POOLNUM1 + local pool1 + local chk_osd1_1 + local chk_osd1_2 + + local PG2 + local POOLNUM2 + local pool2 + local chk_osd2_1 + local chk_osd2_2 + + for p in $(seq 1 $pools) + do + ceph pg map ${p}.0 --format=json | jq '.acting[]' > $dir/acting + local test_osd1=$(head -1 $dir/acting) + local test_osd2=$(tail -1 $dir/acting) + if [ -z "$PG1" ]; + then + PG1="${p}.0" + POOLNUM1=$p + pool1="${poolprefix}$p" + chk_osd1_1=$test_osd1 + chk_osd1_2=$test_osd2 + elif [ $chk_osd1_1 != $test_osd1 ]; + then + PG2="${p}.0" + POOLNUM2=$p + pool2="${poolprefix}$p" + chk_osd2_1=$test_osd1 + chk_osd2_2=$test_osd2 + break + fi + done + rm -f $dir/acting + + if [ "$pool2" = "" ]; + then + echo "Failure to find appropirate PGs" + return 1 + fi + + for p in $(seq 1 $pools) + do + if [ $p != $POOLNUM1 -a $p != $POOLNUM2 ]; + then + delete_pool ${poolprefix}$p + fi + done + + pool1_extra_prio=1 + pool2_extra_prio=2 + pool1_prio=$(expr $NORMAL_PRIO + $pool1_extra_prio) + pool2_prio=$(expr $NORMAL_PRIO + $pool2_extra_prio) + + ceph osd pool set $pool1 size 1 + ceph osd pool set $pool1 recovery_priority $pool1_extra_prio + ceph osd pool set $pool2 size 1 + ceph osd pool set $pool2 recovery_priority $pool2_extra_prio + wait_for_clean || return 1 + + dd if=/dev/urandom of=$dir/data bs=1M count=10 + p=1 + for pname in $pool1 $pool2 + do + for i in $(seq 1 $objects) + do + rados -p ${pname} put obj${i}-p${p} $dir/data + done + p=$(expr $p + 1) + done + + local otherosd=$(get_not_primary $pool1 obj1-p1) + + ceph pg dump pgs + ERRORS=0 + + ceph osd pool set $pool1 size 2 + ceph osd pool set $pool2 size 2 + sleep 10 + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_1}) dump_recovery_reservations > $dir/dump.${chk_osd1_1}.out + echo osd.${chk_osd1_1} + cat $dir/dump.${chk_osd1_1}.out + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${chk_osd1_2}) dump_recovery_reservations > $dir/dump.${chk_osd1_2}.out + echo osd.${chk_osd1_2} + cat $dir/dump.${chk_osd1_2}.out + + # Using eval will strip double-quotes from item + eval ITEM=$(cat $dir/dump.${chk_osd1_1}.out | jq '.local_reservations.in_progress[0].item') + if [ "$ITEM" != ${PG1} ]; + then + echo "The primary PG for $pool1 didn't become the in progress item" + ERRORS=$(expr $ERRORS + 1) + else + PRIO=$(cat $dir/dump.${chk_osd1_1}.out | jq '.local_reservations.in_progress[0].prio') + if [ "$PRIO" != $pool1_prio ]; + then + echo "The primary PG ${PG1} doesn't have prio $pool1_prio" + ERRORS=$(expr $ERRORS + 1) + fi + fi + + # Using eval will strip double-quotes from item + eval ITEM=$(cat $dir/dump.${chk_osd1_2}.out | jq '.remote_reservations.in_progress[0].item') + if [ "$ITEM" != ${PG1} ]; + then + echo "The primary PG for $pool1 didn't become the in progress item on remote" + ERRORS=$(expr $ERRORS + 1) + else + PRIO=$(cat $dir/dump.${chk_osd1_2}.out | jq '.remote_reservations.in_progress[0].prio') + if [ "$PRIO" != $pool1_prio ]; + then + echo "The primary PG ${PG1} doesn't have prio $pool1_prio on remote" + ERRORS=$(expr $ERRORS + 1) + fi + fi + + # Using eval will strip double-quotes from item + eval ITEM=$(cat $dir/dump.${chk_osd2_1}.out | jq '.local_reservations.in_progress[0].item') + if [ "$ITEM" != ${PG2} ]; + then + echo "The primary PG for $pool2 didn't become the in progress item" + ERRORS=$(expr $ERRORS + 1) + else + PRIO=$(cat $dir/dump.${chk_osd2_1}.out | jq '.local_reservations.in_progress[0].prio') + if [ "$PRIO" != $pool2_prio ]; + then + echo "The primary PG ${PG2} doesn't have prio $pool2_prio" + ERRORS=$(expr 
$ERRORS + 1) + fi + fi + + # Using eval will strip double-quotes from item + eval ITEM=$(cat $dir/dump.${chk_osd2_2}.out | jq '.remote_reservations.in_progress[0].item') + if [ "$ITEM" != ${PG2} ]; + then + echo "The primary PG $PG2 didn't become the in progress item on remote" + ERRORS=$(expr $ERRORS + 1) + else + PRIO=$(cat $dir/dump.${chk_osd2_2}.out | jq '.remote_reservations.in_progress[0].prio') + if [ "$PRIO" != $pool2_prio ]; + then + echo "The primary PG ${PG2} doesn't have prio $pool2_prio on remote" + ERRORS=$(expr $ERRORS + 1) + fi + fi + + wait_for_clean || return 1 + + if [ $ERRORS != "0" ]; + then + echo "$ERRORS error(s) found" + else + echo TEST PASSED + fi + + delete_pool $pool1 + delete_pool $pool2 + kill_daemons $dir || return 1 + return $ERRORS +} + +main osd-recovery-prio "$@" + +# Local Variables: +# compile-command: "make -j4 && ../qa/run-standalone.sh osd-recovery-prio.sh" +# End: diff --git a/qa/standalone/osd/osd-recovery-space.sh b/qa/standalone/osd/osd-recovery-space.sh new file mode 100755 index 00000000..82cdf82e --- /dev/null +++ b/qa/standalone/osd/osd-recovery-space.sh @@ -0,0 +1,175 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2018 Red Hat +# +# Author: David Zafman +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7221" # git grep '\<7221\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + CEPH_ARGS+="--osd_max_backfills=10 " + export objects=600 + export poolprefix=test + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + + +function get_num_in_state() { + local state=$1 + local expression + expression+="select(contains(\"${state}\"))" + ceph --format json pg dump pgs 2>/dev/null | \ + jq ".pg_stats | [.[] | .state | $expression] | length" +} + + +function wait_for_state() { + local state=$1 + local cur_in_state + local -a delays=($(get_timeout_delays $2 5)) + local -i loop=0 + + flush_pg_stats || return 1 + while test $(get_num_pgs) == 0 ; do + sleep 1 + done + + while true ; do + cur_in_state=$(get_num_in_state ${state}) + test $cur_in_state -gt 0 && break + if (( $loop >= ${#delays[*]} )) ; then + ceph pg dump pgs + return 1 + fi + sleep ${delays[$loop]} + loop+=1 + done + return 0 +} + + +function wait_for_recovery_toofull() { + local timeout=$1 + wait_for_state recovery_toofull $timeout +} + + +# Create 1 pools with size 1 +# set ful-ratio to 50% +# Write data 600 5K (3000K) +# Inject fake_statfs_for_testing to 3600K (83% full) +# Incresase the pool size to 2 +# The pool shouldn't have room to recovery +function TEST_recovery_test_simple() { + local dir=$1 + local pools=1 + local OSDS=2 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + export CEPH_ARGS + + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + ceph osd 
set-nearfull-ratio .40 + ceph osd set-backfillfull-ratio .45 + ceph osd set-full-ratio .50 + + for p in $(seq 1 $pools) + do + create_pool "${poolprefix}$p" 1 1 + ceph osd pool set "${poolprefix}$p" size 1 + done + + wait_for_clean || return 1 + + dd if=/dev/urandom of=$dir/datafile bs=1024 count=5 + for o in $(seq 1 $objects) + do + rados -p "${poolprefix}$p" put obj$o $dir/datafile + done + + for o in $(seq 0 $(expr $OSDS - 1)) + do + ceph tell osd.$o injectargs '--fake_statfs_for_testing 3686400' || return 1 + done + sleep 5 + + ceph pg dump pgs + + for p in $(seq 1 $pools) + do + ceph osd pool set "${poolprefix}$p" size 2 + done + + # If this times out, we'll detected errors below + wait_for_recovery_toofull 30 + + ERRORS=0 + if [ "$(ceph pg dump pgs | grep +recovery_toofull | wc -l)" != "1" ]; + then + echo "One pool should have been in recovery_toofull" + ERRORS="$(expr $ERRORS + 1)" + fi + + ceph pg dump pgs + ceph status + ceph status --format=json-pretty > $dir/stat.json + + eval SEV=$(jq '.health.checks.PG_RECOVERY_FULL.severity' $dir/stat.json) + if [ "$SEV" != "HEALTH_ERR" ]; then + echo "PG_RECOVERY_FULL severity $SEV not HEALTH_ERR" + ERRORS="$(expr $ERRORS + 1)" + fi + eval MSG=$(jq '.health.checks.PG_RECOVERY_FULL.summary.message' $dir/stat.json) + if [ "$MSG" != "Full OSDs blocking recovery: 1 pg recovery_toofull" ]; then + echo "PG_RECOVERY_FULL message '$MSG' mismatched" + ERRORS="$(expr $ERRORS + 1)" + fi + rm -f $dir/stat.json + + if [ $ERRORS != "0" ]; + then + return 1 + fi + + for i in $(seq 1 $pools) + do + delete_pool "${poolprefix}$i" + done + kill_daemons $dir || return 1 +} + + +main osd-recovery-space "$@" + +# Local Variables: +# compile-command: "make -j4 && ../qa/run-standalone.sh osd-recovery-space.sh" +# End: diff --git a/qa/standalone/osd/osd-recovery-stats.sh b/qa/standalone/osd/osd-recovery-stats.sh new file mode 100755 index 00000000..04a28794 --- /dev/null +++ b/qa/standalone/osd/osd-recovery-stats.sh @@ -0,0 +1,512 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2017 Red Hat +# +# Author: David Zafman +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + # Fix port???? + export CEPH_MON="127.0.0.1:7115" # git grep '\<7115\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + # so we will not force auth_log_shard to be acting_primary + CEPH_ARGS+="--osd_force_auth_primary_missing_objects=1000000 " + export margin=10 + export objects=200 + export poolname=test + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function below_margin() { + local -i check=$1 + shift + local -i target=$1 + + return $(( $check <= $target && $check >= $target - $margin ? 0 : 1 )) +} + +function above_margin() { + local -i check=$1 + shift + local -i target=$1 + + return $(( $check >= $target && $check <= $target + $margin ? 
0 : 1 )) +} + +FIND_UPACT='grep "pg[[]${PG}.*recovering.*_update_calc_stats " $log | tail -1 | sed "s/.*[)] \([[][^ p]*\).*$/\1/"' +FIND_FIRST='grep "pg[[]${PG}.*recovering.*_update_calc_stats $which " $log | grep -F " ${UPACT}${addp}" | grep -v est | head -1 | sed "s/.* \([0-9]*\)$/\1/"' +FIND_LAST='grep "pg[[]${PG}.*recovering.*_update_calc_stats $which " $log | tail -1 | sed "s/.* \([0-9]*\)$/\1/"' + +function check() { + local dir=$1 + local PG=$2 + local primary=$3 + local type=$4 + local degraded_start=$5 + local degraded_end=$6 + local misplaced_start=$7 + local misplaced_end=$8 + local primary_start=${9:-} + local primary_end=${10:-} + + local log=$dir/osd.${primary}.log + + local addp=" " + if [ "$type" = "erasure" ]; + then + addp="p" + fi + + UPACT=$(eval $FIND_UPACT) + + # Check 3rd line at start because of false recovery starts + local which="degraded" + FIRST=$(eval $FIND_FIRST) + below_margin $FIRST $degraded_start || return 1 + LAST=$(eval $FIND_LAST) + above_margin $LAST $degraded_end || return 1 + + # Check 3rd line at start because of false recovery starts + which="misplaced" + FIRST=$(eval $FIND_FIRST) + below_margin $FIRST $misplaced_start || return 1 + LAST=$(eval $FIND_LAST) + above_margin $LAST $misplaced_end || return 1 + + # This is the value of set into MISSING_ON_PRIMARY + if [ -n "$primary_start" ]; + then + which="shard $primary" + FIRST=$(eval $FIND_FIRST) + below_margin $FIRST $primary_start || return 1 + LAST=$(eval $FIND_LAST) + above_margin $LAST $primary_end || return 1 + fi +} + +# [1,0,?] -> [1,2,4] +# degraded 500 -> 0 +# active+recovering+degraded + +# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP +# 1.0 500 0 500 0 0 0 500 500 active+recovering+degraded 2017-11-17 19:27:36.493828 28'500 32:603 [1,2,4] 1 [1,2,4] 1 0'0 2017-11-17 19:27:05.915467 0'0 2017-11-17 19:27:05.915467 +function do_recovery_out1() { + local dir=$1 + shift + local type=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + run_osd $dir 3 || return 1 + run_osd $dir 4 || return 1 + run_osd $dir 5 || return 1 + + if [ $type = "erasure" ]; + then + ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd + create_pool $poolname 1 1 $type myprofile + else + create_pool $poolname 1 1 $type + fi + + wait_for_clean || return 1 + + for i in $(seq 1 $objects) + do + rados -p $poolname put obj$i /dev/null + done + + local primary=$(get_primary $poolname obj1) + local PG=$(get_pg $poolname obj1) + # Only 2 OSDs so only 1 not primary + local otherosd=$(get_not_primary $poolname obj1) + + ceph osd set norecover + kill $(cat $dir/osd.${otherosd}.pid) + ceph osd down osd.${otherosd} + ceph osd out osd.${otherosd} + ceph osd unset norecover + ceph tell osd.$(get_primary $poolname obj1) debug kick_recovery_wq 0 + sleep 2 + + wait_for_clean || return 1 + + check $dir $PG $primary $type $objects 0 0 0 || return 1 + + delete_pool $poolname + kill_daemons $dir || return 1 +} + +function TEST_recovery_replicated_out1() { + local dir=$1 + + do_recovery_out1 $dir replicated || return 1 +} + +function TEST_recovery_erasure_out1() { + local dir=$1 + + do_recovery_out1 $dir erasure || return 1 +} + +# [0, 1] -> [2,3,4,5] +# degraded 1000 -> 0 +# misplaced 1000 -> 0 +# missing on primary 500 -> 
0 + +# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP +# 1.0 500 500 1000 1000 0 0 500 500 active+recovering+degraded 2017-10-27 09:38:37.453438 22'500 25:394 [2,4,3,5] 2 [2,4,3,5] 2 0'0 2017-10-27 09:37:58.046748 0'0 2017-10-27 09:37:58.046748 +function TEST_recovery_sizeup() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + run_osd $dir 3 || return 1 + run_osd $dir 4 || return 1 + run_osd $dir 5 || return 1 + + create_pool $poolname 1 1 + ceph osd pool set $poolname size 2 + + wait_for_clean || return 1 + + for i in $(seq 1 $objects) + do + rados -p $poolname put obj$i /dev/null + done + + local primary=$(get_primary $poolname obj1) + local PG=$(get_pg $poolname obj1) + # Only 2 OSDs so only 1 not primary + local otherosd=$(get_not_primary $poolname obj1) + + ceph osd set norecover + ceph osd out osd.$primary osd.$otherosd + ceph osd pool set test size 4 + ceph osd unset norecover + # Get new primary + primary=$(get_primary $poolname obj1) + + ceph tell osd.${primary} debug kick_recovery_wq 0 + sleep 2 + + wait_for_clean || return 1 + + local degraded=$(expr $objects \* 2) + local misplaced=$(expr $objects \* 2) + local log=$dir/osd.${primary}.log + check $dir $PG $primary replicated $degraded 0 $misplaced 0 $objects 0 || return 1 + + delete_pool $poolname + kill_daemons $dir || return 1 +} + +# [0, 1, 2, 4] -> [3, 5] +# misplaced 1000 -> 0 +# missing on primary 500 -> 0 +# active+recovering+degraded + +# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP +# 1.0 500 500 0 1000 0 0 500 500 active+recovering+degraded 2017-10-27 09:34:50.012261 22'500 27:118 [3,5] 3 [3,5] 3 0'0 2017-10-27 09:34:08.617248 0'0 2017-10-27 09:34:08.617248 +function TEST_recovery_sizedown() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + run_osd $dir 3 || return 1 + run_osd $dir 4 || return 1 + run_osd $dir 5 || return 1 + + create_pool $poolname 1 1 + ceph osd pool set $poolname size 4 + + wait_for_clean || return 1 + + for i in $(seq 1 $objects) + do + rados -p $poolname put obj$i /dev/null + done + + local primary=$(get_primary $poolname obj1) + local PG=$(get_pg $poolname obj1) + # Only 2 OSDs so only 1 not primary + local allosds=$(get_osds $poolname obj1) + + ceph osd set norecover + for osd in $allosds + do + ceph osd out osd.$osd + done + + ceph osd pool set test size 2 + ceph osd unset norecover + ceph tell osd.$(get_primary $poolname obj1) debug kick_recovery_wq 0 + sleep 2 + + wait_for_clean || return 1 + + # Get new primary + primary=$(get_primary $poolname obj1) + + local misplaced=$(expr $objects \* 2) + local log=$dir/osd.${primary}.log + check $dir $PG $primary replicated 0 0 $misplaced 0 || return 1 + + UPACT=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats " $log | tail -1 | sed "s/.*[)] \([[][^ p]*\).*$/\1/") + + # This is the value of set into MISSING_ON_PRIMARY + FIRST=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats shard $primary " $log | grep -F " $UPACT " | head -1 | sed "s/.* \([0-9]*\)$/\1/") + below_margin $FIRST $objects || return 1 + 
LAST=$(grep "pg[[]${PG}.*recovering.*_update_calc_stats shard $primary " $log | tail -1 | sed "s/.* \([0-9]*\)$/\1/") + above_margin $LAST 0 || return 1 + + delete_pool $poolname + kill_daemons $dir || return 1 +} + +# [1] -> [1,2] +# degraded 300 -> 200 +# active+recovering+undersized+degraded + +# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP +# 1.0 100 0 300 0 0 0 100 100 active+recovering+undersized+degraded 2017-11-17 17:16:15.302943 13'500 16:643 [1,2] 1 [1,2] 1 0'0 2017-11-17 17:15:34.985563 0'0 2017-11-17 17:15:34.985563 +function TEST_recovery_undersized() { + local dir=$1 + + local osds=3 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + for i in $(seq 0 $(expr $osds - 1)) + do + run_osd $dir $i || return 1 + done + + create_pool $poolname 1 1 + ceph osd pool set $poolname size 1 + + wait_for_clean || return 1 + + for i in $(seq 1 $objects) + do + rados -p $poolname put obj$i /dev/null + done + + local primary=$(get_primary $poolname obj1) + local PG=$(get_pg $poolname obj1) + + ceph osd set norecover + # Mark any osd not the primary (only 1 replica so also has no replica) + for i in $(seq 0 $(expr $osds - 1)) + do + if [ $i = $primary ]; + then + continue + fi + ceph osd out osd.$i + break + done + ceph osd pool set test size 4 + ceph osd unset norecover + ceph tell osd.$(get_primary $poolname obj1) debug kick_recovery_wq 0 + # Give extra sleep time because code below doesn't have the sophistication of wait_for_clean() + sleep 10 + flush_pg_stats || return 1 + + # Wait for recovery to finish + # Can't use wait_for_clean() because state goes from active+recovering+undersized+degraded + # to active+undersized+degraded + for i in $(seq 1 60) + do + if ceph pg dump pgs | grep ^$PG | grep -qv recovering + then + break + fi + if [ $i = "60" ]; + then + echo "Timeout waiting for recovery to finish" + return 1 + fi + sleep 1 + done + + # Get new primary + primary=$(get_primary $poolname obj1) + local log=$dir/osd.${primary}.log + + local first_degraded=$(expr $objects \* 3) + local last_degraded=$(expr $objects \* 2) + check $dir $PG $primary replicated $first_degraded $last_degraded 0 0 || return 1 + + delete_pool $poolname + kill_daemons $dir || return 1 +} + +# [1,0,2] -> [1,3,NONE]/[1,3,2] +# degraded 100 -> 0 +# misplaced 100 -> 100 +# active+recovering+degraded+remapped + +# PG_STAT OBJECTS MISSING_ON_PRIMARY DEGRADED MISPLACED UNFOUND BYTES LOG DISK_LOG STATE STATE_STAMP VERSION REPORTED UP UP_PRIMARY ACTING ACTING_PRIMARY LAST_SCRUB SCRUB_STAMP LAST_DEEP_SCRUB DEEP_SCRUB_STAMP +# 1.0 100 0 100 100 0 0 100 100 active+recovering+degraded+remapped 2017-11-27 21:24:20.851243 18'500 23:618 [1,3,NONE] 1 [1,3,2] 1 0'0 2017-11-27 21:23:39.395242 0'0 2017-11-27 21:23:39.395242 +function TEST_recovery_erasure_remapped() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + run_osd $dir 3 || return 1 + + ceph osd erasure-code-profile set myprofile plugin=jerasure technique=reed_sol_van k=2 m=1 crush-failure-domain=osd + create_pool $poolname 1 1 erasure myprofile + ceph osd pool set $poolname min_size 2 + + wait_for_clean || return 1 + + for i in $(seq 1 $objects) + do + rados -p $poolname put obj$i /dev/null + done + + local primary=$(get_primary $poolname obj1) + local PG=$(get_pg $poolname obj1) + local 
otherosd=$(get_not_primary $poolname obj1) + + ceph osd set norecover + kill $(cat $dir/osd.${otherosd}.pid) + ceph osd down osd.${otherosd} + ceph osd out osd.${otherosd} + + # Mark osd not the primary and not down/out osd as just out + for i in 0 1 2 3 + do + if [ $i = $primary ]; + then + continue + fi + if [ $i = $otherosd ]; + then + continue + fi + ceph osd out osd.$i + break + done + ceph osd unset norecover + ceph tell osd.$(get_primary $poolname obj1) debug kick_recovery_wq 0 + sleep 2 + + wait_for_clean || return 1 + + local log=$dir/osd.${primary}.log + check $dir $PG $primary erasure $objects 0 $objects $objects || return 1 + + delete_pool $poolname + kill_daemons $dir || return 1 +} + +function TEST_recovery_multi() { + local dir=$1 + + local osds=6 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + for i in $(seq 0 $(expr $osds - 1)) + do + run_osd $dir $i || return 1 + done + + create_pool $poolname 1 1 + ceph osd pool set $poolname size 3 + ceph osd pool set $poolname min_size 1 + + wait_for_clean || return 1 + + rados -p $poolname put obj1 /dev/null + + local primary=$(get_primary $poolname obj1) + local otherosd=$(get_not_primary $poolname obj1) + + ceph osd set noout + ceph osd set norecover + kill $(cat $dir/osd.${otherosd}.pid) + ceph osd down osd.${otherosd} + + local half=$(expr $objects / 2) + for i in $(seq 2 $half) + do + rados -p $poolname put obj$i /dev/null + done + + kill $(cat $dir/osd.${primary}.pid) + ceph osd down osd.${primary} + activate_osd $dir ${otherosd} + sleep 3 + + for i in $(seq $(expr $half + 1) $objects) + do + rados -p $poolname put obj$i /dev/null + done + + local PG=$(get_pg $poolname obj1) + local otherosd=$(get_not_primary $poolname obj$objects) + + ceph osd unset noout + ceph osd out osd.$primary osd.$otherosd + activate_osd $dir ${primary} + sleep 3 + + ceph osd pool set test size 4 + ceph osd unset norecover + ceph tell osd.$(get_primary $poolname obj1) debug kick_recovery_wq 0 + sleep 2 + + wait_for_clean || return 1 + + # Get new primary + primary=$(get_primary $poolname obj1) + + local log=$dir/osd.${primary}.log + check $dir $PG $primary replicated 399 0 300 0 99 0 || return 1 + + delete_pool $poolname + kill_daemons $dir || return 1 +} + +main osd-recovery-stats "$@" + +# Local Variables: +# compile-command: "make -j4 && ../qa/run-standalone.sh osd-recovery-stats.sh" +# End: diff --git a/qa/standalone/osd/osd-rep-recov-eio.sh b/qa/standalone/osd/osd-rep-recov-eio.sh new file mode 100755 index 00000000..332a61ac --- /dev/null +++ b/qa/standalone/osd/osd-rep-recov-eio.sh @@ -0,0 +1,476 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2017 Red Hat +# +# +# Author: Kefu Chai +# Author: David Zafman +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. 
+# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +warnings=10 + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7140" # git grep '\<7140\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + # set warning amount in case default changes + run_mon $dir a --mon_osd_warn_num_repaired=$warnings || return 1 + run_mgr $dir x || return 1 + ceph osd pool create foo 8 || return 1 + + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function setup_osds() { + local count=$1 + shift + local type=$1 + + for id in $(seq 0 $(expr $count - 1)) ; do + run_osd${type} $dir $id || return 1 + done + wait_for_clean || return 1 +} + +function get_state() { + local pgid=$1 + local sname=state + ceph --format json pg dump pgs 2>/dev/null | \ + jq -r ".pg_stats | .[] | select(.pgid==\"$pgid\") | .$sname" +} + +function rados_put() { + local dir=$1 + local poolname=$2 + local objname=${3:-SOMETHING} + + for marker in AAA BBB CCCC DDDD ; do + printf "%*s" 1024 $marker + done > $dir/ORIGINAL + # + # get and put an object, compare they are equal + # + rados --pool $poolname put $objname $dir/ORIGINAL || return 1 +} + +function rados_get() { + local dir=$1 + local poolname=$2 + local objname=${3:-SOMETHING} + local expect=${4:-ok} + + # + # Expect a failure to get object + # + if [ $expect = "fail" ]; + then + ! rados --pool $poolname get $objname $dir/COPY + return + fi + # + # Expect hang trying to get object + # + if [ $expect = "hang" ]; + then + timeout 5 rados --pool $poolname get $objname $dir/COPY + test "$?" = "124" + return + fi + # + # get an object, compare with $dir/ORIGINAL + # + rados --pool $poolname get $objname $dir/COPY || return 1 + diff $dir/ORIGINAL $dir/COPY || return 1 + rm $dir/COPY +} + +function rados_get_data() { + local inject=$1 + shift + local dir=$1 + + local poolname=pool-rep + local objname=obj-$inject-$$ + local pgid=$(get_pg $poolname $objname) + + rados_put $dir $poolname $objname || return 1 + inject_$inject rep data $poolname $objname $dir 0 || return 1 + rados_get $dir $poolname $objname || return 1 + + wait_for_clean + COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired') + test "$COUNT" = "1" || return 1 + flush_pg_stats + COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired") + test "$COUNT" = "1" || return 1 + + local object_osds=($(get_osds $poolname $objname)) + local primary=${object_osds[0]} + local bad_peer=${object_osds[1]} + inject_$inject rep data $poolname $objname $dir 0 || return 1 + inject_$inject rep data $poolname $objname $dir 1 || return 1 + # Force primary to pull from the bad peer, so we can repair it too! 
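+    # (osd_debug_feed_pullee is a debug option; pointing it at the bad peer should
+    # make the primary pull the object from that shard so it gets repaired as well)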
+ set_config osd $primary osd_debug_feed_pullee $bad_peer || return 1 + rados_get $dir $poolname $objname || return 1 + + # Wait until automatic repair of bad peer is done + wait_for_clean || return 1 + + inject_$inject rep data $poolname $objname $dir 0 || return 1 + inject_$inject rep data $poolname $objname $dir 2 || return 1 + rados_get $dir $poolname $objname || return 1 + + wait_for_clean + COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired') + test "$COUNT" = "3" || return 1 + flush_pg_stats + COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired") + test "$COUNT" = "4" || return 1 + + inject_$inject rep data $poolname $objname $dir 0 || return 1 + inject_$inject rep data $poolname $objname $dir 1 || return 1 + inject_$inject rep data $poolname $objname $dir 2 || return 1 + rados_get $dir $poolname $objname hang || return 1 + + wait_for_clean + # After hang another repair couldn't happen, so count stays the same + COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired') + test "$COUNT" = "3" || return 1 + flush_pg_stats + COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired") + test "$COUNT" = "4" || return 1 +} + +function TEST_rados_get_with_eio() { + local dir=$1 + + setup_osds 4 || return 1 + + local poolname=pool-rep + create_pool $poolname 1 1 || return 1 + wait_for_clean || return 1 + rados_get_data eio $dir || return 1 + + delete_pool $poolname +} + +function TEST_rados_repair_warning() { + local dir=$1 + local OBJS=$(expr $warnings + 1) + + setup_osds 4 || return 1 + + local poolname=pool-rep + create_pool $poolname 1 1 || return 1 + wait_for_clean || return 1 + + local poolname=pool-rep + local objbase=obj-warn + local inject=eio + + for i in $(seq 1 $OBJS) + do + rados_put $dir $poolname ${objbase}-$i || return 1 + inject_$inject rep data $poolname ${objbase}-$i $dir 0 || return 1 + rados_get $dir $poolname ${objbase}-$i || return 1 + done + local pgid=$(get_pg $poolname ${objbase}-1) + + local object_osds=($(get_osds $poolname ${objbase}-1)) + local primary=${object_osds[0]} + local bad_peer=${object_osds[1]} + + wait_for_clean + COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired') + test "$COUNT" = "$OBJS" || return 1 + flush_pg_stats + COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired") + test "$COUNT" = "$OBJS" || return 1 + + ceph health | grep -q "Too many repaired reads on 1 OSDs" || return 1 + ceph health detail | grep -q "osd.$primary had $OBJS reads repaired" || return 1 + + ceph tell osd.$primary clear_shards_repaired + sleep 10 + + set -o pipefail + # Should mute this + ceph health | $(! grep -q "Too many repaired reads on 1 OSDs") || return 1 + set +o pipefail + + ceph tell osd.$primary clear_shards_repaired $OBJS + sleep 10 + + for i in $(seq 1 $OBJS) + do + inject_$inject rep data $poolname ${objbase}-$i $dir 0 || return 1 + inject_$inject rep data $poolname ${objbase}-$i $dir 1 || return 1 + # Force primary to pull from the bad peer, so we can repair it too! 
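+    # Each pass injects errors on both the primary and the bad peer, so the totals
+    # checked below should roughly double the per-object repair count and add one
+    # repaired shard on the bad peer per object.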
+ set_config osd $primary osd_debug_feed_pullee $bad_peer || return 1 + rados_get $dir $poolname ${objbase}-$i || return 1 + done + + wait_for_clean + COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired') + test "$COUNT" = "$(expr $OBJS \* 2)" || return 1 + flush_pg_stats + COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired") + test "$COUNT" = "$(expr $OBJS \* 3)" || return 1 + + # Give mon a chance to notice additional OSD and reset num_shards_repaired + # The default tick time is 5 seconds + CHECKTIME=10 + LOOPS=0 + while(true) + do + sleep 1 + if ceph health | grep -q "Too many repaired reads on 2 OSDs" + then + break + fi + LOOPS=$(expr $LOOPS + 1) + if test "$LOOPS" = "$CHECKTIME" + then + echo "Too many repaired reads not seen after $CHECKTIME seconds" + return 1 + fi + done + ceph health detail | grep -q "osd.$primary had $(expr $OBJS \* 2) reads repaired" || return 1 + ceph health detail | grep -q "osd.$bad_peer had $OBJS reads repaired" || return 1 + + delete_pool $poolname +} + +# Test backfill with unfound object +function TEST_rep_backfill_unfound() { + local dir=$1 + local objname=myobject + local lastobj=300 + # Must be between 1 and $lastobj + local testobj=obj250 + + export CEPH_ARGS + CEPH_ARGS+=' --osd_min_pg_log_entries=5 --osd_max_pg_log_entries=10' + setup_osds 3 || return 1 + + local poolname=test-pool + create_pool $poolname 1 1 || return 1 + wait_for_clean || return 1 + + ceph pg dump pgs + + rados_put $dir $poolname $objname || return 1 + + local -a initial_osds=($(get_osds $poolname $objname)) + local last_osd=${initial_osds[-1]} + kill_daemons $dir TERM osd.${last_osd} 2>&2 < /dev/null || return 1 + ceph osd down ${last_osd} || return 1 + ceph osd out ${last_osd} || return 1 + + ceph pg dump pgs + + dd if=/dev/urandom of=${dir}/ORIGINAL bs=1024 count=4 + for i in $(seq 1 $lastobj) + do + rados --pool $poolname put obj${i} $dir/ORIGINAL || return 1 + done + + inject_eio rep data $poolname $testobj $dir 0 || return 1 + inject_eio rep data $poolname $testobj $dir 1 || return 1 + + activate_osd $dir ${last_osd} || return 1 + ceph osd in ${last_osd} || return 1 + + sleep 15 + + for tmp in $(seq 1 100); do + state=$(get_state 2.0) + echo $state | grep backfill_unfound + if [ "$?" = "0" ]; then + break + fi + echo "$state " + sleep 1 + done + + ceph pg dump pgs + ceph pg 2.0 list_unfound | grep -q $testobj || return 1 + + # Command should hang because object is unfound + timeout 5 rados -p $poolname get $testobj $dir/CHECK + test $? = "124" || return 1 + + ceph pg 2.0 mark_unfound_lost delete + + wait_for_clean || return 1 + + for i in $(seq 1 $lastobj) + do + if [ obj${i} = "$testobj" ]; then + # Doesn't exist anymore + ! 
rados -p $poolname get $testobj $dir/CHECK || return 1 + else + rados --pool $poolname get obj${i} $dir/CHECK || return 1 + diff -q $dir/ORIGINAL $dir/CHECK || return 1 + fi + done + + rm -f ${dir}/ORIGINAL ${dir}/CHECK + + delete_pool $poolname +} + +# Test recovery with unfound object +function TEST_rep_recovery_unfound() { + local dir=$1 + local objname=myobject + local lastobj=100 + # Must be between 1 and $lastobj + local testobj=obj75 + + setup_osds 3 || return 1 + + local poolname=test-pool + create_pool $poolname 1 1 || return 1 + wait_for_clean || return 1 + + ceph pg dump pgs + + rados_put $dir $poolname $objname || return 1 + + local -a initial_osds=($(get_osds $poolname $objname)) + local last_osd=${initial_osds[-1]} + kill_daemons $dir TERM osd.${last_osd} 2>&2 < /dev/null || return 1 + ceph osd down ${last_osd} || return 1 + ceph osd out ${last_osd} || return 1 + + ceph pg dump pgs + + dd if=/dev/urandom of=${dir}/ORIGINAL bs=1024 count=4 + for i in $(seq 1 $lastobj) + do + rados --pool $poolname put obj${i} $dir/ORIGINAL || return 1 + done + + inject_eio rep data $poolname $testobj $dir 0 || return 1 + inject_eio rep data $poolname $testobj $dir 1 || return 1 + + activate_osd $dir ${last_osd} || return 1 + ceph osd in ${last_osd} || return 1 + + sleep 15 + + for tmp in $(seq 1 100); do + state=$(get_state 2.0) + echo $state | grep -v recovering + if [ "$?" = "0" ]; then + break + fi + echo "$state " + sleep 1 + done + + ceph pg dump pgs + ceph pg 2.0 list_unfound | grep -q $testobj || return 1 + + # Command should hang because object is unfound + timeout 5 rados -p $poolname get $testobj $dir/CHECK + test $? = "124" || return 1 + + ceph pg 2.0 mark_unfound_lost delete + + wait_for_clean || return 1 + + for i in $(seq 1 $lastobj) + do + if [ obj${i} = "$testobj" ]; then + # Doesn't exist anymore + ! rados -p $poolname get $testobj $dir/CHECK || return 1 + else + rados --pool $poolname get obj${i} $dir/CHECK || return 1 + diff -q $dir/ORIGINAL $dir/CHECK || return 1 + fi + done + + rm -f ${dir}/ORIGINAL ${dir}/CHECK + + delete_pool $poolname +} + +# This is a filestore only test because it requires data digest in object info +function TEST_rep_read_unfound() { + local dir=$1 + local objname=myobject + + setup_osds 3 _filestore || return 1 + + ceph osd pool delete foo foo --yes-i-really-really-mean-it || return 1 + local poolname=test-pool + create_pool $poolname 1 1 || return 1 + ceph osd pool set $poolname size 2 + wait_for_clean || return 1 + + ceph pg dump pgs + + dd if=/dev/urandom bs=8k count=1 of=$dir/ORIGINAL + rados -p $poolname put $objname $dir/ORIGINAL + + local primary=$(get_primary $poolname $objname) + local other=$(get_not_primary $poolname $objname) + + dd if=/dev/urandom bs=8k count=1 of=$dir/CORRUPT + objectstore_tool $dir $primary $objname set-bytes $dir/CORRUPT || return 1 + objectstore_tool $dir $other $objname set-bytes $dir/CORRUPT || return 1 + + timeout 30 rados -p $poolname get $objname $dir/tmp & + + sleep 5 + + flush_pg_stats + ceph --format=json pg dump pgs | jq '.' + + if ! ceph --format=json pg dump pgs | jq '.pg_stats | .[0].state' | grep -q recovery_unfound + then + echo "Failure to get to recovery_unfound state" + return 1 + fi + + objectstore_tool $dir $other $objname set-bytes $dir/ORIGINAL || return 1 + + wait + + if ! 
cmp $dir/ORIGINAL $dir/tmp + then + echo "Bad data after primary repair" + return 1 + fi +} + +main osd-rep-recov-eio.sh "$@" + +# Local Variables: +# compile-command: "cd ../../../build ; make -j4 && ../qa/run-standalone.sh osd-rep-recov-eio.sh" +# End: diff --git a/qa/standalone/osd/osd-reuse-id.sh b/qa/standalone/osd/osd-reuse-id.sh new file mode 100755 index 00000000..9fd875d1 --- /dev/null +++ b/qa/standalone/osd/osd-reuse-id.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2015 Red Hat +# +# Author: Loic Dachary +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7123" # git grep '\<7123\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + $func $dir || return 1 + done +} + +function TEST_reuse_id() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + destroy_osd $dir 1 || return 1 + run_osd $dir 1 || return 1 +} + +main osd-reuse-id "$@" + +# Local Variables: +# compile-command: "cd ../.. 
; make -j4 && test/osd/osd-reuse-id.sh" +# End: diff --git a/qa/standalone/osd/pg-split-merge.sh b/qa/standalone/osd/pg-split-merge.sh new file mode 100755 index 00000000..ad697a9f --- /dev/null +++ b/qa/standalone/osd/pg-split-merge.sh @@ -0,0 +1,204 @@ +#!/usr/bin/env bash +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7147" # git grep '\<7147\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON --mon_min_osdmap_epochs=50 --paxos_service_trim_min=10" + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + $func $dir || return 1 + done +} + +function TEST_a_merge_empty() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=3 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + + ceph osd pool create foo 2 || return 1 + ceph osd pool set foo pgp_num 1 || return 1 + + wait_for_clean || return 1 + + # note: we need 1.0 to have the same or more objects than 1.1 + # 1.1 + rados -p foo put foo1 /etc/passwd + rados -p foo put foo2 /etc/passwd + rados -p foo put foo3 /etc/passwd + rados -p foo put foo4 /etc/passwd + # 1.0 + rados -p foo put foo5 /etc/passwd + rados -p foo put foo6 /etc/passwd + rados -p foo put foo8 /etc/passwd + rados -p foo put foo10 /etc/passwd + rados -p foo put foo11 /etc/passwd + rados -p foo put foo12 /etc/passwd + rados -p foo put foo16 /etc/passwd + + wait_for_clean || return 1 + + ceph tell osd.1 config set osd_debug_no_purge_strays true + ceph osd pool set foo size 2 || return 1 + wait_for_clean || return 1 + + kill_daemons $dir TERM osd.2 || return 1 + ceph-objectstore-tool --data-path $dir/2 --op remove --pgid 1.1 --force || return 1 + activate_osd $dir 2 || return 1 + + wait_for_clean || return 1 + + # osd.2: now 1.0 is there but 1.1 is not + + # instantiate 1.1 on osd.2 with last_update=0'0 ('empty'), which is + # the problematic state... then let it merge with 1.0 + ceph tell osd.2 config set osd_debug_no_acting_change true + ceph osd out 0 1 + ceph osd pool set foo pg_num 1 + sleep 5 + ceph tell osd.2 config set osd_debug_no_acting_change false + + # go back to osd.1 being primary, and 3x so the osd.2 copy doesn't get + # removed + ceph osd in 0 1 + ceph osd pool set foo size 3 + + wait_for_clean || return 1 + + # scrub to ensure the osd.3 copy of 1.0 was incomplete (vs missing + # half of its objects). 
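+    # (the ' active.clean ' grep further below matches only a state of exactly
+    # active+clean, so e.g. active+clean+inconsistent would fail the test)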
+    ceph pg scrub 1.0
+    sleep 10
+    ceph log last debug
+    ceph pg ls
+    ceph pg ls | grep ' active.clean ' || return 1
+}
+
+function TEST_import_after_merge_and_gap() {
+    local dir=$1
+
+    setup $dir || return 1
+    run_mon $dir a --osd_pool_default_size=1 || return 1
+    run_mgr $dir x || return 1
+    run_osd $dir 0 || return 1
+
+    ceph osd pool create foo 2 || return 1
+    wait_for_clean || return 1
+    rados -p foo bench 3 write -b 1024 --no-cleanup || return 1
+
+    kill_daemons $dir TERM osd.0 || return 1
+    ceph-objectstore-tool --data-path $dir/0 --op export --pgid 1.1 --file $dir/1.1 --force || return 1
+    ceph-objectstore-tool --data-path $dir/0 --op export --pgid 1.0 --file $dir/1.0 --force || return 1
+    activate_osd $dir 0 || return 1
+
+    ceph osd pool set foo pg_num 1
+    sleep 5
+    while ceph daemon osd.0 perf dump | jq '.osd.numpg' | grep 2 ; do sleep 1 ; done
+    wait_for_clean || return 1
+
+    #
+    kill_daemons $dir TERM osd.0 || return 1
+    ceph-objectstore-tool --data-path $dir/0 --op remove --pgid 1.0 --force || return 1
+    # this will import both halves of the original pg
+    ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.1 --file $dir/1.1 || return 1
+    ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.0 --file $dir/1.0 || return 1
+    activate_osd $dir 0 || return 1
+
+    wait_for_clean || return 1
+
+    # make a map gap
+    for f in `seq 1 50` ; do
+        ceph osd set nodown
+        ceph osd unset nodown
+    done
+
+    # poke and prod to ensure last_epoch_clean is big, reported to mon, and
+    # the osd is able to trim old maps
+    rados -p foo bench 1 write -b 1024 --no-cleanup || return 1
+    wait_for_clean || return 1
+    ceph tell osd.0 send_beacon
+    sleep 5
+    ceph osd set nodown
+    ceph osd unset nodown
+    sleep 5
+
+    kill_daemons $dir TERM osd.0 || return 1
+
+    # this should fail.. 1.1 still doesn't exist
+    ! ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.1 --file $dir/1.1 || return 1
+
+    ceph-objectstore-tool --data-path $dir/0 --op export-remove --pgid 1.0 --force --file $dir/1.0.later || return 1
+
+    # this should fail too because of the gap
+    ! ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.1 --file $dir/1.1 || return 1
+    ! ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.0 --file $dir/1.0 || return 1
+
+    # we can force it...
+    ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.1 --file $dir/1.1 --force || return 1
+    ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.0 --file $dir/1.0 --force || return 1
+
+    # ...but the osd won't start, so remove it again.
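+    # (1.0.later was exported after the merge completed, so importing it alone
+    # should give the OSD a startable copy covering the whole merged PG)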
+ ceph-objectstore-tool --data-path $dir/0 --op remove --pgid 1.0 --force || return 1 + ceph-objectstore-tool --data-path $dir/0 --op remove --pgid 1.1 --force || return 1 + + ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.0 --file $dir/1.0.later --force || return 1 + + + activate_osd $dir 0 || return 1 + + wait_for_clean || return 1 +} + +function TEST_import_after_split() { + local dir=$1 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + + ceph osd pool create foo 1 || return 1 + wait_for_clean || return 1 + rados -p foo bench 3 write -b 1024 --no-cleanup || return 1 + + kill_daemons $dir TERM osd.0 || return 1 + ceph-objectstore-tool --data-path $dir/0 --op export --pgid 1.0 --file $dir/1.0 --force || return 1 + activate_osd $dir 0 || return 1 + + ceph osd pool set foo pg_num 2 + sleep 5 + while ceph daemon osd.0 perf dump | jq '.osd.numpg' | grep 1 ; do sleep 1 ; done + wait_for_clean || return 1 + + kill_daemons $dir TERM osd.0 || return 1 + + ceph-objectstore-tool --data-path $dir/0 --op remove --pgid 1.0 --force || return 1 + + # this should fail because 1.1 (split child) is there + ! ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.0 --file $dir/1.0 || return 1 + + ceph-objectstore-tool --data-path $dir/0 --op remove --pgid 1.1 --force || return 1 + # now it will work (1.1. is gone) + ceph-objectstore-tool --data-path $dir/0 --op import --pgid 1.0 --file $dir/1.0 || return 1 + + activate_osd $dir 0 || return 1 + + wait_for_clean || return 1 +} + + +main pg-split-merge "$@" + +# Local Variables: +# compile-command: "cd ../.. ; make -j4 && test/osd/pg-split-merge.sh" +# End: diff --git a/qa/standalone/osd/repro_long_log.sh b/qa/standalone/osd/repro_long_log.sh new file mode 100755 index 00000000..97d572e5 --- /dev/null +++ b/qa/standalone/osd/repro_long_log.sh @@ -0,0 +1,152 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2014 Cloudwatt +# Copyright (C) 2018 Red Hat +# +# Author: Josh Durgin +# Author: David Zafman +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. 
+# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7100" # git grep '\<7100\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +PGID= + +function test_log_size() +{ + local PGID=$1 + local EXPECTED=$2 + ceph tell osd.\* flush_pg_stats + sleep 3 + ceph pg $PGID query | jq .info.stats.log_size + ceph pg $PGID query | jq .info.stats.log_size | grep "${EXPECTED}" +} + +function setup_log_test() { + local dir=$1 + local which=$2 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + + ceph osd pool create test 1 1 || true + POOL_ID=$(ceph osd dump --format json | jq '.pools[] | select(.pool_name == "test") | .pool') + PGID="${POOL_ID}.0" + + ceph tell osd.\* injectargs -- --osd-min-pg-log-entries 20 || return 1 + ceph tell osd.\* injectargs -- --osd-max-pg-log-entries 30 || return 1 + ceph tell osd.\* injectargs -- --osd-pg-log-trim-min 10 || return 1 + ceph tell osd.\* injectargs -- --osd-pg-log-dups-tracked 10 || return 1 + + touch $dir/foo + for i in $(seq 1 20) + do + rados -p test put foo $dir/foo || return 1 + done + + test_log_size $PGID 20 || return 1 + + rados -p test rm foo || return 1 + + # generate error entries + for i in $(seq 1 20) + do + rados -p test rm foo + done + + # log should have been trimmed down to min_entries with one extra + test_log_size $PGID 21 || return 1 +} + +function TEST_repro_long_log1() +{ + local dir=$1 + + setup_log_test $dir || return 1 + # regular write should trim the log + rados -p test put foo $dir/foo || return 1 + test_log_size $PGID 22 || return 1 +} + +function TEST_repro_long_log2() +{ + local dir=$1 + + setup_log_test $dir || return 1 + local PRIMARY=$(ceph pg $PGID query | jq '.info.stats.up_primary') + kill_daemons $dir TERM osd.$PRIMARY || return 1 + CEPH_ARGS="--osd-max-pg-log-entries=2 --no-mon-config" ceph-objectstore-tool --data-path $dir/$PRIMARY --pgid $PGID --op trim-pg-log || return 1 + activate_osd $dir $PRIMARY || return 1 + wait_for_clean || return 1 + test_log_size $PGID 2 || return 1 +} + +function TEST_trim_max_entries() +{ + local dir=$1 + + setup_log_test $dir || return 1 + + ceph tell osd.\* injectargs -- --osd-min-pg-log-entries 1 + ceph tell osd.\* injectargs -- --osd-pg-log-trim-min 2 + ceph tell osd.\* injectargs -- --osd-pg-log-trim-max 4 + + # adding log entries, should only trim 4 and add one each time + rados -p test rm foo + test_log_size $PGID 17 + rados -p test rm foo + test_log_size $PGID 14 + rados -p test rm foo + test_log_size $PGID 11 + rados -p test rm foo + test_log_size $PGID 8 + rados -p test rm foo + test_log_size $PGID 5 + rados -p test rm foo + test_log_size $PGID 2 + + # below trim_min + rados -p test rm foo + test_log_size $PGID 3 + rados -p test rm foo + test_log_size $PGID 3 + rados -p test rm foo + test_log_size $PGID 3 + rados -p test rm foo + test_log_size $PGID 3 +} + +main repro-long-log "$@" + +# Local Variables: +# compile-command: "cd ../.. 
; make -j4 && ../qa/run-standalone.sh repro_long_log.sh" +# End: diff --git a/qa/standalone/scrub/osd-recovery-scrub.sh b/qa/standalone/scrub/osd-recovery-scrub.sh new file mode 100755 index 00000000..965efa41 --- /dev/null +++ b/qa/standalone/scrub/osd-recovery-scrub.sh @@ -0,0 +1,132 @@ +#! /usr/bin/env bash +# +# Copyright (C) 2017 Red Hat +# +# Author: David Zafman +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7124" # git grep '\<7124\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + export -n CEPH_CLI_TEST_DUP_COMMAND + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + $func $dir || return 1 + done +} + +function TEST_recovery_scrub() { + local dir=$1 + local poolname=test + + TESTDATA="testdata.$$" + OSDS=8 + PGS=32 + OBJECTS=4 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + run_mgr $dir x || return 1 + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + # Create a pool with $PGS pgs + create_pool $poolname $PGS $PGS + wait_for_clean || return 1 + poolid=$(ceph osd dump | grep "^pool.*[']test[']" | awk '{ print $2 }') + + dd if=/dev/urandom of=$TESTDATA bs=1M count=50 + for i in $(seq 1 $OBJECTS) + do + rados -p $poolname put obj${i} $TESTDATA + done + rm -f $TESTDATA + + ceph osd pool set $poolname size 4 + + pids="" + for pg in $(seq 0 $(expr $PGS - 1)) + do + run_in_background pids pg_scrub $poolid.$(printf "%x" $pg) + done + ceph pg dump pgs + wait_background pids + return_code=$? + if [ $return_code -ne 0 ]; then return $return_code; fi + + ERRORS=0 + pidfile=$(find $dir 2>/dev/null | grep $name_prefix'[^/]*\.pid') + pid=$(cat $pidfile) + if ! 
kill -0 $pid + then + echo "OSD crash occurred" + tail -100 $dir/osd.0.log + ERRORS=$(expr $ERRORS + 1) + fi + + # Work around for http://tracker.ceph.com/issues/38195 + kill_daemons $dir #|| return 1 + + declare -a err_strings + err_strings[0]="not scheduling scrubs due to active recovery" + # Test with these two strings after disabled check in OSD::sched_scrub() + #err_strings[0]="handle_scrub_reserve_request: failed to reserve remotely" + #err_strings[1]="sched_scrub: failed to reserve locally" + + for osd in $(seq 0 $(expr $OSDS - 1)) + do + grep "failed to reserve\|not scheduling scrubs" $dir/osd.${osd}.log + done + for err_string in "${err_strings[@]}" + do + found=false + for osd in $(seq 0 $(expr $OSDS - 1)) + do + if grep "$err_string" $dir/osd.${osd}.log > /dev/null; + then + found=true + fi + done + if [ "$found" = "false" ]; then + echo "Missing log message '$err_string'" + ERRORS=$(expr $ERRORS + 1) + fi + done + + teardown $dir || return 1 + + if [ $ERRORS != "0" ]; + then + echo "TEST FAILED WITH $ERRORS ERRORS" + return 1 + fi + + echo "TEST PASSED" + return 0 +} + +main osd-recovery-scrub "$@" + +# Local Variables: +# compile-command: "cd build ; make -j4 && \ +# ../qa/run-standalone.sh osd-recovery-scrub.sh" +# End: diff --git a/qa/standalone/scrub/osd-scrub-dump.sh b/qa/standalone/scrub/osd-scrub-dump.sh new file mode 100755 index 00000000..e218834c --- /dev/null +++ b/qa/standalone/scrub/osd-scrub-dump.sh @@ -0,0 +1,173 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2019 Red Hat +# +# Author: David Zafman +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. 
+#
+
+source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
+
+MAX_SCRUBS=4
+SCRUB_SLEEP=2
+POOL_SIZE=3
+
+function run() {
+    local dir=$1
+    shift
+    local SLEEP=0
+    local CHUNK_MAX=5
+
+    export CEPH_MON="127.0.0.1:7184" # git grep '\<7184\>' : there must be only one
+    export CEPH_ARGS
+    CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
+    CEPH_ARGS+="--mon-host=$CEPH_MON "
+    CEPH_ARGS+="--osd_max_scrubs=$MAX_SCRUBS "
+    CEPH_ARGS+="--osd_scrub_sleep=$SLEEP "
+    CEPH_ARGS+="--osd_scrub_chunk_max=$CHUNK_MAX "
+    CEPH_ARGS+="--osd_scrub_sleep=$SCRUB_SLEEP "
+    CEPH_ARGS+="--osd_pool_default_size=$POOL_SIZE "
+
+    export -n CEPH_CLI_TEST_DUP_COMMAND
+    local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
+    for func in $funcs ; do
+        setup $dir || return 1
+        $func $dir || return 1
+        teardown $dir || return 1
+    done
+}
+
+function TEST_recover_unexpected() {
+    local dir=$1
+    shift
+    local OSDS=6
+    local PGS=16
+    local POOLS=3
+    local OBJS=1000
+
+    run_mon $dir a || return 1
+    run_mgr $dir x || return 1
+    for o in $(seq 0 $(expr $OSDS - 1))
+    do
+        run_osd $dir $o
+    done
+
+    for i in $(seq 1 $POOLS)
+    do
+        create_pool test$i $PGS $PGS
+    done
+
+    wait_for_clean || return 1
+
+    dd if=/dev/urandom of=datafile bs=4k count=2
+    for i in $(seq 1 $POOLS)
+    do
+        for j in $(seq 1 $OBJS)
+        do
+            rados -p test$i put obj$j datafile
+        done
+    done
+    rm datafile
+
+    ceph osd set noscrub
+    ceph osd set nodeep-scrub
+
+    for qpg in $(ceph pg dump pgs --format=json-pretty | jq '.pg_stats[].pgid')
+    do
+        primary=$(ceph pg dump pgs --format=json | jq ".pg_stats[] | select(.pgid == $qpg) | .acting_primary")
+        eval pg=$qpg # strip quotes around qpg
+        CEPH_ARGS='' ceph daemon $(get_asok_path osd.$primary) trigger_scrub $pg
+    done
+
+    ceph pg dump pgs
+
+    max=$(CEPH_ARGS='' ceph daemon $(get_asok_path osd.0) dump_scrub_reservations | jq '.osd_max_scrubs')
+    if [ $max != $MAX_SCRUBS ];
+    then
+        echo "ERROR: Incorrect osd_max_scrubs from dump_scrub_reservations"
+        return 1
+    fi
+
+    ceph osd unset noscrub
+
+    ok=false
+    for i in $(seq 0 300)
+    do
+        ceph pg dump pgs
+        if ceph pg dump pgs | grep scrubbing; then
+            ok=true
+            break
+        fi
+        sleep 1
+    done
+    if test $ok = "false"; then
+        echo "ERROR: Test set-up failed: no scrubbing"
+        return 1
+    fi
+
+    local total=0
+    local zerocount=0
+    local maxzerocount=3
+    while(true)
+    do
+        pass=0
+        for o in $(seq 0 $(expr $OSDS - 1))
+        do
+            CEPH_ARGS='' ceph daemon $(get_asok_path osd.$o) dump_scrub_reservations
+            scrubs=$(CEPH_ARGS='' ceph daemon $(get_asok_path osd.$o) dump_scrub_reservations | jq '.scrubs_local + .scrubs_remote')
+            if [ $scrubs -gt $MAX_SCRUBS ]; then
+                echo "ERROR: More than $MAX_SCRUBS currently reserved"
+                return 1
+            fi
+            pass=$(expr $pass + $scrubs)
+        done
+        if [ $pass = "0" ]; then
+            zerocount=$(expr $zerocount + 1)
+        fi
+        if [ $zerocount -gt $maxzerocount ]; then
+            break
+        fi
+        total=$(expr $total + $pass)
+        sleep $(expr $SCRUB_SLEEP \* 2)
+    done
+
+    # Check that there are no more scrubs
+    for i in $(seq 0 5)
+    do
+        if ceph pg dump pgs | grep scrubbing; then
+            echo "ERROR: Extra scrubs after test completion...not expected"
+            return 1
+        fi
+        sleep $SCRUB_SLEEP
+    done
+
+    echo $total total reservations seen
+
+    # Sort of arbitrary number based on PGS * POOLS * POOL_SIZE as the number of total scrub
+    # reservations that must occur. However, the loop above might see the same reservation more
+    # than once.
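+    # With the constants above that lower bound works out to 16 PGs * 3 pools * 3
+    # replicas = 144 reservations.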
+ actual_reservations=$(expr $PGS \* $POOLS \* $POOL_SIZE) + if [ $total -lt $actual_reservations ]; then + echo "ERROR: Unexpectedly low amount of scrub reservations seen during test" + return 1 + fi + + return 0 +} + + +main osd-scrub-dump "$@" + +# Local Variables: +# compile-command: "cd build ; make check && \ +# ../qa/run-standalone.sh osd-scrub-dump.sh" +# End: diff --git a/qa/standalone/scrub/osd-scrub-repair.sh b/qa/standalone/scrub/osd-scrub-repair.sh new file mode 100755 index 00000000..e1b9fe05 --- /dev/null +++ b/qa/standalone/scrub/osd-scrub-repair.sh @@ -0,0 +1,6231 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2014 Red Hat +# +# Author: Loic Dachary +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# +set -x +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +if [ `uname` = FreeBSD ]; then + # erasure coding overwrites are only tested on Bluestore + # erasure coding on filestore is unsafe + # http://docs.ceph.com/docs/master/rados/operations/erasure-code/#erasure-coding-with-overwrites + use_ec_overwrite=false +else + use_ec_overwrite=true +fi + +# Test development and debugging +# Set to "yes" in order to ignore diff errors and save results to update test +getjson="no" + +# Filter out mtime and local_mtime dates, version, prior_version and last_reqid (client) from any object_info. +jqfilter='def walk(f): + . as $in + | if type == "object" then + reduce keys[] as $key + ( {}; . + { ($key): ($in[$key] | walk(f)) } ) | f + elif type == "array" then map( walk(f) ) | f + else f + end; +walk(if type == "object" then del(.mtime) else . end) +| walk(if type == "object" then del(.local_mtime) else . end) +| walk(if type == "object" then del(.last_reqid) else . end) +| walk(if type == "object" then del(.version) else . end) +| walk(if type == "object" then del(.prior_version) else . 
end)' + +sortkeys='import json; import sys ; JSON=sys.stdin.read() ; ud = json.loads(JSON) ; print(json.dumps(ud, sort_keys=True, indent=2))' + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7107" # git grep '\<7107\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + CEPH_ARGS+="--osd-skip-data-digest=false " + + export -n CEPH_CLI_TEST_DUP_COMMAND + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + $func $dir || return 1 + done +} + +function add_something() { + local dir=$1 + local poolname=$2 + local obj=${3:-SOMETHING} + local scrub=${4:-noscrub} + + if [ "$scrub" = "noscrub" ]; + then + ceph osd set noscrub || return 1 + ceph osd set nodeep-scrub || return 1 + else + ceph osd unset noscrub || return 1 + ceph osd unset nodeep-scrub || return 1 + fi + + local payload=ABCDEF + echo $payload > $dir/ORIGINAL + rados --pool $poolname put $obj $dir/ORIGINAL || return 1 +} + +# +# Corrupt one copy of a replicated pool +# +function TEST_corrupt_and_repair_replicated() { + local dir=$1 + local poolname=rbd + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=2 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + + add_something $dir $poolname || return 1 + corrupt_and_repair_one $dir $poolname $(get_not_primary $poolname SOMETHING) || return 1 + # Reproduces http://tracker.ceph.com/issues/8914 + corrupt_and_repair_one $dir $poolname $(get_primary $poolname SOMETHING) || return 1 + + teardown $dir || return 1 +} + +# +# Allow repair to be scheduled when some recovering is still undergoing on the same OSD +# +function TEST_allow_repair_during_recovery() { + local dir=$1 + local poolname=rbd + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=2 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 --osd_scrub_during_recovery=false \ + --osd_repair_during_recovery=true \ + --osd_debug_pretend_recovery_active=true || return 1 + run_osd $dir 1 --osd_scrub_during_recovery=false \ + --osd_repair_during_recovery=true \ + --osd_debug_pretend_recovery_active=true || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + + add_something $dir $poolname || return 1 + corrupt_and_repair_one $dir $poolname $(get_not_primary $poolname SOMETHING) || return 1 + + teardown $dir || return 1 +} + +# +# Skip non-repair scrub correctly during recovery +# +function TEST_skip_non_repair_during_recovery() { + local dir=$1 + local poolname=rbd + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=2 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 --osd_scrub_during_recovery=false \ + --osd_repair_during_recovery=true \ + --osd_debug_pretend_recovery_active=true || return 1 + run_osd $dir 1 --osd_scrub_during_recovery=false \ + --osd_repair_during_recovery=true \ + --osd_debug_pretend_recovery_active=true || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + + add_something $dir $poolname || return 1 + scrub_and_not_schedule $dir $poolname $(get_not_primary $poolname SOMETHING) || return 1 + + teardown $dir || return 1 +} + +function scrub_and_not_schedule() { + local dir=$1 + local poolname=$2 + local osd=$3 + + # + # 1) start a non-repair scrub + # + local pg=$(get_pg $poolname SOMETHING) + local last_scrub=$(get_last_scrub_stamp $pg) + ceph pg scrub $pg + + # + # 
2) Assure the scrub is not scheduled + # + for ((i=0; i < 3; i++)); do + if test "$(get_last_scrub_stamp $pg)" '>' "$last_scrub" ; then + return 1 + fi + sleep 1 + done + + # + # 3) Access to the file must OK + # + objectstore_tool $dir $osd SOMETHING list-attrs || return 1 + rados --pool $poolname get SOMETHING $dir/COPY || return 1 + diff $dir/ORIGINAL $dir/COPY || return 1 +} + +function corrupt_and_repair_two() { + local dir=$1 + local poolname=$2 + local first=$3 + local second=$4 + + # + # 1) remove the corresponding file from the OSDs + # + pids="" + run_in_background pids objectstore_tool $dir $first SOMETHING remove + run_in_background pids objectstore_tool $dir $second SOMETHING remove + wait_background pids + return_code=$? + if [ $return_code -ne 0 ]; then return $return_code; fi + + # + # 2) repair the PG + # + local pg=$(get_pg $poolname SOMETHING) + repair $pg + # + # 3) The files must be back + # + pids="" + run_in_background pids objectstore_tool $dir $first SOMETHING list-attrs + run_in_background pids objectstore_tool $dir $second SOMETHING list-attrs + wait_background pids + return_code=$? + if [ $return_code -ne 0 ]; then return $return_code; fi + + rados --pool $poolname get SOMETHING $dir/COPY || return 1 + diff $dir/ORIGINAL $dir/COPY || return 1 +} + +# +# 1) add an object +# 2) remove the corresponding file from a designated OSD +# 3) repair the PG +# 4) check that the file has been restored in the designated OSD +# +function corrupt_and_repair_one() { + local dir=$1 + local poolname=$2 + local osd=$3 + + # + # 1) remove the corresponding file from the OSD + # + objectstore_tool $dir $osd SOMETHING remove || return 1 + # + # 2) repair the PG + # + local pg=$(get_pg $poolname SOMETHING) + repair $pg + # + # 3) The file must be back + # + objectstore_tool $dir $osd SOMETHING list-attrs || return 1 + rados --pool $poolname get SOMETHING $dir/COPY || return 1 + diff $dir/ORIGINAL $dir/COPY || return 1 +} + +function corrupt_and_repair_erasure_coded() { + local dir=$1 + local poolname=$2 + + add_something $dir $poolname || return 1 + + local primary=$(get_primary $poolname SOMETHING) + local -a osds=($(get_osds $poolname SOMETHING | sed -e "s/$primary//")) + local not_primary_first=${osds[0]} + local not_primary_second=${osds[1]} + + # Reproduces http://tracker.ceph.com/issues/10017 + corrupt_and_repair_one $dir $poolname $primary || return 1 + # Reproduces http://tracker.ceph.com/issues/10409 + corrupt_and_repair_one $dir $poolname $not_primary_first || return 1 + corrupt_and_repair_two $dir $poolname $not_primary_first $not_primary_second || return 1 + corrupt_and_repair_two $dir $poolname $primary $not_primary_first || return 1 + +} + +function auto_repair_erasure_coded() { + local dir=$1 + local allow_overwrites=$2 + local poolname=ecpool + + # Launch a cluster with 5 seconds scrub interval + setup $dir || return 1 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + local ceph_osd_args="--osd-scrub-auto-repair=true \ + --osd-deep-scrub-interval=5 \ + --osd-scrub-max-interval=5 \ + --osd-scrub-min-interval=5 \ + --osd-scrub-interval-randomize-ratio=0" + for id in $(seq 0 2) ; do + if [ "$allow_overwrites" = "true" ]; then + run_osd $dir $id $ceph_osd_args || return 1 + else + run_osd_filestore $dir $id $ceph_osd_args || return 1 + fi + done + create_rbd_pool || return 1 + wait_for_clean || return 1 + + # Create an EC pool + create_ec_pool $poolname $allow_overwrites k=2 m=1 || return 1 + + # Put an object + local payload=ABCDEF + echo $payload > 
$dir/ORIGINAL + rados --pool $poolname put SOMETHING $dir/ORIGINAL || return 1 + + # Remove the object from one shard physically + # Restarted osd get $ceph_osd_args passed + objectstore_tool $dir $(get_not_primary $poolname SOMETHING) SOMETHING remove || return 1 + # Wait for auto repair + local pgid=$(get_pg $poolname SOMETHING) + wait_for_scrub $pgid "$(get_last_scrub_stamp $pgid)" + wait_for_clean || return 1 + # Verify - the file should be back + # Restarted osd get $ceph_osd_args passed + objectstore_tool $dir $(get_not_primary $poolname SOMETHING) SOMETHING list-attrs || return 1 + rados --pool $poolname get SOMETHING $dir/COPY || return 1 + diff $dir/ORIGINAL $dir/COPY || return 1 + + # Tear down + teardown $dir || return 1 +} + +function TEST_auto_repair_erasure_coded_appends() { + auto_repair_erasure_coded $1 false +} + +function TEST_auto_repair_erasure_coded_overwrites() { + if [ "$use_ec_overwrite" = "true" ]; then + auto_repair_erasure_coded $1 true + fi +} + +function TEST_auto_repair_bluestore_basic() { + local dir=$1 + local poolname=testpool + + # Launch a cluster with 5 seconds scrub interval + setup $dir || return 1 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + local ceph_osd_args="--osd-scrub-auto-repair=true \ + --osd_deep_scrub_randomize_ratio=0 \ + --osd-scrub-interval-randomize-ratio=0" + for id in $(seq 0 2) ; do + run_osd $dir $id $ceph_osd_args || return 1 + done + + create_pool $poolname 1 1 || return 1 + ceph osd pool set $poolname size 2 + wait_for_clean || return 1 + + # Put an object + local payload=ABCDEF + echo $payload > $dir/ORIGINAL + rados --pool $poolname put SOMETHING $dir/ORIGINAL || return 1 + + # Remove the object from one shard physically + # Restarted osd get $ceph_osd_args passed + objectstore_tool $dir $(get_not_primary $poolname SOMETHING) SOMETHING remove || return 1 + + local pgid=$(get_pg $poolname SOMETHING) + local primary=$(get_primary $poolname SOMETHING) + local last_scrub_stamp="$(get_last_scrub_stamp $pgid)" + CEPH_ARGS='' ceph daemon $(get_asok_path osd.$primary) trigger_deep_scrub $pgid + CEPH_ARGS='' ceph daemon $(get_asok_path osd.$primary) trigger_scrub $pgid + + # Wait for auto repair + wait_for_scrub $pgid "$last_scrub_stamp" || return 1 + wait_for_clean || return 1 + ceph pg dump pgs + # Verify - the file should be back + # Restarted osd get $ceph_osd_args passed + objectstore_tool $dir $(get_not_primary $poolname SOMETHING) SOMETHING list-attrs || return 1 + objectstore_tool $dir $(get_not_primary $poolname SOMETHING) SOMETHING get-bytes $dir/COPY || return 1 + diff $dir/ORIGINAL $dir/COPY || return 1 + grep scrub_finish $dir/osd.${primary}.log + + # Tear down + teardown $dir || return 1 +} + +function TEST_auto_repair_bluestore_scrub() { + local dir=$1 + local poolname=testpool + + # Launch a cluster with 5 seconds scrub interval + setup $dir || return 1 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + local ceph_osd_args="--osd-scrub-auto-repair=true \ + --osd_deep_scrub_randomize_ratio=0 \ + --osd-scrub-interval-randomize-ratio=0" + for id in $(seq 0 2) ; do + run_osd $dir $id $ceph_osd_args || return 1 + done + + create_pool $poolname 1 1 || return 1 + ceph osd pool set $poolname size 2 + wait_for_clean || return 1 + + # Put an object + local payload=ABCDEF + echo $payload > $dir/ORIGINAL + rados --pool $poolname put SOMETHING $dir/ORIGINAL || return 1 + + # Remove the object from one shard physically + # Restarted osd get $ceph_osd_args passed + objectstore_tool $dir $(get_not_primary 
$poolname SOMETHING) SOMETHING remove || return 1 + + local pgid=$(get_pg $poolname SOMETHING) + local primary=$(get_primary $poolname SOMETHING) + local last_scrub_stamp="$(get_last_scrub_stamp $pgid)" + CEPH_ARGS='' ceph daemon $(get_asok_path osd.$primary) trigger_scrub $pgid + + # Wait for scrub -> auto repair + wait_for_scrub $pgid "$last_scrub_stamp" || return 1 + ceph pg dump pgs + # Actually this causes 2 scrubs, so we better wait a little longer + sleep 5 + wait_for_clean || return 1 + ceph pg dump pgs + # Verify - the file should be back + # Restarted osd get $ceph_osd_args passed + objectstore_tool $dir $(get_not_primary $poolname SOMETHING) SOMETHING list-attrs || return 1 + rados --pool $poolname get SOMETHING $dir/COPY || return 1 + diff $dir/ORIGINAL $dir/COPY || return 1 + grep scrub_finish $dir/osd.${primary}.log + + # This should have caused 1 object to be repaired + COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired') + test "$COUNT" = "1" || return 1 + + # Tear down + teardown $dir || return 1 +} + +function TEST_auto_repair_bluestore_failed() { + local dir=$1 + local poolname=testpool + + # Launch a cluster with 5 seconds scrub interval + setup $dir || return 1 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + local ceph_osd_args="--osd-scrub-auto-repair=true \ + --osd_deep_scrub_randomize_ratio=0 \ + --osd-scrub-interval-randomize-ratio=0" + for id in $(seq 0 2) ; do + run_osd $dir $id $ceph_osd_args || return 1 + done + + create_pool $poolname 1 1 || return 1 + ceph osd pool set $poolname size 2 + wait_for_clean || return 1 + + # Put an object + local payload=ABCDEF + echo $payload > $dir/ORIGINAL + for i in $(seq 1 10) + do + rados --pool $poolname put obj$i $dir/ORIGINAL || return 1 + done + + # Remove the object from one shard physically + # Restarted osd get $ceph_osd_args passed + objectstore_tool $dir $(get_not_primary $poolname SOMETHING) obj1 remove || return 1 + # obj2 can't be repaired + objectstore_tool $dir $(get_not_primary $poolname SOMETHING) obj2 remove || return 1 + objectstore_tool $dir $(get_primary $poolname SOMETHING) obj2 rm-attr _ || return 1 + + local pgid=$(get_pg $poolname obj1) + local primary=$(get_primary $poolname obj1) + local last_scrub_stamp="$(get_last_scrub_stamp $pgid)" + CEPH_ARGS='' ceph daemon $(get_asok_path osd.$primary) trigger_deep_scrub $pgid + CEPH_ARGS='' ceph daemon $(get_asok_path osd.$primary) trigger_scrub $pgid + + # Wait for auto repair + wait_for_scrub $pgid "$last_scrub_stamp" || return 1 + wait_for_clean || return 1 + flush_pg_stats + grep scrub_finish $dir/osd.${primary}.log + grep -q "scrub_finish.*still present after re-scrub" $dir/osd.${primary}.log || return 1 + ceph pg dump pgs + ceph pg dump pgs | grep -q "^${pgid}.*+failed_repair" || return 1 + + # Verify - obj1 should be back + # Restarted osd get $ceph_osd_args passed + objectstore_tool $dir $(get_not_primary $poolname obj1) obj1 list-attrs || return 1 + rados --pool $poolname get obj1 $dir/COPY || return 1 + diff $dir/ORIGINAL $dir/COPY || return 1 + grep scrub_finish $dir/osd.${primary}.log + + # Make it repairable + objectstore_tool $dir $(get_primary $poolname SOMETHING) obj2 remove || return 1 + repair $pgid + sleep 2 + + ceph pg dump pgs + ceph pg dump pgs | grep -q "^${pgid}.* active+clean " || return 1 + grep scrub_finish $dir/osd.${primary}.log + + # Tear down + teardown $dir || return 1 +} + +function TEST_auto_repair_bluestore_failed_norecov() { + local dir=$1 + local poolname=testpool + + # Launch a cluster 
with 5 seconds scrub interval + setup $dir || return 1 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + local ceph_osd_args="--osd-scrub-auto-repair=true \ + --osd_deep_scrub_randomize_ratio=0 \ + --osd-scrub-interval-randomize-ratio=0" + for id in $(seq 0 2) ; do + run_osd $dir $id $ceph_osd_args || return 1 + done + + create_pool $poolname 1 1 || return 1 + ceph osd pool set $poolname size 2 + wait_for_clean || return 1 + + # Put an object + local payload=ABCDEF + echo $payload > $dir/ORIGINAL + for i in $(seq 1 10) + do + rados --pool $poolname put obj$i $dir/ORIGINAL || return 1 + done + + # Remove the object from one shard physically + # Restarted osd get $ceph_osd_args passed + # obj1 can't be repaired + objectstore_tool $dir $(get_not_primary $poolname SOMETHING) obj1 remove || return 1 + objectstore_tool $dir $(get_primary $poolname SOMETHING) obj1 rm-attr _ || return 1 + # obj2 can't be repaired + objectstore_tool $dir $(get_not_primary $poolname SOMETHING) obj2 remove || return 1 + objectstore_tool $dir $(get_primary $poolname SOMETHING) obj2 rm-attr _ || return 1 + + local pgid=$(get_pg $poolname obj1) + local primary=$(get_primary $poolname obj1) + local last_scrub_stamp="$(get_last_scrub_stamp $pgid)" + CEPH_ARGS='' ceph daemon $(get_asok_path osd.$primary) trigger_deep_scrub $pgid + CEPH_ARGS='' ceph daemon $(get_asok_path osd.$primary) trigger_scrub $pgid + + # Wait for auto repair + wait_for_scrub $pgid "$last_scrub_stamp" || return 1 + wait_for_clean || return 1 + flush_pg_stats + grep -q "scrub_finish.*present with no repair possible" $dir/osd.${primary}.log || return 1 + ceph pg dump pgs + ceph pg dump pgs | grep -q "^${pgid}.*+failed_repair" || return 1 + + # Tear down + teardown $dir || return 1 +} + +function TEST_repair_stats() { + local dir=$1 + local poolname=testpool + local OSDS=2 + local OBJS=30 + # This needs to be an even number + local REPAIRS=20 + + # Launch a cluster with 5 seconds scrub interval + setup $dir || return 1 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + local ceph_osd_args="--osd_deep_scrub_randomize_ratio=0 \ + --osd-scrub-interval-randomize-ratio=0" + for id in $(seq 0 $(expr $OSDS - 1)) ; do + run_osd $dir $id $ceph_osd_args || return 1 + done + + create_pool $poolname 1 1 || return 1 + ceph osd pool set $poolname size 2 + wait_for_clean || return 1 + + # Put an object + local payload=ABCDEF + echo $payload > $dir/ORIGINAL + for i in $(seq 1 $OBJS) + do + rados --pool $poolname put obj$i $dir/ORIGINAL || return 1 + done + + # Remove the object from one shard physically + # Restarted osd get $ceph_osd_args passed + local other=$(get_not_primary $poolname obj1) + local pgid=$(get_pg $poolname obj1) + local primary=$(get_primary $poolname obj1) + + kill_daemons $dir TERM osd.$other >&2 < /dev/null || return 1 + kill_daemons $dir TERM osd.$primary >&2 < /dev/null || return 1 + for i in $(seq 1 $REPAIRS) + do + # Remove from both osd.0 and osd.1 + OSD=$(expr $i % 2) + _objectstore_tool_nodown $dir $OSD obj$i remove || return 1 + done + activate_osd $dir $primary $ceph_osd_args || return 1 + activate_osd $dir $other $ceph_osd_args || return 1 + wait_for_clean || return 1 + + repair $pgid + wait_for_clean || return 1 + ceph pg dump pgs + flush_pg_stats + + # This should have caused $REPAIRS objects to be repaired + ceph pg $pgid query | jq '.info.stats.stat_sum' + COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired') + test "$COUNT" = "$REPAIRS" || return 1 + + ceph pg dump --format=json-pretty | jq
".pg_map.osd_stats[] | select(.osd == $primary )" + COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats[] | select(.osd == $primary ).num_shards_repaired") + test "$COUNT" = "$(expr $REPAIRS / 2)" || return 1 + + ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats[] | select(.osd == $other )" + COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats[] | select(.osd == $other ).num_shards_repaired") + test "$COUNT" = "$(expr $REPAIRS / 2)" || return 1 + + ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum" + COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired") + test "$COUNT" = "$REPAIRS" || return 1 + + # Tear down + teardown $dir || return 1 +} + +function TEST_repair_stats_ec() { + local dir=$1 + local poolname=testpool + local OSDS=3 + local OBJS=30 + # This needs to be an even number + local REPAIRS=26 + local allow_overwrites=false + + # Launch a cluster with 5 seconds scrub interval + setup $dir || return 1 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + local ceph_osd_args="--osd_deep_scrub_randomize_ratio=0 \ + --osd-scrub-interval-randomize-ratio=0" + for id in $(seq 0 $(expr $OSDS - 1)) ; do + run_osd $dir $id $ceph_osd_args || return 1 + done + + # Create an EC pool + create_ec_pool $poolname $allow_overwrites k=2 m=1 || return 1 + + # Put an object + local payload=ABCDEF + echo $payload > $dir/ORIGINAL + for i in $(seq 1 $OBJS) + do + rados --pool $poolname put obj$i $dir/ORIGINAL || return 1 + done + + # Remove the object from one shard physically + # Restarted osd get $ceph_osd_args passed + local other=$(get_not_primary $poolname obj1) + local pgid=$(get_pg $poolname obj1) + local primary=$(get_primary $poolname obj1) + + kill_daemons $dir TERM osd.$other >&2 < /dev/null || return 1 + kill_daemons $dir TERM osd.$primary >&2 < /dev/null || return 1 + for i in $(seq 1 $REPAIRS) + do + # Remove from both osd.0 and osd.1 + OSD=$(expr $i % 2) + _objectstore_tool_nodown $dir $OSD obj$i remove || return 1 + done + activate_osd $dir $primary $ceph_osd_args || return 1 + activate_osd $dir $other $ceph_osd_args || return 1 + wait_for_clean || return 1 + + repair $pgid + wait_for_clean || return 1 + ceph pg dump pgs + flush_pg_stats + + # This should have caused $REPAIRS objects to be repaired + ceph pg $pgid query | jq '.info.stats.stat_sum' + COUNT=$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_objects_repaired') + test "$COUNT" = "$REPAIRS" || return 1 + + for osd in $(seq 0 $(expr $OSDS - 1)) ; do + if [ $osd = $other -o $osd = $primary ]; then + repair=$(expr $REPAIRS / 2) + else + repair="0" + fi + + ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats[] | select(.osd == $osd )" + COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats[] | select(.osd == $osd ).num_shards_repaired") + test "$COUNT" = "$repair" || return 1 + done + + ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum" + COUNT=$(ceph pg dump --format=json-pretty | jq ".pg_map.osd_stats_sum.num_shards_repaired") + test "$COUNT" = "$REPAIRS" || return 1 + + # Tear down + teardown $dir || return 1 +} + +function corrupt_and_repair_jerasure() { + local dir=$1 + local allow_overwrites=$2 + local poolname=ecpool + + setup $dir || return 1 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + for id in $(seq 0 3) ; do + if [ "$allow_overwrites" = "true" ]; then + run_osd $dir $id || return 1 + else + run_osd_filestore $dir $id || return 1 + fi + done + create_rbd_pool || return 1 + wait_for_clean ||
return 1 + + create_ec_pool $poolname $allow_overwrites k=2 m=2 || return 1 + corrupt_and_repair_erasure_coded $dir $poolname || return 1 + + teardown $dir || return 1 +} + +function TEST_corrupt_and_repair_jerasure_appends() { + corrupt_and_repair_jerasure $1 false +} + +function TEST_corrupt_and_repair_jerasure_overwrites() { + if [ "$use_ec_overwrite" = "true" ]; then + corrupt_and_repair_jerasure $1 true + fi +} + +function corrupt_and_repair_lrc() { + local dir=$1 + local allow_overwrites=$2 + local poolname=ecpool + + setup $dir || return 1 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + for id in $(seq 0 9) ; do + if [ "$allow_overwrites" = "true" ]; then + run_osd $dir $id || return 1 + else + run_osd_filestore $dir $id || return 1 + fi + done + create_rbd_pool || return 1 + wait_for_clean || return 1 + + create_ec_pool $poolname $allow_overwrites k=4 m=2 l=3 plugin=lrc || return 1 + corrupt_and_repair_erasure_coded $dir $poolname || return 1 + + teardown $dir || return 1 +} + +function TEST_corrupt_and_repair_lrc_appends() { + corrupt_and_repair_lrc $1 false +} + +function TEST_corrupt_and_repair_lrc_overwrites() { + if [ "$use_ec_overwrite" = "true" ]; then + corrupt_and_repair_lrc $1 true + fi +} + +function unfound_erasure_coded() { + local dir=$1 + local allow_overwrites=$2 + local poolname=ecpool + local payload=ABCDEF + + setup $dir || return 1 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + for id in $(seq 0 3) ; do + if [ "$allow_overwrites" = "true" ]; then + run_osd $dir $id || return 1 + else + run_osd_filestore $dir $id || return 1 + fi + done + + create_ec_pool $poolname $allow_overwrites k=2 m=2 || return 1 + + add_something $dir $poolname || return 1 + + local primary=$(get_primary $poolname SOMETHING) + local -a osds=($(get_osds $poolname SOMETHING | sed -e "s/$primary//")) + local not_primary_first=${osds[0]} + local not_primary_second=${osds[1]} + local not_primary_third=${osds[2]} + + # + # 1) remove the corresponding file from the OSDs + # + pids="" + run_in_background pids objectstore_tool $dir $not_primary_first SOMETHING remove + run_in_background pids objectstore_tool $dir $not_primary_second SOMETHING remove + run_in_background pids objectstore_tool $dir $not_primary_third SOMETHING remove + wait_background pids + return_code=$? 
+ if [ $return_code -ne 0 ]; then return $return_code; fi + + # + # 2) repair the PG + # + local pg=$(get_pg $poolname SOMETHING) + repair $pg + # + # 3) check pg state + # + # it may take a bit to appear due to mon/mgr asynchrony + for f in `seq 1 60`; do + ceph -s | grep "1/1 objects unfound" && break + sleep 1 + done + ceph -s|grep "4 up" || return 1 + ceph -s|grep "4 in" || return 1 + ceph -s|grep "1/1 objects unfound" || return 1 + + teardown $dir || return 1 +} + +function TEST_unfound_erasure_coded_appends() { + unfound_erasure_coded $1 false +} + +function TEST_unfound_erasure_coded_overwrites() { + if [ "$use_ec_overwrite" = "true" ]; then + unfound_erasure_coded $1 true + fi +} + +# +# list_missing for EC pool +# +function list_missing_erasure_coded() { + local dir=$1 + local allow_overwrites=$2 + local poolname=ecpool + + setup $dir || return 1 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + for id in $(seq 0 2) ; do + if [ "$allow_overwrites" = "true" ]; then + run_osd $dir $id || return 1 + else + run_osd_filestore $dir $id || return 1 + fi + done + create_rbd_pool || return 1 + wait_for_clean || return 1 + + create_ec_pool $poolname $allow_overwrites k=2 m=1 || return 1 + + # Put an object and remove the two shards (including primary) + add_something $dir $poolname MOBJ0 || return 1 + local -a osds0=($(get_osds $poolname MOBJ0)) + + # Put another object and remove two shards (excluding primary) + add_something $dir $poolname MOBJ1 || return 1 + local -a osds1=($(get_osds $poolname MOBJ1)) + + # Stop all osd daemons + for id in $(seq 0 2) ; do + kill_daemons $dir TERM osd.$id >&2 < /dev/null || return 1 + done + + id=${osds0[0]} + ceph-objectstore-tool --data-path $dir/$id \ + MOBJ0 remove || return 1 + id=${osds0[1]} + ceph-objectstore-tool --data-path $dir/$id \ + MOBJ0 remove || return 1 + + id=${osds1[1]} + ceph-objectstore-tool --data-path $dir/$id \ + MOBJ1 remove || return 1 + id=${osds1[2]} + ceph-objectstore-tool --data-path $dir/$id \ + MOBJ1 remove || return 1 + + for id in $(seq 0 2) ; do + activate_osd $dir $id >&2 || return 1 + done + create_rbd_pool || return 1 + wait_for_clean || return 1 + + # Get the PG - both objects should be in the same PG + local pg=$(get_pg $poolname MOBJ0) + + # Repair the PG, which triggers recovery, + # and should mark the objects as unfound + repair $pg + + for i in $(seq 0 120) ; do + [ $i -lt 60 ] || return 1 + matches=$(ceph pg $pg list_unfound | egrep "MOBJ0|MOBJ1" | wc -l) + [ $matches -eq 2 ] && break + done + + teardown $dir || return 1 +} + +function TEST_list_missing_erasure_coded_appends() { + list_missing_erasure_coded $1 false +} + +function TEST_list_missing_erasure_coded_overwrites() { + if [ "$use_ec_overwrite" = "true" ]; then + list_missing_erasure_coded $1 true + fi +} + +# +# Corrupt one copy of a replicated pool +# +function TEST_corrupt_scrub_replicated() { + local dir=$1 + local poolname=csr_pool + local total_objs=19 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=2 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + + create_pool foo 1 || return 1 + create_pool $poolname 1 1 || return 1 + wait_for_clean || return 1 + + for i in $(seq 1 $total_objs) ; do + objname=ROBJ${i} + add_something $dir $poolname $objname || return 1 + + rados --pool $poolname setomapheader $objname hdr-$objname || return 1 + rados --pool $poolname setomapval $objname key-$objname val-$objname ||
return 1 + done + + # Increase file 1 MB + 1KB + dd if=/dev/zero of=$dir/new.ROBJ19 bs=1024 count=1025 + rados --pool $poolname put $objname $dir/new.ROBJ19 || return 1 + rm -f $dir/new.ROBJ19 + + local pg=$(get_pg $poolname ROBJ0) + local primary=$(get_primary $poolname ROBJ0) + + # Compute an old omap digest and save oi + CEPH_ARGS='' ceph daemon $(get_asok_path osd.0) \ + config set osd_deep_scrub_update_digest_min_age 0 + CEPH_ARGS='' ceph daemon $(get_asok_path osd.1) \ + config set osd_deep_scrub_update_digest_min_age 0 + pg_deep_scrub $pg + + for i in $(seq 1 $total_objs) ; do + objname=ROBJ${i} + + # Alternate corruption between osd.0 and osd.1 + local osd=$(expr $i % 2) + + case $i in + 1) + # Size (deep scrub data_digest too) + local payload=UVWXYZZZ + echo $payload > $dir/CORRUPT + objectstore_tool $dir $osd $objname set-bytes $dir/CORRUPT || return 1 + ;; + + 2) + # digest (deep scrub only) + local payload=UVWXYZ + echo $payload > $dir/CORRUPT + objectstore_tool $dir $osd $objname set-bytes $dir/CORRUPT || return 1 + ;; + + 3) + # missing + objectstore_tool $dir $osd $objname remove || return 1 + ;; + + 4) + # Modify omap value (deep scrub only) + objectstore_tool $dir $osd $objname set-omap key-$objname $dir/CORRUPT || return 1 + ;; + + 5) + # Delete omap key (deep scrub only) + objectstore_tool $dir $osd $objname rm-omap key-$objname || return 1 + ;; + + 6) + # Add extra omap key (deep scrub only) + echo extra > $dir/extra-val + objectstore_tool $dir $osd $objname set-omap key2-$objname $dir/extra-val || return 1 + rm $dir/extra-val + ;; + + 7) + # Modify omap header (deep scrub only) + echo -n newheader > $dir/hdr + objectstore_tool $dir $osd $objname set-omaphdr $dir/hdr || return 1 + rm $dir/hdr + ;; + + 8) + rados --pool $poolname setxattr $objname key1-$objname val1-$objname || return 1 + rados --pool $poolname setxattr $objname key2-$objname val2-$objname || return 1 + + # Break xattrs + echo -n bad-val > $dir/bad-val + objectstore_tool $dir $osd $objname set-attr _key1-$objname $dir/bad-val || return 1 + objectstore_tool $dir $osd $objname rm-attr _key2-$objname || return 1 + echo -n val3-$objname > $dir/newval + objectstore_tool $dir $osd $objname set-attr _key3-$objname $dir/newval || return 1 + rm $dir/bad-val $dir/newval + ;; + + 9) + objectstore_tool $dir $osd $objname get-attr _ > $dir/robj9-oi + echo -n D > $dir/change + rados --pool $poolname put $objname $dir/change + objectstore_tool $dir $osd $objname set-attr _ $dir/robj9-oi + rm $dir/oi $dir/change + ;; + + # ROBJ10 must be handled after digests are re-computed by a deep scrub below + # ROBJ11 must be handled with config change before deep scrub + # ROBJ12 must be handled with config change before scrubs + # ROBJ13 must be handled before scrubs + + 14) + echo -n bad-val > $dir/bad-val + objectstore_tool $dir 0 $objname set-attr _ $dir/bad-val || return 1 + objectstore_tool $dir 1 $objname rm-attr _ || return 1 + rm $dir/bad-val + ;; + + 15) + objectstore_tool $dir $osd $objname rm-attr _ || return 1 + ;; + + 16) + objectstore_tool $dir 0 $objname rm-attr snapset || return 1 + echo -n bad-val > $dir/bad-val + objectstore_tool $dir 1 $objname set-attr snapset $dir/bad-val || return 1 + ;; + + 17) + # Deep-scrub only (all replicas are diffent than the object info + local payload=ROBJ17 + echo $payload > $dir/new.ROBJ17 + objectstore_tool $dir 0 $objname set-bytes $dir/new.ROBJ17 || return 1 + objectstore_tool $dir 1 $objname set-bytes $dir/new.ROBJ17 || return 1 + ;; + + 18) + # Deep-scrub only (all replicas 
are diffent than the object info + local payload=ROBJ18 + echo $payload > $dir/new.ROBJ18 + objectstore_tool $dir 0 $objname set-bytes $dir/new.ROBJ18 || return 1 + objectstore_tool $dir 1 $objname set-bytes $dir/new.ROBJ18 || return 1 + # Make one replica have a different object info, so a full repair must happen too + objectstore_tool $dir $osd $objname corrupt-info || return 1 + ;; + + 19) + # Set osd-max-object-size smaller than this object's size + + esac + done + + local pg=$(get_pg $poolname ROBJ0) + + ceph tell osd.\* injectargs -- --osd-max-object-size=1048576 + + inject_eio rep data $poolname ROBJ11 $dir 0 || return 1 # shard 0 of [1, 0], osd.1 + inject_eio rep mdata $poolname ROBJ12 $dir 1 || return 1 # shard 1 of [1, 0], osd.0 + inject_eio rep mdata $poolname ROBJ13 $dir 1 || return 1 # shard 1 of [1, 0], osd.0 + inject_eio rep data $poolname ROBJ13 $dir 0 || return 1 # shard 0 of [1, 0], osd.1 + + pg_scrub $pg + + ERRORS=0 + declare -a s_err_strings + err_strings[0]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:30259878:::ROBJ15:head : candidate had a missing info key" + err_strings[1]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:33aca486:::ROBJ18:head : object info inconsistent " + err_strings[2]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:5c7b2c47:::ROBJ16:head : candidate had a corrupt snapset" + err_strings[3]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:5c7b2c47:::ROBJ16:head : candidate had a missing snapset key" + err_strings[4]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:5c7b2c47:::ROBJ16:head : failed to pick suitable object info" + err_strings[5]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:86586531:::ROBJ8:head : attr value mismatch '_key1-ROBJ8', attr name mismatch '_key3-ROBJ8', attr name mismatch '_key2-ROBJ8'" + err_strings[6]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:bc819597:::ROBJ12:head : candidate had a stat error" + err_strings[7]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:c0c86b1d:::ROBJ14:head : candidate had a missing info key" + err_strings[8]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:c0c86b1d:::ROBJ14:head : candidate had a corrupt info" + err_strings[9]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:c0c86b1d:::ROBJ14:head : failed to pick suitable object info" + err_strings[10]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:ce3f1d6a:::ROBJ1:head : candidate size 9 info size 7 mismatch" + err_strings[11]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:ce3f1d6a:::ROBJ1:head : size 9 != size 7 from auth oi 3:ce3f1d6a:::ROBJ1:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 3 dd 2ddbf8f5 od f5fba2c6 alloc_hint [[]0 0 0[]][)], size 9 != size 7 from shard 0" + err_strings[12]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:d60617f9:::ROBJ13:head : candidate had a stat error" + err_strings[13]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 3:f2a5b2a4:::ROBJ3:head : missing" + err_strings[14]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:ffdb2004:::ROBJ9:head : candidate size 1 info size 7 mismatch" + err_strings[15]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:ffdb2004:::ROBJ9:head : object info inconsistent " + err_strings[16]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 3:c0c86b1d:::ROBJ14:head 
: no '_' attr" + err_strings[17]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 3:5c7b2c47:::ROBJ16:head : can't decode 'snapset' attr buffer::malformed_input: .* no longer understand old encoding version 3 < 97" + err_strings[18]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub : stat mismatch, got 19/19 objects, 0/0 clones, 18/19 dirty, 18/19 omap, 0/0 pinned, 0/0 hit_set_archive, 0/0 whiteouts, 1049713/1049720 bytes, 0/0 manifest objects, 0/0 hit_set_archive bytes." + err_strings[19]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub 1 missing, 8 inconsistent objects" + err_strings[20]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub 18 errors" + err_strings[21]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:123a5f55:::ROBJ19:head : size 1049600 > 1048576 is too large" + + for err_string in "${err_strings[@]}" + do + if ! grep -q "$err_string" $dir/osd.${primary}.log + then + echo "Missing log message '$err_string'" + ERRORS=$(expr $ERRORS + 1) + fi + done + + rados list-inconsistent-pg $poolname > $dir/json || return 1 + # Check pg count + test $(jq '. | length' $dir/json) = "1" || return 1 + # Check pgid + test $(jq -r '.[0]' $dir/json) = $pg || return 1 + + rados list-inconsistent-obj $pg > $dir/json || return 1 + # Get epoch for repair-get requests + epoch=$(jq .epoch $dir/json) + + jq "$jqfilter" << EOF | jq '.inconsistents' | python -c "$sortkeys" > $dir/checkcsjson +{ + "inconsistents": [ + { + "shards": [ + { + "size": 7, + "errors": [], + "osd": 0, + "primary": false + }, + { + "object_info": { + "oid": { + "oid": "ROBJ1", + "key": "", + "snapid": -2, + "hash": 1454963827, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "51'58", + "prior_version": "21'3", + "last_reqid": "osd.1.0:57", + "user_version": 3, + "size": 7, + "mtime": "", + "local_mtime": "", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xf5fba2c6", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "size": 9, + "errors": [ + "size_mismatch_info", + "obj_size_info_mismatch" + ], + "osd": 1, + "primary": true + } + ], + "selected_object_info": { + "oid": { + "oid": "ROBJ1", + "key": "", + "snapid": -2, + "hash": 1454963827, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "51'58", + "prior_version": "21'3", + "last_reqid": "osd.1.0:57", + "user_version": 3, + "size": 7, + "mtime": "2018-04-05 14:33:19.804040", + "local_mtime": "2018-04-05 14:33:19.804839", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xf5fba2c6", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "size_mismatch_info", + "obj_size_info_mismatch" + ], + "errors": [ + "size_mismatch" + ], + "object": { + "version": 3, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ1" + } + }, + { + "shards": [ + { + "errors": [ + "stat_error" + ], + "osd": 0, + "primary": false + }, + { + "size": 7, + "errors": [], + "osd": 1, + "primary": true + } + ], + "selected_object_info": { + "oid": { + "oid": "ROBJ12", + "key": "", + "snapid": -2, + "hash": 3920199997, + "max": 0, + "pool": 3, + "namespace": "" + }, + 
"version": "51'56", + "prior_version": "43'36", + "last_reqid": "osd.1.0:55", + "user_version": 36, + "size": 7, + "mtime": "", + "local_mtime": "", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x067f306a", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "stat_error" + ], + "errors": [], + "object": { + "version": 36, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ12" + } + }, + { + "shards": [ + { + "errors": [ + "stat_error" + ], + "osd": 0, + "primary": false + }, + { + "size": 7, + "errors": [], + "osd": 1, + "primary": true + } + ], + "selected_object_info": { + "oid": { + "oid": "ROBJ13", + "key": "", + "snapid": -2, + "hash": 2682806379, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "51'59", + "prior_version": "45'39", + "last_reqid": "osd.1.0:58", + "user_version": 39, + "size": 7, + "mtime": "", + "local_mtime": "", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x6441854d", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "stat_error" + ], + "errors": [], + "object": { + "version": 39, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ13" + } + }, + { + "shards": [ + { + "object_info": "bad-val", + "size": 7, + "errors": [ + "info_corrupted" + ], + "osd": 0, + "primary": false + }, + { + "size": 7, + "errors": [ + "info_missing" + ], + "osd": 1, + "primary": true + } + ], + "union_shard_errors": [ + "info_missing", + "info_corrupted" + ], + "errors": [], + "object": { + "version": 0, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ14" + } + }, + { + "shards": [ + { + "object_info": { + "oid": { + "oid": "ROBJ15", + "key": "", + "snapid": -2, + "hash": 504996876, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "51'49", + "prior_version": "49'45", + "last_reqid": "osd.1.0:48", + "user_version": 45, + "size": 7, + "mtime": "2018-04-05 14:33:29.498969", + "local_mtime": "2018-04-05 14:33:29.499890", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x2d2a4d6e", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "size": 7, + "errors": [], + "osd": 0, + "primary": false + }, + { + "size": 7, + "errors": [ + "info_missing" + ], + "osd": 1, + "primary": true + } + ], + "selected_object_info": { + "oid": { + "oid": "ROBJ15", + "key": "", + "snapid": -2, + "hash": 504996876, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "51'49", + "prior_version": "49'45", + "last_reqid": "osd.1.0:48", + "user_version": 45, + "size": 7, + "mtime": "", + "local_mtime": "", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x2d2a4d6e", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": 
[ + "info_missing" + ], + "errors": [], + "object": { + "version": 45, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ15" + } + }, + { + "errors": [], + "object": { + "locator": "", + "name": "ROBJ16", + "nspace": "", + "snap": "head", + "version": 0 + }, + "shards": [ + { + "errors": [ + "snapset_missing" + ], + "osd": 0, + "primary": false, + "size": 7 + }, + { + "errors": [ + "snapset_corrupted" + ], + "osd": 1, + "primary": true, + "snapset": "bad-val", + "size": 7 + } + ], + "union_shard_errors": [ + "snapset_missing", + "snapset_corrupted" + ] + }, + { + "errors": [ + "object_info_inconsistency" + ], + "object": { + "locator": "", + "name": "ROBJ18", + "nspace": "", + "snap": "head" + }, + "selected_object_info": { + "alloc_hint_flags": 255, + "data_digest": "0x2ddbf8f5", + "expected_object_size": 0, + "expected_write_size": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "lost": 0, + "manifest": { + "type": 0 + }, + "oid": { + "hash": 1629828556, + "key": "", + "max": 0, + "namespace": "", + "oid": "ROBJ18", + "pool": 3, + "snapid": -2 + }, + "omap_digest": "0xddc3680f", + "size": 7, + "truncate_seq": 0, + "truncate_size": 0, + "user_version": 54, + "watchers": {} + }, + "shards": [ + { + "errors": [], + "object_info": { + "alloc_hint_flags": 0, + "data_digest": "0x2ddbf8f5", + "expected_object_size": 0, + "expected_write_size": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "lost": 0, + "manifest": { + "type": 0 + }, + "oid": { + "hash": 1629828556, + "key": "", + "max": 0, + "namespace": "", + "oid": "ROBJ18", + "pool": 3, + "snapid": -2 + }, + "omap_digest": "0xddc3680f", + "size": 7, + "truncate_seq": 0, + "truncate_size": 0, + "user_version": 54, + "watchers": {} + }, + "osd": 0, + "primary": false, + "size": 7 + }, + { + "errors": [], + "object_info": { + "alloc_hint_flags": 255, + "data_digest": "0x2ddbf8f5", + "expected_object_size": 0, + "expected_write_size": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "lost": 0, + "manifest": { + "type": 0 + }, + "oid": { + "hash": 1629828556, + "key": "", + "max": 0, + "namespace": "", + "oid": "ROBJ18", + "pool": 3, + "snapid": -2 + }, + "omap_digest": "0xddc3680f", + "size": 7, + "truncate_seq": 0, + "truncate_size": 0, + "user_version": 54, + "watchers": {} + }, + "osd": 1, + "primary": true, + "size": 7 + } + ], + "union_shard_errors": [] + }, + { + "object": { + "name": "ROBJ19", + "nspace": "", + "locator": "", + "snap": "head", + "version": 58 + }, + "errors": [ + "size_too_large" + ], + "union_shard_errors": [], + "selected_object_info": { + "oid": { + "oid": "ROBJ19", + "key": "", + "snapid": -2, + "hash": 2868534344, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "63'59", + "prior_version": "63'58", + "last_reqid": "osd.1.0:58", + "user_version": 58, + "size": 1049600, + "mtime": "2019-08-09T23:33:58.340709+0000", + "local_mtime": "2019-08-09T23:33:58.345676+0000", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x3dde0ef3", + "omap_digest": "0xbffddd28", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "shards": [ + { + "osd": 0, + "primary": false, + "errors": [], + "size": 1049600 + }, + { + "osd": 1, + "primary": true, + "errors": [], + "size": 1049600 + } + ] + }, + { + "shards": [ + { + "size": 7, + "errors": [], + 
"osd": 0, + "primary": false + }, + { + "errors": [ + "missing" + ], + "osd": 1, + "primary": true + } + ], + "selected_object_info": { + "oid": { + "oid": "ROBJ3", + "key": "", + "snapid": -2, + "hash": 625845583, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "51'61", + "prior_version": "25'9", + "last_reqid": "osd.1.0:60", + "user_version": 9, + "size": 7, + "mtime": "", + "local_mtime": "", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x00b35dfd", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "missing" + ], + "errors": [], + "object": { + "version": 9, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ3" + } + }, + { + "shards": [ + { + "attrs": [ + { + "Base64": false, + "value": "bad-val", + "name": "key1-ROBJ8" + }, + { + "Base64": false, + "value": "val2-ROBJ8", + "name": "key2-ROBJ8" + } + ], + "size": 7, + "errors": [], + "osd": 0, + "primary": false + }, + { + "attrs": [ + { + "Base64": false, + "value": "val1-ROBJ8", + "name": "key1-ROBJ8" + }, + { + "Base64": false, + "value": "val3-ROBJ8", + "name": "key3-ROBJ8" + } + ], + "size": 7, + "errors": [], + "osd": 1, + "primary": true + } + ], + "selected_object_info": { + "oid": { + "oid": "ROBJ8", + "key": "", + "snapid": -2, + "hash": 2359695969, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "79'66", + "prior_version": "79'65", + "last_reqid": "client.4554.0:1", + "user_version": 79, + "size": 7, + "mtime": "", + "local_mtime": "", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xd6be81dc", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [], + "errors": [ + "attr_value_mismatch", + "attr_name_mismatch" + ], + "object": { + "version": 66, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ8" + } + }, + { + "shards": [ + { + "object_info": { + "oid": { + "oid": "ROBJ9", + "key": "", + "snapid": -2, + "hash": 537189375, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "95'67", + "prior_version": "51'64", + "last_reqid": "client.4649.0:1", + "user_version": 80, + "size": 1, + "mtime": "", + "local_mtime": "", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2b63260d", + "omap_digest": "0x2eecc539", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "size": 1, + "errors": [], + "osd": 0, + "primary": false + }, + { + "object_info": { + "oid": { + "oid": "ROBJ9", + "key": "", + "snapid": -2, + "hash": 537189375, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "51'64", + "prior_version": "37'27", + "last_reqid": "osd.1.0:63", + "user_version": 27, + "size": 7, + "mtime": "2018-04-05 14:33:25.352485", + "local_mtime": "2018-04-05 14:33:25.353746", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x2eecc539", + "expected_object_size": 0, + 
"expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "size": 1, + "errors": [ + "obj_size_info_mismatch" + ], + "osd": 1, + "primary": true + } + ], + "selected_object_info": { + "oid": { + "oid": "ROBJ9", + "key": "", + "snapid": -2, + "hash": 537189375, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "95'67", + "prior_version": "51'64", + "last_reqid": "client.4649.0:1", + "user_version": 80, + "size": 1, + "mtime": "", + "local_mtime": "", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2b63260d", + "omap_digest": "0x2eecc539", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "obj_size_info_mismatch" + ], + "errors": [ + "object_info_inconsistency" + ], + "object": { + "version": 67, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ9" + } + } + ], + "epoch": 0 +} +EOF + + jq "$jqfilter" $dir/json | jq '.inconsistents' | python -c "$sortkeys" > $dir/csjson + multidiff $dir/checkcsjson $dir/csjson || test $getjson = "yes" || return 1 + if test $getjson = "yes" + then + jq '.' $dir/json > save1.json + fi + + if test "$LOCALRUN" = "yes" && which jsonschema > /dev/null; + then + jsonschema -i $dir/json $CEPH_ROOT/doc/rados/command/list-inconsistent-obj.json || return 1 + fi + + objname=ROBJ9 + # Change data and size again because digest was recomputed + echo -n ZZZ > $dir/change + rados --pool $poolname put $objname $dir/change + # Set one to an even older value + objectstore_tool $dir 0 $objname set-attr _ $dir/robj9-oi + rm $dir/oi $dir/change + + objname=ROBJ10 + objectstore_tool $dir 1 $objname get-attr _ > $dir/oi + rados --pool $poolname setomapval $objname key2-$objname val2-$objname + objectstore_tool $dir 0 $objname set-attr _ $dir/oi + objectstore_tool $dir 1 $objname set-attr _ $dir/oi + rm $dir/oi + + inject_eio rep data $poolname ROBJ11 $dir 0 || return 1 # shard 0 of [1, 0], osd.1 + inject_eio rep mdata $poolname ROBJ12 $dir 1 || return 1 # shard 1 of [1, 0], osd.0 + inject_eio rep mdata $poolname ROBJ13 $dir 1 || return 1 # shard 1 of [1, 0], osd.0 + inject_eio rep data $poolname ROBJ13 $dir 0 || return 1 # shard 0 of [1, 0], osd.1 + + # ROBJ19 won't error this time + ceph tell osd.\* injectargs -- --osd-max-object-size=134217728 + + pg_deep_scrub $pg + + err_strings=() + err_strings[0]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:30259878:::ROBJ15:head : candidate had a missing info key" + err_strings[1]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:33aca486:::ROBJ18:head : data_digest 0xbd89c912 != data_digest 0x2ddbf8f5 from auth oi 3:33aca486:::ROBJ18:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 54 dd 2ddbf8f5 od ddc3680f alloc_hint [[]0 0 255[]][)], object info inconsistent " + err_strings[2]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:33aca486:::ROBJ18:head : data_digest 0xbd89c912 != data_digest 0x2ddbf8f5 from auth oi 3:33aca486:::ROBJ18:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 54 dd 2ddbf8f5 od ddc3680f alloc_hint [[]0 0 255[]][)]" + err_strings[3]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:33aca486:::ROBJ18:head : failed to pick suitable auth object" + err_strings[4]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 
shard 1 soid 3:5c7b2c47:::ROBJ16:head : candidate had a corrupt snapset" + err_strings[5]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:5c7b2c47:::ROBJ16:head : candidate had a missing snapset key" + err_strings[6]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:5c7b2c47:::ROBJ16:head : failed to pick suitable object info" + err_strings[7]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:86586531:::ROBJ8:head : attr value mismatch '_key1-ROBJ8', attr name mismatch '_key3-ROBJ8', attr name mismatch '_key2-ROBJ8'" + err_strings[8]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:87abbf36:::ROBJ11:head : candidate had a read error" + err_strings[9]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:8aa5320e:::ROBJ17:head : data_digest 0x5af0c3ef != data_digest 0x2ddbf8f5 from auth oi 3:8aa5320e:::ROBJ17:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 51 dd 2ddbf8f5 od e9572720 alloc_hint [[]0 0 0[]][)]" + err_strings[10]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:8aa5320e:::ROBJ17:head : data_digest 0x5af0c3ef != data_digest 0x2ddbf8f5 from auth oi 3:8aa5320e:::ROBJ17:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 51 dd 2ddbf8f5 od e9572720 alloc_hint [[]0 0 0[]][)]" + err_strings[11]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:8aa5320e:::ROBJ17:head : failed to pick suitable auth object" + err_strings[12]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:8b55fa4b:::ROBJ7:head : omap_digest 0xefced57a != omap_digest 0x6a73cc07 from shard 1" + err_strings[13]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:8b55fa4b:::ROBJ7:head : omap_digest 0x6a73cc07 != omap_digest 0xefced57a from auth oi 3:8b55fa4b:::ROBJ7:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 21 dd 2ddbf8f5 od efced57a alloc_hint [[]0 0 0[]][)]" + err_strings[14]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:a53c12e8:::ROBJ6:head : omap_digest 0x689ee887 != omap_digest 0x179c919f from shard 1, omap_digest 0x689ee887 != omap_digest 0x179c919f from auth oi 3:a53c12e8:::ROBJ6:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 18 dd 2ddbf8f5 od 179c919f alloc_hint [[]0 0 0[]][)]" + err_strings[15]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:b1f19cbd:::ROBJ10:head : omap_digest 0xa8dd5adc != omap_digest 0xc2025a24 from auth oi 3:b1f19cbd:::ROBJ10:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 30 dd 2ddbf8f5 od c2025a24 alloc_hint [[]0 0 0[]][)]" + err_strings[16]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:b1f19cbd:::ROBJ10:head : omap_digest 0xa8dd5adc != omap_digest 0xc2025a24 from auth oi 3:b1f19cbd:::ROBJ10:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 30 dd 2ddbf8f5 od c2025a24 alloc_hint [[]0 0 0[]][)]" + err_strings[17]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:b1f19cbd:::ROBJ10:head : failed to pick suitable auth object" + err_strings[18]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:bc819597:::ROBJ12:head : candidate had a stat error" + err_strings[19]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:c0c86b1d:::ROBJ14:head : candidate had a missing info key" + err_strings[20]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:c0c86b1d:::ROBJ14:head : 
candidate had a corrupt info" + err_strings[21]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:c0c86b1d:::ROBJ14:head : failed to pick suitable object info" + err_strings[22]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:ce3f1d6a:::ROBJ1:head : candidate size 9 info size 7 mismatch" + err_strings[23]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:ce3f1d6a:::ROBJ1:head : data_digest 0x2d4a11c2 != data_digest 0x2ddbf8f5 from shard 0, data_digest 0x2d4a11c2 != data_digest 0x2ddbf8f5 from auth oi 3:ce3f1d6a:::ROBJ1:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 3 dd 2ddbf8f5 od f5fba2c6 alloc_hint [[]0 0 0[]][)], size 9 != size 7 from auth oi 3:ce3f1d6a:::ROBJ1:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 3 dd 2ddbf8f5 od f5fba2c6 alloc_hint [[]0 0 0[]][)], size 9 != size 7 from shard 0" + err_strings[24]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:d60617f9:::ROBJ13:head : candidate had a read error" + err_strings[25]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:d60617f9:::ROBJ13:head : candidate had a stat error" + err_strings[26]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:d60617f9:::ROBJ13:head : failed to pick suitable object info" + err_strings[27]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:e97ce31e:::ROBJ2:head : data_digest 0x578a4830 != data_digest 0x2ddbf8f5 from shard 1, data_digest 0x578a4830 != data_digest 0x2ddbf8f5 from auth oi 3:e97ce31e:::ROBJ2:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 6 dd 2ddbf8f5 od f8e11918 alloc_hint [[]0 0 0[]][)]" + err_strings[28]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 3:f2a5b2a4:::ROBJ3:head : missing" + err_strings[29]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:f4981d31:::ROBJ4:head : omap_digest 0xd7178dfe != omap_digest 0xe2d46ea4 from shard 1, omap_digest 0xd7178dfe != omap_digest 0xe2d46ea4 from auth oi 3:f4981d31:::ROBJ4:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 12 dd 2ddbf8f5 od e2d46ea4 alloc_hint [[]0 0 0[]][)]" + err_strings[30]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid 3:f4bfd4d1:::ROBJ5:head : omap_digest 0x1a862a41 != omap_digest 0x6cac8f6 from shard 1" + err_strings[31]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 1 soid 3:f4bfd4d1:::ROBJ5:head : omap_digest 0x6cac8f6 != omap_digest 0x1a862a41 from auth oi 3:f4bfd4d1:::ROBJ5:head[(][0-9]*'[0-9]* osd.1.0:[0-9]* dirty|omap|data_digest|omap_digest s 7 uv 15 dd 2ddbf8f5 od 1a862a41 alloc_hint [[]0 0 0[]][)]" + err_strings[32]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:ffdb2004:::ROBJ9:head : candidate size 3 info size 7 mismatch" + err_strings[33]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard 0 soid 3:ffdb2004:::ROBJ9:head : object info inconsistent " + err_strings[34]="log_channel[(]cluster[)] log [[]ERR[]] : deep-scrub [0-9]*[.]0 3:c0c86b1d:::ROBJ14:head : no '_' attr" + err_strings[35]="log_channel[(]cluster[)] log [[]ERR[]] : deep-scrub [0-9]*[.]0 3:5c7b2c47:::ROBJ16:head : can't decode 'snapset' attr buffer::malformed_input: .* no longer understand old encoding version 3 < 97" + err_strings[36]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 deep-scrub : stat mismatch, got 19/19 objects, 0/0 clones, 18/19 dirty, 18/19 omap, 0/0 pinned, 0/0 hit_set_archive, 0/0 whiteouts, 1049715/1049716 bytes, 
0/0 manifest objects, 0/0 hit_set_archive bytes." + err_strings[37]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 deep-scrub 1 missing, 11 inconsistent objects" + err_strings[38]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 deep-scrub 35 errors" + + for err_string in "${err_strings[@]}" + do + if ! grep -q "$err_string" $dir/osd.${primary}.log + then + echo "Missing log message '$err_string'" + ERRORS=$(expr $ERRORS + 1) + fi + done + + rados list-inconsistent-pg $poolname > $dir/json || return 1 + # Check pg count + test $(jq '. | length' $dir/json) = "1" || return 1 + # Check pgid + test $(jq -r '.[0]' $dir/json) = $pg || return 1 + + rados list-inconsistent-obj $pg > $dir/json || return 1 + # Get epoch for repair-get requests + epoch=$(jq .epoch $dir/json) + + jq "$jqfilter" << EOF | jq '.inconsistents' | python -c "$sortkeys" > $dir/checkcsjson +{ + "inconsistents": [ + { + "shards": [ + { + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xf5fba2c6", + "size": 7, + "errors": [], + "osd": 0, + "primary": false + }, + { + "object_info": { + "oid": { + "oid": "ROBJ1", + "key": "", + "snapid": -2, + "hash": 1454963827, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "51'58", + "prior_version": "21'3", + "last_reqid": "osd.1.0:57", + "user_version": 3, + "size": 7, + "mtime": "2018-04-05 14:33:19.804040", + "local_mtime": "2018-04-05 14:33:19.804839", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xf5fba2c6", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "data_digest": "0x2d4a11c2", + "omap_digest": "0xf5fba2c6", + "size": 9, + "errors": [ + "data_digest_mismatch_info", + "size_mismatch_info", + "obj_size_info_mismatch" + ], + "osd": 1, + "primary": true + } + ], + "selected_object_info": { + "oid": { + "oid": "ROBJ1", + "key": "", + "snapid": -2, + "hash": 1454963827, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "51'58", + "prior_version": "21'3", + "last_reqid": "osd.1.0:57", + "user_version": 3, + "size": 7, + "mtime": "2018-04-05 14:33:19.804040", + "local_mtime": "2018-04-05 14:33:19.804839", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xf5fba2c6", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "data_digest_mismatch_info", + "size_mismatch_info", + "obj_size_info_mismatch" + ], + "errors": [ + "data_digest_mismatch", + "size_mismatch" + ], + "object": { + "version": 3, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ1" + } + }, + { + "shards": [ + { + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xa8dd5adc", + "size": 7, + "errors": [ + "omap_digest_mismatch_info" + ], + "osd": 0, + "primary": false + }, + { + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xa8dd5adc", + "size": 7, + "errors": [ + "omap_digest_mismatch_info" + ], + "osd": 1, + "primary": true + } + ], + "selected_object_info": { + "alloc_hint_flags": 0, + "data_digest": "0x2ddbf8f5", + "expected_object_size": 0, + "expected_write_size": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "lost": 0, + "manifest": { + "type": 0 + }, + "oid": { + "hash": 3174666125, + 
"key": "", + "max": 0, + "namespace": "", + "oid": "ROBJ10", + "pool": 3, + "snapid": -2 + }, + "omap_digest": "0xc2025a24", + "size": 7, + "truncate_seq": 0, + "truncate_size": 0, + "user_version": 30, + "watchers": {} + }, + "union_shard_errors": [ + "omap_digest_mismatch_info" + ], + "errors": [], + "object": { + "version": 30, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ10" + } + }, + { + "shards": [ + { + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xa03cef03", + "size": 7, + "errors": [], + "osd": 0, + "primary": false + }, + { + "size": 7, + "errors": [ + "read_error" + ], + "osd": 1, + "primary": true + } + ], + "selected_object_info": { + "oid": { + "oid": "ROBJ11", + "key": "", + "snapid": -2, + "hash": 1828574689, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "51'52", + "prior_version": "41'33", + "last_reqid": "osd.1.0:51", + "user_version": 33, + "size": 7, + "mtime": "2018-04-05 14:33:26.761286", + "local_mtime": "2018-04-05 14:33:26.762368", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xa03cef03", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "read_error" + ], + "errors": [], + "object": { + "version": 33, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ11" + } + }, + { + "shards": [ + { + "errors": [ + "stat_error" + ], + "osd": 0, + "primary": false + }, + { + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x067f306a", + "size": 7, + "errors": [], + "osd": 1, + "primary": true + } + ], + "selected_object_info": { + "oid": { + "oid": "ROBJ12", + "key": "", + "snapid": -2, + "hash": 3920199997, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "51'56", + "prior_version": "43'36", + "last_reqid": "osd.1.0:55", + "user_version": 36, + "size": 7, + "mtime": "2018-04-05 14:33:27.460958", + "local_mtime": "2018-04-05 14:33:27.462109", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x067f306a", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "stat_error" + ], + "errors": [], + "object": { + "version": 36, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ12" + } + }, + { + "shards": [ + { + "errors": [ + "stat_error" + ], + "osd": 0, + "primary": false + }, + { + "size": 7, + "errors": [ + "read_error" + ], + "osd": 1, + "primary": true + } + ], + "union_shard_errors": [ + "stat_error", + "read_error" + ], + "errors": [], + "object": { + "version": 0, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ13" + } + }, + { + "shards": [ + { + "object_info": "bad-val", + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x4f14f849", + "size": 7, + "errors": [ + "info_corrupted" + ], + "osd": 0, + "primary": false + }, + { + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x4f14f849", + "size": 7, + "errors": [ + "info_missing" + ], + "osd": 1, + "primary": true + } + ], + "union_shard_errors": [ + "info_missing", + "info_corrupted" + ], + "errors": [], + "object": { + "version": 0, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ14" + } + }, + { + "shards": [ + { + "object_info": { + 
"oid": { + "oid": "ROBJ15", + "key": "", + "snapid": -2, + "hash": 504996876, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "51'49", + "prior_version": "49'45", + "last_reqid": "osd.1.0:48", + "user_version": 45, + "size": 7, + "mtime": "2018-04-05 14:33:29.498969", + "local_mtime": "2018-04-05 14:33:29.499890", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x2d2a4d6e", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x2d2a4d6e", + "size": 7, + "errors": [], + "osd": 0, + "primary": false + }, + { + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x2d2a4d6e", + "size": 7, + "errors": [ + "info_missing" + ], + "osd": 1, + "primary": true + } + ], + "selected_object_info": { + "oid": { + "oid": "ROBJ15", + "key": "", + "snapid": -2, + "hash": 504996876, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "51'49", + "prior_version": "49'45", + "last_reqid": "osd.1.0:48", + "user_version": 45, + "size": 7, + "mtime": "2018-04-05 14:33:29.498969", + "local_mtime": "2018-04-05 14:33:29.499890", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x2d2a4d6e", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "info_missing" + ], + "errors": [], + "object": { + "version": 45, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ15" + } + }, + { + "errors": [], + "object": { + "locator": "", + "name": "ROBJ16", + "nspace": "", + "snap": "head", + "version": 0 + }, + "shards": [ + { + "data_digest": "0x2ddbf8f5", + "errors": [ + "snapset_missing" + ], + "omap_digest": "0x8b699207", + "osd": 0, + "primary": false, + "size": 7 + }, + { + "snapset": "bad-val", + "data_digest": "0x2ddbf8f5", + "errors": [ + "snapset_corrupted" + ], + "omap_digest": "0x8b699207", + "osd": 1, + "primary": true, + "size": 7 + } + ], + "union_shard_errors": [ + "snapset_missing", + "snapset_corrupted" + ] + }, + { + "errors": [], + "object": { + "locator": "", + "name": "ROBJ17", + "nspace": "", + "snap": "head" + }, + "selected_object_info": { + "alloc_hint_flags": 0, + "data_digest": "0x2ddbf8f5", + "expected_object_size": 0, + "expected_write_size": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "lost": 0, + "manifest": { + "type": 0 + }, + "oid": { + "hash": 1884071249, + "key": "", + "max": 0, + "namespace": "", + "oid": "ROBJ17", + "pool": 3, + "snapid": -2 + }, + "omap_digest": "0xe9572720", + "size": 7, + "truncate_seq": 0, + "truncate_size": 0, + "user_version": 51, + "watchers": {} + }, + "shards": [ + { + "data_digest": "0x5af0c3ef", + "errors": [ + "data_digest_mismatch_info" + ], + "omap_digest": "0xe9572720", + "osd": 0, + "primary": false, + "size": 7 + }, + { + "data_digest": "0x5af0c3ef", + "errors": [ + "data_digest_mismatch_info" + ], + "omap_digest": "0xe9572720", + "osd": 1, + "primary": true, + "size": 7 + } + ], + "union_shard_errors": [ + "data_digest_mismatch_info" + ] + }, + { + "errors": [ + "object_info_inconsistency" + ], + "object": { + "locator": "", + "name": "ROBJ18", + "nspace": "", + "snap": "head" + }, + 
"selected_object_info": { + "alloc_hint_flags": 255, + "data_digest": "0x2ddbf8f5", + "expected_object_size": 0, + "expected_write_size": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "lost": 0, + "manifest": { + "type": 0 + }, + "oid": { + "hash": 1629828556, + "key": "", + "max": 0, + "namespace": "", + "oid": "ROBJ18", + "pool": 3, + "snapid": -2 + }, + "omap_digest": "0xddc3680f", + "size": 7, + "truncate_seq": 0, + "truncate_size": 0, + "user_version": 54, + "watchers": {} + }, + "shards": [ + { + "data_digest": "0xbd89c912", + "errors": [ + "data_digest_mismatch_info" + ], + "object_info": { + "alloc_hint_flags": 0, + "data_digest": "0x2ddbf8f5", + "expected_object_size": 0, + "expected_write_size": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "lost": 0, + "manifest": { + "type": 0 + }, + "oid": { + "hash": 1629828556, + "key": "", + "max": 0, + "namespace": "", + "oid": "ROBJ18", + "pool": 3, + "snapid": -2 + }, + "omap_digest": "0xddc3680f", + "size": 7, + "truncate_seq": 0, + "truncate_size": 0, + "user_version": 54, + "watchers": {} + }, + "omap_digest": "0xddc3680f", + "osd": 0, + "primary": false, + "size": 7 + }, + { + "data_digest": "0xbd89c912", + "errors": [ + "data_digest_mismatch_info" + ], + "object_info": { + "alloc_hint_flags": 255, + "data_digest": "0x2ddbf8f5", + "expected_object_size": 0, + "expected_write_size": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "lost": 0, + "manifest": { + "type": 0 + }, + "oid": { + "hash": 1629828556, + "key": "", + "max": 0, + "namespace": "", + "oid": "ROBJ18", + "pool": 3, + "snapid": -2 + }, + "omap_digest": "0xddc3680f", + "size": 7, + "truncate_seq": 0, + "truncate_size": 0, + "user_version": 54, + "watchers": {} + }, + "omap_digest": "0xddc3680f", + "osd": 1, + "primary": true, + "size": 7 + } + ], + "union_shard_errors": [ + "data_digest_mismatch_info" + ] + }, + { + "shards": [ + { + "data_digest": "0x578a4830", + "omap_digest": "0xf8e11918", + "size": 7, + "errors": [ + "data_digest_mismatch_info" + ], + "osd": 0, + "primary": false + }, + { + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xf8e11918", + "size": 7, + "errors": [], + "osd": 1, + "primary": true + } + ], + "selected_object_info": { + "oid": { + "oid": "ROBJ2", + "key": "", + "snapid": -2, + "hash": 2026323607, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "51'60", + "prior_version": "23'6", + "last_reqid": "osd.1.0:59", + "user_version": 6, + "size": 7, + "mtime": "2018-04-05 14:33:20.498756", + "local_mtime": "2018-04-05 14:33:20.499704", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xf8e11918", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "data_digest_mismatch_info" + ], + "errors": [ + "data_digest_mismatch" + ], + "object": { + "version": 6, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ2" + } + }, + { + "shards": [ + { + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x00b35dfd", + "size": 7, + "errors": [], + "osd": 0, + "primary": false + }, + { + "errors": [ + "missing" + ], + "osd": 1, + "primary": true + } + ], + "selected_object_info": { + "oid": { + "oid": "ROBJ3", + "key": "", + "snapid": -2, + "hash": 625845583, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": 
"51'61", + "prior_version": "25'9", + "last_reqid": "osd.1.0:60", + "user_version": 9, + "size": 7, + "mtime": "2018-04-05 14:33:21.189382", + "local_mtime": "2018-04-05 14:33:21.190446", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x00b35dfd", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "missing" + ], + "errors": [], + "object": { + "version": 9, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ3" + } + }, + { + "shards": [ + { + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xd7178dfe", + "size": 7, + "errors": [ + "omap_digest_mismatch_info" + ], + "osd": 0, + "primary": false + }, + { + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xe2d46ea4", + "size": 7, + "errors": [], + "osd": 1, + "primary": true + } + ], + "selected_object_info": { + "oid": { + "oid": "ROBJ4", + "key": "", + "snapid": -2, + "hash": 2360875311, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "51'62", + "prior_version": "27'12", + "last_reqid": "osd.1.0:61", + "user_version": 12, + "size": 7, + "mtime": "2018-04-05 14:33:21.862313", + "local_mtime": "2018-04-05 14:33:21.863261", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xe2d46ea4", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "omap_digest_mismatch_info" + ], + "errors": [ + "omap_digest_mismatch" + ], + "object": { + "version": 12, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ4" + } + }, + { + "shards": [ + { + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x1a862a41", + "size": 7, + "errors": [], + "osd": 0, + "primary": false + }, + { + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x06cac8f6", + "size": 7, + "errors": [ + "omap_digest_mismatch_info" + ], + "osd": 1, + "primary": true + } + ], + "selected_object_info": { + "oid": { + "oid": "ROBJ5", + "key": "", + "snapid": -2, + "hash": 2334915887, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "51'63", + "prior_version": "29'15", + "last_reqid": "osd.1.0:62", + "user_version": 15, + "size": 7, + "mtime": "2018-04-05 14:33:22.589300", + "local_mtime": "2018-04-05 14:33:22.590376", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x1a862a41", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "omap_digest_mismatch_info" + ], + "errors": [ + "omap_digest_mismatch" + ], + "object": { + "version": 15, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ5" + } + }, + { + "shards": [ + { + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x689ee887", + "size": 7, + "errors": [ + "omap_digest_mismatch_info" + ], + "osd": 0, + "primary": false + }, + { + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x179c919f", + "size": 7, + "errors": [], + "osd": 1, + "primary": true + } + ], + "selected_object_info": { + "oid": { + "oid": "ROBJ6", + "key": "", + "snapid": -2, + "hash": 390610085, + "max": 0, + 
"pool": 3, + "namespace": "" + }, + "version": "51'54", + "prior_version": "31'18", + "last_reqid": "osd.1.0:53", + "user_version": 18, + "size": 7, + "mtime": "2018-04-05 14:33:23.289188", + "local_mtime": "2018-04-05 14:33:23.290130", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x179c919f", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "omap_digest_mismatch_info" + ], + "errors": [ + "omap_digest_mismatch" + ], + "object": { + "version": 18, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ6" + } + }, + { + "shards": [ + { + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xefced57a", + "size": 7, + "errors": [], + "osd": 0, + "primary": false + }, + { + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x6a73cc07", + "size": 7, + "errors": [ + "omap_digest_mismatch_info" + ], + "osd": 1, + "primary": true + } + ], + "selected_object_info": { + "oid": { + "oid": "ROBJ7", + "key": "", + "snapid": -2, + "hash": 3529485009, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "51'53", + "prior_version": "33'21", + "last_reqid": "osd.1.0:52", + "user_version": 21, + "size": 7, + "mtime": "2018-04-05 14:33:23.979658", + "local_mtime": "2018-04-05 14:33:23.980731", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xefced57a", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "omap_digest_mismatch_info" + ], + "errors": [ + "omap_digest_mismatch" + ], + "object": { + "version": 21, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ7" + } + }, + { + "shards": [ + { + "attrs": [ + { + "Base64": false, + "value": "bad-val", + "name": "key1-ROBJ8" + }, + { + "Base64": false, + "value": "val2-ROBJ8", + "name": "key2-ROBJ8" + } + ], + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xd6be81dc", + "size": 7, + "errors": [], + "osd": 0, + "primary": false + }, + { + "attrs": [ + { + "Base64": false, + "value": "val1-ROBJ8", + "name": "key1-ROBJ8" + }, + { + "Base64": false, + "value": "val3-ROBJ8", + "name": "key3-ROBJ8" + } + ], + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xd6be81dc", + "size": 7, + "errors": [], + "osd": 1, + "primary": true + } + ], + "selected_object_info": { + "oid": { + "oid": "ROBJ8", + "key": "", + "snapid": -2, + "hash": 2359695969, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "79'66", + "prior_version": "79'65", + "last_reqid": "client.4554.0:1", + "user_version": 79, + "size": 7, + "mtime": "2018-04-05 14:34:05.598688", + "local_mtime": "2018-04-05 14:34:05.599698", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xd6be81dc", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [], + "errors": [ + "attr_value_mismatch", + "attr_name_mismatch" + ], + "object": { + "version": 66, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ8" + } + }, + { + "shards": [ + { + "object_info": { + 
"oid": { + "oid": "ROBJ9", + "key": "", + "snapid": -2, + "hash": 537189375, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "51'64", + "prior_version": "37'27", + "last_reqid": "osd.1.0:63", + "user_version": 27, + "size": 7, + "mtime": "2018-04-05 14:33:25.352485", + "local_mtime": "2018-04-05 14:33:25.353746", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0x2eecc539", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "data_digest": "0x1f26fb26", + "omap_digest": "0x2eecc539", + "size": 3, + "errors": [ + "obj_size_info_mismatch" + ], + "osd": 0, + "primary": false + }, + { + "object_info": { + "oid": { + "oid": "ROBJ9", + "key": "", + "snapid": -2, + "hash": 537189375, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "119'68", + "prior_version": "51'64", + "last_reqid": "client.4834.0:1", + "user_version": 81, + "size": 3, + "mtime": "2018-04-05 14:35:01.500659", + "local_mtime": "2018-04-05 14:35:01.502117", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x1f26fb26", + "omap_digest": "0x2eecc539", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "data_digest": "0x1f26fb26", + "omap_digest": "0x2eecc539", + "size": 3, + "errors": [], + "osd": 1, + "primary": true + } + ], + "selected_object_info": { + "oid": { + "oid": "ROBJ9", + "key": "", + "snapid": -2, + "hash": 537189375, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "119'68", + "prior_version": "51'64", + "last_reqid": "client.4834.0:1", + "user_version": 81, + "size": 3, + "mtime": "2018-04-05 14:35:01.500659", + "local_mtime": "2018-04-05 14:35:01.502117", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest", + "omap_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x1f26fb26", + "omap_digest": "0x2eecc539", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "obj_size_info_mismatch" + ], + "errors": [ + "object_info_inconsistency" + ], + "object": { + "version": 68, + "snap": "head", + "locator": "", + "nspace": "", + "name": "ROBJ9" + } + } + ], + "epoch": 0 +} +EOF + + jq "$jqfilter" $dir/json | jq '.inconsistents' | python -c "$sortkeys" > $dir/csjson + multidiff $dir/checkcsjson $dir/csjson || test $getjson = "yes" || return 1 + if test $getjson = "yes" + then + jq '.' 
$dir/json > save2.json + fi + + if test "$LOCALRUN" = "yes" && which jsonschema > /dev/null; + then + jsonschema -i $dir/json $CEPH_ROOT/doc/rados/command/list-inconsistent-obj.json || return 1 + fi + + repair $pg + wait_for_clean + + # This hangs if the repair doesn't work + timeout 30 rados -p $poolname get ROBJ17 $dir/robj17.out || return 1 + timeout 30 rados -p $poolname get ROBJ18 $dir/robj18.out || return 1 + # Even though we couldn't repair all of the introduced errors, we can fix ROBJ17 + diff -q $dir/new.ROBJ17 $dir/robj17.out || return 1 + rm -f $dir/new.ROBJ17 $dir/robj17.out || return 1 + diff -q $dir/new.ROBJ18 $dir/robj18.out || return 1 + rm -f $dir/new.ROBJ18 $dir/robj18.out || return 1 + + if [ $ERRORS != "0" ]; + then + echo "TEST FAILED WITH $ERRORS ERRORS" + return 1 + fi + + ceph osd pool rm $poolname $poolname --yes-i-really-really-mean-it + teardown $dir || return 1 +} + + +# +# Test scrub errors for an erasure coded pool +# +function corrupt_scrub_erasure() { + local dir=$1 + local allow_overwrites=$2 + local poolname=ecpool + local total_objs=7 + + setup $dir || return 1 + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + for id in $(seq 0 2) ; do + if [ "$allow_overwrites" = "true" ]; then + run_osd $dir $id || return 1 + else + run_osd_filestore $dir $id || return 1 + fi + done + create_rbd_pool || return 1 + create_pool foo 1 + + create_ec_pool $poolname $allow_overwrites k=2 m=1 stripe_unit=2K --force || return 1 + wait_for_clean || return 1 + + for i in $(seq 1 $total_objs) ; do + objname=EOBJ${i} + add_something $dir $poolname $objname || return 1 + + local osd=$(expr $i % 2) + + case $i in + 1) + # Size (deep scrub data_digest too) + local payload=UVWXYZZZ + echo $payload > $dir/CORRUPT + objectstore_tool $dir $osd $objname set-bytes $dir/CORRUPT || return 1 + ;; + + 2) + # Corrupt EC shard + dd if=/dev/urandom of=$dir/CORRUPT bs=2048 count=1 + objectstore_tool $dir $osd $objname set-bytes $dir/CORRUPT || return 1 + ;; + + 3) + # missing + objectstore_tool $dir $osd $objname remove || return 1 + ;; + + 4) + rados --pool $poolname setxattr $objname key1-$objname val1-$objname || return 1 + rados --pool $poolname setxattr $objname key2-$objname val2-$objname || return 1 + + # Break xattrs + echo -n bad-val > $dir/bad-val + objectstore_tool $dir $osd $objname set-attr _key1-$objname $dir/bad-val || return 1 + objectstore_tool $dir $osd $objname rm-attr _key2-$objname || return 1 + echo -n val3-$objname > $dir/newval + objectstore_tool $dir $osd $objname set-attr _key3-$objname $dir/newval || return 1 + rm $dir/bad-val $dir/newval + ;; + + 5) + # Corrupt EC shard + dd if=/dev/urandom of=$dir/CORRUPT bs=2048 count=2 + objectstore_tool $dir $osd $objname set-bytes $dir/CORRUPT || return 1 + ;; + + 6) + objectstore_tool $dir 0 $objname rm-attr hinfo_key || return 1 + echo -n bad-val > $dir/bad-val + objectstore_tool $dir 1 $objname set-attr hinfo_key $dir/bad-val || return 1 + ;; + + 7) + local payload=MAKETHISDIFFERENTFROMOTHEROBJECTS + echo $payload > $dir/DIFFERENT + rados --pool $poolname put $objname $dir/DIFFERENT || return 1 + + # Get hinfo_key from EOBJ1 + objectstore_tool $dir 0 EOBJ1 get-attr hinfo_key > $dir/hinfo + objectstore_tool $dir 0 $objname set-attr hinfo_key $dir/hinfo || return 1 + rm -f $dir/hinfo + ;; + + esac + done + + local pg=$(get_pg $poolname EOBJ0) + + pg_scrub $pg + + rados list-inconsistent-pg $poolname > $dir/json || return 1 + # Check pg count + test $(jq '. 
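# Each case above damages exactly one EC shard through the objectstore_tool helper from
# ceph-helpers.sh, which (roughly) stops the owning OSD, runs ceph-objectstore-tool
# against its data path, and restarts it. A hand-run sketch of the set-bytes corruption,
# assuming the OSD is already stopped (osd id, object name and paths are illustrative):
#
#   ceph-objectstore-tool --data-path $dir/0 EOBJ2 set-bytes $dir/CORRUPT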
| length' $dir/json) = "1" || return 1 + # Check pgid + test $(jq -r '.[0]' $dir/json) = $pg || return 1 + + rados list-inconsistent-obj $pg > $dir/json || return 1 + # Get epoch for repair-get requests + epoch=$(jq .epoch $dir/json) + + jq "$jqfilter" << EOF | jq '.inconsistents' | python -c "$sortkeys" > $dir/checkcsjson +{ + "inconsistents": [ + { + "shards": [ + { + "size": 2048, + "errors": [], + "shard": 2, + "osd": 0, + "primary": false + }, + { + "object_info": { + "oid": { + "oid": "EOBJ1", + "key": "", + "snapid": -2, + "hash": 560836233, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "27'1", + "prior_version": "0'0", + "last_reqid": "client.4184.0:1", + "user_version": 1, + "size": 7, + "mtime": "", + "local_mtime": "", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "size": 9, + "shard": 0, + "errors": [ + "size_mismatch_info", + "obj_size_info_mismatch" + ], + "osd": 1, + "primary": true + }, + { + "size": 2048, + "shard": 1, + "errors": [], + "osd": 2, + "primary": false + } + ], + "selected_object_info": { + "oid": { + "oid": "EOBJ1", + "key": "", + "snapid": -2, + "hash": 560836233, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "27'1", + "prior_version": "0'0", + "last_reqid": "client.4184.0:1", + "user_version": 1, + "size": 7, + "mtime": "", + "local_mtime": "", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "size_mismatch_info", + "obj_size_info_mismatch" + ], + "errors": [ + "size_mismatch" + ], + "object": { + "version": 1, + "snap": "head", + "locator": "", + "nspace": "", + "name": "EOBJ1" + } + }, + { + "shards": [ + { + "size": 2048, + "errors": [], + "shard": 2, + "osd": 0, + "primary": false + }, + { + "shard": 0, + "errors": [ + "missing" + ], + "osd": 1, + "primary": true + }, + { + "size": 2048, + "shard": 1, + "errors": [], + "osd": 2, + "primary": false + } + ], + "selected_object_info": { + "oid": { + "oid": "EOBJ3", + "key": "", + "snapid": -2, + "hash": 3125668237, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "39'3", + "prior_version": "0'0", + "last_reqid": "client.4252.0:1", + "user_version": 3, + "size": 7, + "mtime": "", + "local_mtime": "", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "missing" + ], + "errors": [], + "object": { + "version": 3, + "snap": "head", + "locator": "", + "nspace": "", + "name": "EOBJ3" + } + }, + { + "shards": [ + { + "attrs": [ + { + "Base64": false, + "value": "bad-val", + "name": "key1-EOBJ4" + }, + { + "Base64": false, + "value": "val2-EOBJ4", + "name": "key2-EOBJ4" + } + ], + "size": 2048, + "errors": [], + "shard": 2, + "osd": 0, + "primary": false + }, + { + "osd": 1, + "primary": true, + "shard": 0, + "errors": [], + "size": 2048, + "attrs": [ + { + "Base64": false, + 
"value": "val1-EOBJ4", + "name": "key1-EOBJ4" + }, + { + "Base64": false, + "value": "val2-EOBJ4", + "name": "key2-EOBJ4" + } + ] + }, + { + "osd": 2, + "primary": false, + "shard": 1, + "errors": [], + "size": 2048, + "attrs": [ + { + "Base64": false, + "value": "val1-EOBJ4", + "name": "key1-EOBJ4" + }, + { + "Base64": false, + "value": "val3-EOBJ4", + "name": "key3-EOBJ4" + } + ] + } + ], + "selected_object_info": { + "oid": { + "oid": "EOBJ4", + "key": "", + "snapid": -2, + "hash": 1618759290, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "45'6", + "prior_version": "45'5", + "last_reqid": "client.4294.0:1", + "user_version": 6, + "size": 7, + "mtime": "", + "local_mtime": "", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [], + "errors": [ + "attr_value_mismatch", + "attr_name_mismatch" + ], + "object": { + "version": 6, + "snap": "head", + "locator": "", + "nspace": "", + "name": "EOBJ4" + } + }, + { + "shards": [ + { + "size": 2048, + "errors": [], + "shard": 2, + "osd": 0, + "primary": false + }, + { + "object_info": { + "oid": { + "oid": "EOBJ5", + "key": "", + "snapid": -2, + "hash": 2918945441, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "59'7", + "prior_version": "0'0", + "last_reqid": "client.4382.0:1", + "user_version": 7, + "size": 7, + "mtime": "", + "local_mtime": "", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "size": 4096, + "shard": 0, + "errors": [ + "size_mismatch_info", + "obj_size_info_mismatch" + ], + "osd": 1, + "primary": true + }, + { + "size": 2048, + "shard": 1, + "errors": [], + "osd": 2, + "primary": false + } + ], + "selected_object_info": { + "oid": { + "oid": "EOBJ5", + "key": "", + "snapid": -2, + "hash": 2918945441, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "59'7", + "prior_version": "0'0", + "last_reqid": "client.4382.0:1", + "user_version": 7, + "size": 7, + "mtime": "", + "local_mtime": "", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "size_mismatch_info", + "obj_size_info_mismatch" + ], + "errors": [ + "size_mismatch" + ], + "object": { + "version": 7, + "snap": "head", + "locator": "", + "nspace": "", + "name": "EOBJ5" + } + }, + { + "errors": [], + "object": { + "locator": "", + "name": "EOBJ6", + "nspace": "", + "snap": "head", + "version": 8 + }, + "selected_object_info": { + "oid": { + "oid": "EOBJ6", + "key": "", + "snapid": -2, + "hash": 3050890866, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "65'8", + "prior_version": "0'0", + "last_reqid": "client.4418.0:1", + "user_version": 8, + "size": 7, + "mtime": "", + "local_mtime": "", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": 
"0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "shards": [ + { + "errors": [ + "hinfo_missing" + ], + "osd": 0, + "primary": false, + "shard": 2, + "size": 2048 + }, + { + "errors": [ + "hinfo_corrupted" + ], + "osd": 1, + "primary": true, + "shard": 0, + "hashinfo": "bad-val", + "size": 2048 + }, + { + "errors": [], + "osd": 2, + "primary": false, + "shard": 1, + "size": 2048, + "hashinfo": { + "cumulative_shard_hashes": [ + { + "hash": 80717615, + "shard": 0 + }, + { + "hash": 1534491824, + "shard": 1 + }, + { + "hash": 80717615, + "shard": 2 + } + ], + "total_chunk_size": 2048 + } + } + ], + "union_shard_errors": [ + "hinfo_missing", + "hinfo_corrupted" + ] + }, + { + "errors": [ + "hinfo_inconsistency" + ], + "object": { + "locator": "", + "name": "EOBJ7", + "nspace": "", + "snap": "head", + "version": 10 + }, + "selected_object_info": { + "oid": { + "oid": "EOBJ7", + "key": "", + "snapid": -2, + "hash": 3258066308, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "75'10", + "prior_version": "75'9", + "last_reqid": "client.4482.0:1", + "user_version": 10, + "size": 34, + "mtime": "", + "local_mtime": "", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x136e4e27", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "shards": [ + { + "hashinfo": { + "cumulative_shard_hashes": [ + { + "hash": 80717615, + "shard": 0 + }, + { + "hash": 1534491824, + "shard": 1 + }, + { + "hash": 80717615, + "shard": 2 + } + ], + "total_chunk_size": 2048 + }, + "errors": [], + "osd": 0, + "primary": false, + "shard": 2, + "size": 2048 + }, + { + "hashinfo": { + "cumulative_shard_hashes": [ + { + "hash": 1534350760, + "shard": 0 + }, + { + "hash": 1534491824, + "shard": 1 + }, + { + "hash": 1534350760, + "shard": 2 + } + ], + "total_chunk_size": 2048 + }, + "errors": [], + "osd": 1, + "primary": true, + "shard": 0, + "size": 2048 + }, + { + "hashinfo": { + "cumulative_shard_hashes": [ + { + "hash": 1534350760, + "shard": 0 + }, + { + "hash": 1534491824, + "shard": 1 + }, + { + "hash": 1534350760, + "shard": 2 + } + ], + "total_chunk_size": 2048 + }, + "errors": [], + "osd": 2, + "primary": false, + "shard": 1, + "size": 2048 + } + ], + "union_shard_errors": [] + } + ], + "epoch": 0 +} +EOF + + jq "$jqfilter" $dir/json | jq '.inconsistents' | python -c "$sortkeys" > $dir/csjson + multidiff $dir/checkcsjson $dir/csjson || test $getjson = "yes" || return 1 + if test $getjson = "yes" + then + jq '.' $dir/json > save3.json + fi + + if test "$LOCALRUN" = "yes" && which jsonschema > /dev/null; + then + jsonschema -i $dir/json $CEPH_ROOT/doc/rados/command/list-inconsistent-obj.json || return 1 + fi + + pg_deep_scrub $pg + + rados list-inconsistent-pg $poolname > $dir/json || return 1 + # Check pg count + test $(jq '. 
| length' $dir/json) = "1" || return 1 + # Check pgid + test $(jq -r '.[0]' $dir/json) = $pg || return 1 + + rados list-inconsistent-obj $pg > $dir/json || return 1 + # Get epoch for repair-get requests + epoch=$(jq .epoch $dir/json) + + if [ "$allow_overwrites" = "true" ] + then + jq "$jqfilter" << EOF | jq '.inconsistents' | python -c "$sortkeys" > $dir/checkcsjson +{ + "inconsistents": [ + { + "shards": [ + { + "data_digest": "0x00000000", + "omap_digest": "0xffffffff", + "size": 2048, + "errors": [], + "shard": 2, + "osd": 0, + "primary": false + }, + { + "object_info": { + "oid": { + "oid": "EOBJ1", + "key": "", + "snapid": -2, + "hash": 560836233, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "27'1", + "prior_version": "0'0", + "last_reqid": "client.4184.0:1", + "user_version": 1, + "size": 7, + "mtime": "2018-04-05 14:31:33.837147", + "local_mtime": "2018-04-05 14:31:33.840763", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "size": 9, + "shard": 0, + "errors": [ + "read_error", + "size_mismatch_info", + "obj_size_info_mismatch" + ], + "osd": 1, + "primary": true + }, + { + "data_digest": "0x00000000", + "omap_digest": "0xffffffff", + "size": 2048, + "shard": 1, + "errors": [], + "osd": 2, + "primary": false + } + ], + "selected_object_info": { + "oid": { + "oid": "EOBJ1", + "key": "", + "snapid": -2, + "hash": 560836233, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "27'1", + "prior_version": "0'0", + "last_reqid": "client.4184.0:1", + "user_version": 1, + "size": 7, + "mtime": "2018-04-05 14:31:33.837147", + "local_mtime": "2018-04-05 14:31:33.840763", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "read_error", + "size_mismatch_info", + "obj_size_info_mismatch" + ], + "errors": [ + "size_mismatch" + ], + "object": { + "version": 1, + "snap": "head", + "locator": "", + "nspace": "", + "name": "EOBJ1" + } + }, + { + "shards": [ + { + "data_digest": "0x00000000", + "omap_digest": "0xffffffff", + "size": 2048, + "errors": [], + "shard": 2, + "osd": 0, + "primary": false + }, + { + "shard": 0, + "errors": [ + "missing" + ], + "osd": 1, + "primary": true + }, + { + "data_digest": "0x00000000", + "omap_digest": "0xffffffff", + "size": 2048, + "shard": 1, + "errors": [], + "osd": 2, + "primary": false + } + ], + "selected_object_info": { + "oid": { + "oid": "EOBJ3", + "key": "", + "snapid": -2, + "hash": 3125668237, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "39'3", + "prior_version": "0'0", + "last_reqid": "client.4252.0:1", + "user_version": 3, + "size": 7, + "mtime": "2018-04-05 14:31:46.841145", + "local_mtime": "2018-04-05 14:31:46.844996", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "missing" + ], + "errors": [], + "object": { + 
"version": 3, + "snap": "head", + "locator": "", + "nspace": "", + "name": "EOBJ3" + } + }, + { + "shards": [ + { + "attrs": [ + { + "Base64": false, + "value": "bad-val", + "name": "key1-EOBJ4" + }, + { + "Base64": false, + "value": "val2-EOBJ4", + "name": "key2-EOBJ4" + } + ], + "data_digest": "0x00000000", + "omap_digest": "0xffffffff", + "size": 2048, + "errors": [], + "shard": 2, + "osd": 0, + "primary": false + }, + { + "attrs": [ + { + "Base64": false, + "value": "val1-EOBJ4", + "name": "key1-EOBJ4" + }, + { + "Base64": false, + "value": "val2-EOBJ4", + "name": "key2-EOBJ4" + } + ], + "data_digest": "0x00000000", + "omap_digest": "0xffffffff", + "size": 2048, + "errors": [], + "shard": 0, + "osd": 1, + "primary": true + }, + { + "attrs": [ + { + "Base64": false, + "value": "val1-EOBJ4", + "name": "key1-EOBJ4" + }, + { + "Base64": false, + "value": "val3-EOBJ4", + "name": "key3-EOBJ4" + } + ], + "data_digest": "0x00000000", + "omap_digest": "0xffffffff", + "size": 2048, + "errors": [], + "shard": 1, + "osd": 2, + "primary": false + } + ], + "selected_object_info": { + "oid": { + "oid": "EOBJ4", + "key": "", + "snapid": -2, + "hash": 1618759290, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "45'6", + "prior_version": "45'5", + "last_reqid": "client.4294.0:1", + "user_version": 6, + "size": 7, + "mtime": "2018-04-05 14:31:54.663622", + "local_mtime": "2018-04-05 14:31:54.664527", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [], + "errors": [ + "attr_value_mismatch", + "attr_name_mismatch" + ], + "object": { + "version": 6, + "snap": "head", + "locator": "", + "nspace": "", + "name": "EOBJ4" + } + }, + { + "shards": [ + { + "data_digest": "0x00000000", + "omap_digest": "0xffffffff", + "size": 2048, + "errors": [], + "shard": 2, + "osd": 0, + "primary": false + }, + { + "data_digest": "0x00000000", + "omap_digest": "0xffffffff", + "object_info": { + "oid": { + "oid": "EOBJ5", + "key": "", + "snapid": -2, + "hash": 2918945441, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "59'7", + "prior_version": "0'0", + "last_reqid": "client.4382.0:1", + "user_version": 7, + "size": 7, + "mtime": "2018-04-05 14:32:12.929161", + "local_mtime": "2018-04-05 14:32:12.934707", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "size": 4096, + "errors": [ + "size_mismatch_info", + "obj_size_info_mismatch" + ], + "shard": 0, + "osd": 1, + "primary": true + }, + { + "data_digest": "0x00000000", + "omap_digest": "0xffffffff", + "size": 2048, + "errors": [], + "shard": 1, + "osd": 2, + "primary": false + } + ], + "selected_object_info": { + "oid": { + "oid": "EOBJ5", + "key": "", + "snapid": -2, + "hash": 2918945441, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "59'7", + "prior_version": "0'0", + "last_reqid": "client.4382.0:1", + "user_version": 7, + "size": 7, + "mtime": "2018-04-05 14:32:12.929161", + "local_mtime": "2018-04-05 14:32:12.934707", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 
0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "size_mismatch_info", + "obj_size_info_mismatch" + ], + "errors": [ + "size_mismatch" + ], + "object": { + "version": 7, + "snap": "head", + "locator": "", + "nspace": "", + "name": "EOBJ5" + } + }, + { + "object": { + "name": "EOBJ6", + "nspace": "", + "locator": "", + "snap": "head", + "version": 8 + }, + "errors": [], + "union_shard_errors": [ + "read_error", + "hinfo_missing", + "hinfo_corrupted" + ], + "selected_object_info": { + "oid": { + "oid": "EOBJ6", + "key": "", + "snapid": -2, + "hash": 3050890866, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "65'8", + "prior_version": "0'0", + "last_reqid": "client.4418.0:1", + "user_version": 8, + "size": 7, + "mtime": "2018-04-05 14:32:20.634116", + "local_mtime": "2018-04-05 14:32:20.637999", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "shards": [ + { + "osd": 0, + "primary": false, + "shard": 2, + "errors": [ + "read_error", + "hinfo_missing" + ], + "size": 2048 + }, + { + "osd": 1, + "primary": true, + "shard": 0, + "errors": [ + "read_error", + "hinfo_corrupted" + ], + "size": 2048, + "hashinfo": "bad-val" + }, + { + "osd": 2, + "primary": false, + "shard": 1, + "errors": [], + "size": 2048, + "omap_digest": "0xffffffff", + "data_digest": "0x00000000", + "hashinfo": { + "cumulative_shard_hashes": [ + { + "hash": 80717615, + "shard": 0 + }, + { + "hash": 1534491824, + "shard": 1 + }, + { + "hash": 80717615, + "shard": 2 + } + ], + "total_chunk_size": 2048 + } + } + ] + }, + { + "object": { + "name": "EOBJ7", + "nspace": "", + "locator": "", + "snap": "head", + "version": 10 + }, + "errors": [ + "hinfo_inconsistency" + ], + "union_shard_errors": [], + "selected_object_info": { + "oid": { + "oid": "EOBJ7", + "key": "", + "snapid": -2, + "hash": 3258066308, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "75'10", + "prior_version": "75'9", + "last_reqid": "client.4482.0:1", + "user_version": 10, + "size": 34, + "mtime": "2018-04-05 14:32:33.058782", + "local_mtime": "2018-04-05 14:32:33.059679", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x136e4e27", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "shards": [ + { + "osd": 0, + "primary": false, + "shard": 2, + "errors": [], + "size": 2048, + "omap_digest": "0xffffffff", + "data_digest": "0x00000000", + "hashinfo": { + "cumulative_shard_hashes": [ + { + "hash": 80717615, + "shard": 0 + }, + { + "hash": 1534491824, + "shard": 1 + }, + { + "hash": 80717615, + "shard": 2 + } + ], + "total_chunk_size": 2048 + } + }, + { + "osd": 1, + "primary": true, + "shard": 0, + "errors": [], + "size": 2048, + "omap_digest": "0xffffffff", + "data_digest": "0x00000000", + "hashinfo": { + "cumulative_shard_hashes": [ + { + "hash": 1534350760, + "shard": 0 + }, + { + "hash": 1534491824, + "shard": 1 + }, + { + "hash": 1534350760, + "shard": 2 + } + ], + "total_chunk_size": 2048 + } + }, + { + "osd": 2, + 
"primary": false, + "shard": 1, + "errors": [], + "size": 2048, + "omap_digest": "0xffffffff", + "data_digest": "0x00000000", + "hashinfo": { + "cumulative_shard_hashes": [ + { + "hash": 1534350760, + "shard": 0 + }, + { + "hash": 1534491824, + "shard": 1 + }, + { + "hash": 1534350760, + "shard": 2 + } + ], + "total_chunk_size": 2048 + } + } + ] + } + ], + "epoch": 0 +} +EOF + + else + + jq "$jqfilter" << EOF | jq '.inconsistents' | python -c "$sortkeys" > $dir/checkcsjson +{ + "inconsistents": [ + { + "shards": [ + { + "data_digest": "0x04cfa72f", + "omap_digest": "0xffffffff", + "size": 2048, + "errors": [], + "shard": 2, + "osd": 0, + "primary": false + }, + { + "object_info": { + "oid": { + "oid": "EOBJ1", + "key": "", + "snapid": -2, + "hash": 560836233, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "27'1", + "prior_version": "0'0", + "last_reqid": "client.4192.0:1", + "user_version": 1, + "size": 7, + "mtime": "2018-04-05 14:30:10.688009", + "local_mtime": "2018-04-05 14:30:10.691774", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "size": 9, + "shard": 0, + "errors": [ + "read_error", + "size_mismatch_info", + "obj_size_info_mismatch" + ], + "osd": 1, + "primary": true + }, + { + "data_digest": "0x04cfa72f", + "omap_digest": "0xffffffff", + "size": 2048, + "shard": 1, + "errors": [], + "osd": 2, + "primary": false + } + ], + "selected_object_info": { + "oid": { + "oid": "EOBJ1", + "key": "", + "snapid": -2, + "hash": 560836233, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "27'1", + "prior_version": "0'0", + "last_reqid": "client.4192.0:1", + "user_version": 1, + "size": 7, + "mtime": "2018-04-05 14:30:10.688009", + "local_mtime": "2018-04-05 14:30:10.691774", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "read_error", + "size_mismatch_info", + "obj_size_info_mismatch" + ], + "errors": [ + "size_mismatch" + ], + "object": { + "version": 1, + "snap": "head", + "locator": "", + "nspace": "", + "name": "EOBJ1" + } + }, + { + "shards": [ + { + "size": 2048, + "errors": [ + "ec_hash_error" + ], + "shard": 2, + "osd": 0, + "primary": false + }, + { + "data_digest": "0x04cfa72f", + "omap_digest": "0xffffffff", + "size": 2048, + "errors": [], + "shard": 0, + "osd": 1, + "primary": true + }, + { + "data_digest": "0x04cfa72f", + "omap_digest": "0xffffffff", + "size": 2048, + "errors": [], + "shard": 1, + "osd": 2, + "primary": false + } + ], + "selected_object_info": { + "oid": { + "oid": "EOBJ2", + "key": "", + "snapid": -2, + "hash": 562812377, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "33'2", + "prior_version": "0'0", + "last_reqid": "client.4224.0:1", + "user_version": 2, + "size": 7, + "mtime": "2018-04-05 14:30:14.152945", + "local_mtime": "2018-04-05 14:30:14.154014", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + 
"manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "ec_hash_error" + ], + "errors": [], + "object": { + "version": 2, + "snap": "head", + "locator": "", + "nspace": "", + "name": "EOBJ2" + } + }, + { + "shards": [ + { + "data_digest": "0x04cfa72f", + "omap_digest": "0xffffffff", + "size": 2048, + "errors": [], + "shard": 2, + "osd": 0, + "primary": false + }, + { + "osd": 1, + "primary": true, + "shard": 0, + "errors": [ + "missing" + ] + }, + { + "data_digest": "0x04cfa72f", + "omap_digest": "0xffffffff", + "size": 2048, + "shard": 1, + "errors": [], + "osd": 2, + "primary": false + } + ], + "selected_object_info": { + "oid": { + "oid": "EOBJ3", + "key": "", + "snapid": -2, + "hash": 3125668237, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "39'3", + "prior_version": "0'0", + "last_reqid": "client.4258.0:1", + "user_version": 3, + "size": 7, + "mtime": "2018-04-05 14:30:18.875544", + "local_mtime": "2018-04-05 14:30:18.880153", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "missing" + ], + "errors": [], + "object": { + "version": 3, + "snap": "head", + "locator": "", + "nspace": "", + "name": "EOBJ3" + } + }, + { + "shards": [ + { + "attrs": [ + { + "Base64": false, + "value": "bad-val", + "name": "key1-EOBJ4" + }, + { + "Base64": false, + "value": "val2-EOBJ4", + "name": "key2-EOBJ4" + } + ], + "data_digest": "0x04cfa72f", + "omap_digest": "0xffffffff", + "size": 2048, + "errors": [], + "shard": 2, + "osd": 0, + "primary": false + }, + { + "osd": 1, + "primary": true, + "shard": 0, + "errors": [], + "size": 2048, + "omap_digest": "0xffffffff", + "data_digest": "0x04cfa72f", + "attrs": [ + { + "Base64": false, + "value": "val1-EOBJ4", + "name": "key1-EOBJ4" + }, + { + "Base64": false, + "value": "val2-EOBJ4", + "name": "key2-EOBJ4" + } + ] + }, + { + "osd": 2, + "primary": false, + "shard": 1, + "errors": [], + "size": 2048, + "omap_digest": "0xffffffff", + "data_digest": "0x04cfa72f", + "attrs": [ + { + "Base64": false, + "value": "val1-EOBJ4", + "name": "key1-EOBJ4" + }, + { + "Base64": false, + "value": "val3-EOBJ4", + "name": "key3-EOBJ4" + } + ] + } + ], + "selected_object_info": { + "oid": { + "oid": "EOBJ4", + "key": "", + "snapid": -2, + "hash": 1618759290, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "45'6", + "prior_version": "45'5", + "last_reqid": "client.4296.0:1", + "user_version": 6, + "size": 7, + "mtime": "2018-04-05 14:30:22.271983", + "local_mtime": "2018-04-05 14:30:22.272840", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [], + "errors": [ + "attr_value_mismatch", + "attr_name_mismatch" + ], + "object": { + "version": 6, + "snap": "head", + "locator": "", + "nspace": "", + "name": "EOBJ4" + } + }, + { + "shards": [ + { + "data_digest": "0x04cfa72f", + "omap_digest": "0xffffffff", + "size": 2048, + "errors": [], + "shard": 2, + "osd": 0, + "primary": false + }, + { + "object_info": { + "oid": { + "oid": "EOBJ5", + "key": "", + "snapid": -2, + "hash": 
2918945441, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "59'7", + "prior_version": "0'0", + "last_reqid": "client.4384.0:1", + "user_version": 7, + "size": 7, + "mtime": "2018-04-05 14:30:35.162395", + "local_mtime": "2018-04-05 14:30:35.166390", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "size": 4096, + "shard": 0, + "errors": [ + "size_mismatch_info", + "ec_size_error", + "obj_size_info_mismatch" + ], + "osd": 1, + "primary": true + }, + { + "data_digest": "0x04cfa72f", + "omap_digest": "0xffffffff", + "size": 2048, + "shard": 1, + "errors": [], + "osd": 2, + "primary": false + } + ], + "selected_object_info": { + "oid": { + "oid": "EOBJ5", + "key": "", + "snapid": -2, + "hash": 2918945441, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "59'7", + "prior_version": "0'0", + "last_reqid": "client.4384.0:1", + "user_version": 7, + "size": 7, + "mtime": "2018-04-05 14:30:35.162395", + "local_mtime": "2018-04-05 14:30:35.166390", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "union_shard_errors": [ + "size_mismatch_info", + "ec_size_error", + "obj_size_info_mismatch" + ], + "errors": [ + "size_mismatch" + ], + "object": { + "version": 7, + "snap": "head", + "locator": "", + "nspace": "", + "name": "EOBJ5" + } + }, + { + "object": { + "name": "EOBJ6", + "nspace": "", + "locator": "", + "snap": "head", + "version": 8 + }, + "errors": [], + "union_shard_errors": [ + "read_error", + "hinfo_missing", + "hinfo_corrupted" + ], + "selected_object_info": { + "oid": { + "oid": "EOBJ6", + "key": "", + "snapid": -2, + "hash": 3050890866, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "65'8", + "prior_version": "0'0", + "last_reqid": "client.4420.0:1", + "user_version": 8, + "size": 7, + "mtime": "2018-04-05 14:30:40.914673", + "local_mtime": "2018-04-05 14:30:40.917705", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x2ddbf8f5", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "shards": [ + { + "osd": 0, + "primary": false, + "shard": 2, + "errors": [ + "read_error", + "hinfo_missing" + ], + "size": 2048 + }, + { + "osd": 1, + "primary": true, + "shard": 0, + "errors": [ + "read_error", + "hinfo_corrupted" + ], + "size": 2048, + "hashinfo": "bad-val" + }, + { + "osd": 2, + "primary": false, + "shard": 1, + "errors": [], + "size": 2048, + "omap_digest": "0xffffffff", + "data_digest": "0x04cfa72f", + "hashinfo": { + "cumulative_shard_hashes": [ + { + "hash": 80717615, + "shard": 0 + }, + { + "hash": 1534491824, + "shard": 1 + }, + { + "hash": 80717615, + "shard": 2 + } + ], + "total_chunk_size": 2048 + } + } + ] + }, + { + "object": { + "name": "EOBJ7", + "nspace": "", + "locator": "", + "snap": "head", + "version": 10 + }, + "errors": [ + "hinfo_inconsistency" + ], + "union_shard_errors": [ + "ec_hash_error" + ], + "selected_object_info": { + "oid": { + "oid": 
"EOBJ7", + "key": "", + "snapid": -2, + "hash": 3258066308, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "75'10", + "prior_version": "75'9", + "last_reqid": "client.4486.0:1", + "user_version": 10, + "size": 34, + "mtime": "2018-04-05 14:30:50.995009", + "local_mtime": "2018-04-05 14:30:50.996112", + "lost": 0, + "flags": [ + "dirty", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x136e4e27", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "shards": [ + { + "osd": 0, + "primary": false, + "shard": 2, + "errors": [ + "ec_hash_error" + ], + "size": 2048, + "hashinfo": { + "cumulative_shard_hashes": [ + { + "hash": 80717615, + "shard": 0 + }, + { + "hash": 1534491824, + "shard": 1 + }, + { + "hash": 80717615, + "shard": 2 + } + ], + "total_chunk_size": 2048 + } + }, + { + "osd": 1, + "primary": true, + "shard": 0, + "errors": [], + "size": 2048, + "omap_digest": "0xffffffff", + "data_digest": "0x5b7455a8", + "hashinfo": { + "cumulative_shard_hashes": [ + { + "hash": 1534350760, + "shard": 0 + }, + { + "hash": 1534491824, + "shard": 1 + }, + { + "hash": 1534350760, + "shard": 2 + } + ], + "total_chunk_size": 2048 + } + }, + { + "osd": 2, + "primary": false, + "shard": 1, + "errors": [], + "size": 2048, + "omap_digest": "0xffffffff", + "data_digest": "0x5b7455a8", + "hashinfo": { + "cumulative_shard_hashes": [ + { + "hash": 1534350760, + "shard": 0 + }, + { + "hash": 1534491824, + "shard": 1 + }, + { + "hash": 1534350760, + "shard": 2 + } + ], + "total_chunk_size": 2048 + } + } + ] + } + ], + "epoch": 0 +} +EOF + + fi + + jq "$jqfilter" $dir/json | jq '.inconsistents' | python -c "$sortkeys" > $dir/csjson + multidiff $dir/checkcsjson $dir/csjson || test $getjson = "yes" || return 1 + if test $getjson = "yes" + then + if [ "$allow_overwrites" = "true" ] + then + num=4 + else + num=5 + fi + jq '.' 
$dir/json > save${num}.json + fi + + if test "$LOCALRUN" = "yes" && which jsonschema > /dev/null; + then + jsonschema -i $dir/json $CEPH_ROOT/doc/rados/command/list-inconsistent-obj.json || return 1 + fi + + ceph osd pool rm $poolname $poolname --yes-i-really-really-mean-it + teardown $dir || return 1 +} + +function TEST_corrupt_scrub_erasure_appends() { + corrupt_scrub_erasure $1 false +} + +function TEST_corrupt_scrub_erasure_overwrites() { + if [ "$use_ec_overwrite" = "true" ]; then + corrupt_scrub_erasure $1 true + fi +} + +# +# Test to make sure that a periodic scrub won't cause deep-scrub info to be lost +# +function TEST_periodic_scrub_replicated() { + local dir=$1 + local poolname=psr_pool + local objname=POBJ + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=2 || return 1 + run_mgr $dir x || return 1 + local ceph_osd_args="--osd-scrub-interval-randomize-ratio=0 --osd-deep-scrub-randomize-ratio=0 " + ceph_osd_args+="--osd_scrub_backoff_ratio=0" + run_osd $dir 0 $ceph_osd_args || return 1 + run_osd $dir 1 $ceph_osd_args || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + + create_pool $poolname 1 1 || return 1 + wait_for_clean || return 1 + + local osd=0 + add_something $dir $poolname $objname scrub || return 1 + local primary=$(get_primary $poolname $objname) + local pg=$(get_pg $poolname $objname) + + # Add deep-scrub only error + local payload=UVWXYZ + echo $payload > $dir/CORRUPT + # Uses $ceph_osd_args for osd restart + objectstore_tool $dir $osd $objname set-bytes $dir/CORRUPT || return 1 + + # No scrub information available, so expect failure + set -o pipefail + ! rados list-inconsistent-obj $pg | jq '.' || return 1 + set +o pipefail + + pg_deep_scrub $pg || return 1 + + # Make sure bad object found + rados list-inconsistent-obj $pg | jq '.' | grep -q $objname || return 1 + + flush_pg_stats + local last_scrub=$(get_last_scrub_stamp $pg) + # Fake a schedule scrub + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${primary}) \ + trigger_scrub $pg || return 1 + # Wait for schedule regular scrub + wait_for_scrub $pg "$last_scrub" + + # It needed to be upgraded + grep -q "Deep scrub errors, upgrading scrub to deep-scrub" $dir/osd.${primary}.log || return 1 + + # Bad object still known + rados list-inconsistent-obj $pg | jq '.' | grep -q $objname || return 1 + + # Can't upgrade with this set + ceph osd set nodeep-scrub + # Let map change propagate to OSDs + flush_pg_stats + sleep 5 + + # Fake a schedule scrub + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${primary}) \ + trigger_scrub $pg || return 1 + # Wait for schedule regular scrub + # to notice scrub and skip it + local found=false + for i in $(seq 14 -1 0) + do + sleep 1 + ! grep -q "Regular scrub skipped due to deep-scrub errors and nodeep-scrub set" $dir/osd.${primary}.log || { found=true ; break; } + echo Time left: $i seconds + done + test $found = "true" || return 1 + + # Bad object still known + rados list-inconsistent-obj $pg | jq '.' | grep -q $objname || return 1 + + flush_pg_stats + # Request a regular scrub and it will be done + pg_scrub $pg + grep -q "Regular scrub request, deep-scrub details will be lost" $dir/osd.${primary}.log || return 1 + + # deep-scrub error is no longer present + rados list-inconsistent-obj $pg | jq '.' 
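# The periodic-scrub test drives scheduling through the OSD admin socket instead of
# waiting for the scrub timer; the trigger_scrub command it issues can also be sent by
# hand (the socket path below is the conventional default and the pgid is a placeholder):
#
#   ceph --admin-daemon /var/run/ceph/ceph-osd.0.asok trigger_scrub 3.0
#
# TEST_scrub_warning (next) passes a second argument derived from the overdue thresholds
# so the faked scrub stamps appear overdue.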
| grep -qv $objname || return 1 +} + +function TEST_scrub_warning() { + local dir=$1 + local poolname=psr_pool + local objname=POBJ + local scrubs=5 + local deep_scrubs=5 + local i1_day=86400 + local i7_days=$(calc $i1_day \* 7) + local i14_days=$(calc $i1_day \* 14) + local overdue=0.5 + local conf_overdue_seconds=$(calc $i7_days + $i1_day + \( $i7_days \* $overdue \) ) + local pool_overdue_seconds=$(calc $i14_days + $i1_day + \( $i14_days \* $overdue \) ) + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + run_mgr $dir x --mon_warn_pg_not_scrubbed_ratio=${overdue} --mon_warn_pg_not_deep_scrubbed_ratio=${overdue} || return 1 + run_osd $dir 0 $ceph_osd_args --osd_scrub_backoff_ratio=0 || return 1 + + for i in $(seq 1 $(expr $scrubs + $deep_scrubs)) + do + create_pool $poolname-$i 1 1 || return 1 + wait_for_clean || return 1 + if [ $i = "1" ]; + then + ceph osd pool set $poolname-$i scrub_max_interval $i14_days + fi + if [ $i = $(expr $scrubs + 1) ]; + then + ceph osd pool set $poolname-$i deep_scrub_interval $i14_days + fi + done + + # Only 1 osd + local primary=0 + + ceph osd set noscrub || return 1 + ceph osd set nodeep-scrub || return 1 + ceph config set global osd_scrub_interval_randomize_ratio 0 + ceph config set global osd_deep_scrub_randomize_ratio 0 + ceph config set global osd_scrub_max_interval ${i7_days} + ceph config set global osd_deep_scrub_interval ${i7_days} + + # Fake schedule scrubs + for i in $(seq 1 $scrubs) + do + if [ $i = "1" ]; + then + overdue_seconds=$pool_overdue_seconds + else + overdue_seconds=$conf_overdue_seconds + fi + CEPH_ARGS='' ceph daemon $(get_asok_path osd.${primary}) \ + trigger_scrub ${i}.0 $(expr ${overdue_seconds} + ${i}00) || return 1 + done + # Fake schedule deep scrubs + for i in $(seq $(expr $scrubs + 1) $(expr $scrubs + $deep_scrubs)) + do + if [ $i = "$(expr $scrubs + 1)" ]; + then + overdue_seconds=$pool_overdue_seconds + else + overdue_seconds=$conf_overdue_seconds + fi + CEPH_ARGS='' ceph daemon $(get_asok_path osd.${primary}) \ + trigger_deep_scrub ${i}.0 $(expr ${overdue_seconds} + ${i}00) || return 1 + done + flush_pg_stats + + ceph health + ceph health detail + ceph health | grep -q "$deep_scrubs pgs not deep-scrubbed in time" || return 1 + ceph health | grep -q "$scrubs pgs not scrubbed in time" || return 1 + COUNT=$(ceph health detail | grep "not scrubbed since" | wc -l) + if [ "$COUNT" != $scrubs ]; then + ceph health detail | grep "not scrubbed since" + return 1 + fi + COUNT=$(ceph health detail | grep "not deep-scrubbed since" | wc -l) + if [ "$COUNT" != $deep_scrubs ]; then + ceph health detail | grep "not deep-scrubbed since" + return 1 + fi + return 0 +} + +# +# Corrupt snapset in replicated pool +# +function TEST_corrupt_snapset_scrub_rep() { + local dir=$1 + local poolname=csr_pool + local total_objs=2 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=2 || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + create_rbd_pool || return 1 + wait_for_clean || return 1 + + create_pool foo 1 || return 1 + create_pool $poolname 1 1 || return 1 + wait_for_clean || return 1 + + for i in $(seq 1 $total_objs) ; do + objname=ROBJ${i} + add_something $dir $poolname $objname || return 1 + + rados --pool $poolname setomapheader $objname hdr-$objname || return 1 + rados --pool $poolname setomapval $objname key-$objname val-$objname || return 1 + done + + local pg=$(get_pg $poolname ROBJ0) + local primary=$(get_primary $poolname ROBJ0) + + rados -p 
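# The overdue thresholds faked in TEST_scrub_warning work out as interval + one day of
# slack + interval * the 0.5 warn ratio: with the 7-day global interval that is
# 604800 + 86400 + 302400 = 993600 s (11.5 days), and with the 14-day per-pool override
# it is 1209600 + 86400 + 604800 = 1900800 s (22 days).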
$poolname mksnap snap1 + echo -n head_of_snapshot_data > $dir/change + + for i in $(seq 1 $total_objs) ; do + objname=ROBJ${i} + + # Alternate corruption between osd.0 and osd.1 + local osd=$(expr $i % 2) + + case $i in + 1) + rados --pool $poolname put $objname $dir/change + objectstore_tool $dir $osd --head $objname clear-snapset corrupt || return 1 + ;; + + 2) + rados --pool $poolname put $objname $dir/change + objectstore_tool $dir $osd --head $objname clear-snapset corrupt || return 1 + ;; + + esac + done + rm $dir/change + + pg_scrub $pg + + rados list-inconsistent-pg $poolname > $dir/json || return 1 + # Check pg count + test $(jq '. | length' $dir/json) = "1" || return 1 + # Check pgid + test $(jq -r '.[0]' $dir/json) = $pg || return 1 + + rados list-inconsistent-obj $pg > $dir/json || return 1 + + jq "$jqfilter" << EOF | jq '.inconsistents' | python -c "$sortkeys" > $dir/checkcsjson +{ + "epoch": 34, + "inconsistents": [ + { + "object": { + "name": "ROBJ1", + "nspace": "", + "locator": "", + "snap": "head", + "version": 8 + }, + "errors": [ + "snapset_inconsistency" + ], + "union_shard_errors": [], + "selected_object_info": { + "oid": { + "oid": "ROBJ1", + "key": "", + "snapid": -2, + "hash": 1454963827, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "24'8", + "prior_version": "21'3", + "last_reqid": "client.4195.0:1", + "user_version": 8, + "size": 21, + "mtime": "2018-04-05 14:35:43.286117", + "local_mtime": "2018-04-05 14:35:43.288990", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x53acb008", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "shards": [ + { + "osd": 0, + "primary": false, + "errors": [], + "size": 21, + "snapset": { + "clones": [ + { + "overlap": "[]", + "size": 7, + "snap": 1, + "snaps": [ + 1 + ] + } + ], + "snap_context": { + "seq": 1, + "snaps": [ + 1 + ] + } + } + }, + { + "osd": 1, + "primary": true, + "errors": [], + "size": 21, + "snapset": { + "clones": [], + "snap_context": { + "seq": 0, + "snaps": [] + } + } + } + ] + }, + { + "object": { + "name": "ROBJ2", + "nspace": "", + "locator": "", + "snap": "head", + "version": 10 + }, + "errors": [ + "snapset_inconsistency" + ], + "union_shard_errors": [], + "selected_object_info": { + "oid": { + "oid": "ROBJ2", + "key": "", + "snapid": -2, + "hash": 2026323607, + "max": 0, + "pool": 3, + "namespace": "" + }, + "version": "28'10", + "prior_version": "23'6", + "last_reqid": "client.4223.0:1", + "user_version": 10, + "size": 21, + "mtime": "2018-04-05 14:35:48.326856", + "local_mtime": "2018-04-05 14:35:48.328097", + "lost": 0, + "flags": [ + "dirty", + "omap", + "data_digest" + ], + "truncate_seq": 0, + "truncate_size": 0, + "data_digest": "0x53acb008", + "omap_digest": "0xffffffff", + "expected_object_size": 0, + "expected_write_size": 0, + "alloc_hint_flags": 0, + "manifest": { + "type": 0 + }, + "watchers": {} + }, + "shards": [ + { + "osd": 0, + "primary": false, + "errors": [], + "size": 21, + "snapset": { + "clones": [], + "snap_context": { + "seq": 0, + "snaps": [] + } + } + }, + { + "osd": 1, + "primary": true, + "errors": [], + "size": 21, + "snapset": { + "clones": [ + { + "overlap": "[]", + "size": 7, + "snap": 1, + "snaps": [ + 1 + ] + } + ], + "snap_context": { + "seq": 1, + "snaps": [ + 1 + ] + } + } + } + ] + } + ] +} +EOF + + jq "$jqfilter" $dir/json | jq '.inconsistents' | 
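# The snapset corruption relies on the clear-snapset operation of ceph-objectstore-tool,
# applied to the head object on one replica only (via the objectstore_tool helper with
# --head and the "corrupt" variant), so the following scrub reports snapset_inconsistency
# between the two OSDs. A sketch of the call made for ROBJ1 on osd.1:
#
#   objectstore_tool $dir 1 --head ROBJ1 clear-snapset corrupt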
python -c "$sortkeys" > $dir/csjson + multidiff $dir/checkcsjson $dir/csjson || test $getjson = "yes" || return 1 + if test $getjson = "yes" + then + jq '.' $dir/json > save6.json + fi + + if test "$LOCALRUN" = "yes" && which jsonschema > /dev/null; + then + jsonschema -i $dir/json $CEPH_ROOT/doc/rados/command/list-inconsistent-obj.json || return 1 + fi + + ERRORS=0 + declare -a err_strings + err_strings[0]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid [0-9]*:.*:::ROBJ1:head : snapset inconsistent" + err_strings[1]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid [0-9]*:.*:::ROBJ2:head : snapset inconsistent" + err_strings[2]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 [0-9]*:.*:::ROBJ1:1 : is an unexpected clone" + err_strings[3]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub : stat mismatch, got 3/4 objects, 1/2 clones, 3/4 dirty, 3/4 omap, 0/0 pinned, 0/0 hit_set_archive, 0/0 whiteouts, 49/56 bytes, 0/0 manifest objects, 0/0 hit_set_archive bytes." + err_strings[4]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub 0 missing, 2 inconsistent objects" + err_strings[5]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub 4 errors" + + for err_string in "${err_strings[@]}" + do + if ! grep -q "$err_string" $dir/osd.${primary}.log + then + echo "Missing log message '$err_string'" + ERRORS=$(expr $ERRORS + 1) + fi + done + + if [ $ERRORS != "0" ]; + then + echo "TEST FAILED WITH $ERRORS ERRORS" + return 1 + fi + + ceph osd pool rm $poolname $poolname --yes-i-really-really-mean-it + teardown $dir || return 1 +} + +function TEST_request_scrub_priority() { + local dir=$1 + local poolname=psr_pool + local objname=POBJ + local OBJECTS=64 + local PGS=8 + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=1 || return 1 + run_mgr $dir x || return 1 + local ceph_osd_args="--osd-scrub-interval-randomize-ratio=0 --osd-deep-scrub-randomize-ratio=0 " + ceph_osd_args+="--osd_scrub_backoff_ratio=0" + run_osd $dir 0 $ceph_osd_args || return 1 + + create_pool $poolname $PGS $PGS || return 1 + wait_for_clean || return 1 + + local osd=0 + add_something $dir $poolname $objname noscrub || return 1 + local primary=$(get_primary $poolname $objname) + local pg=$(get_pg $poolname $objname) + poolid=$(ceph osd dump | grep "^pool.*[']${poolname}[']" | awk '{ print $2 }') + + local otherpgs + for i in $(seq 0 $(expr $PGS - 1)) + do + opg="${poolid}.${i}" + if [ "$opg" = "$pg" ]; then + continue + fi + otherpgs="${otherpgs}${opg} " + local other_last_scrub=$(get_last_scrub_stamp $pg) + # Fake a schedule scrub + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${primary}) \ + trigger_scrub $opg || return 1 + done + + sleep 15 + flush_pg_stats + + # Request a regular scrub and it will be done + local last_scrub=$(get_last_scrub_stamp $pg) + ceph pg scrub $pg + + ceph osd unset noscrub || return 1 + ceph osd unset nodeep-scrub || return 1 + + wait_for_scrub $pg "$last_scrub" + + for opg in $otherpgs $pg + do + wait_for_scrub $opg "$other_last_scrub" + done + + # Verify that the requested scrub ran first + grep "log_channel.*scrub ok" $dir/osd.${primary}.log | head -1 | sed 's/.*[[]DBG[]]//' | grep -q $pg || return 1 + + return 0 +} + + +main osd-scrub-repair "$@" + +# Local Variables: +# compile-command: "cd build ; make -j4 && \ +# ../qa/run-standalone.sh osd-scrub-repair.sh" +# End: diff --git a/qa/standalone/scrub/osd-scrub-snaps.sh b/qa/standalone/scrub/osd-scrub-snaps.sh new file mode 100755 index 00000000..04c3f4a5 --- /dev/null +++ 
b/qa/standalone/scrub/osd-scrub-snaps.sh @@ -0,0 +1,1274 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2015 Red Hat +# +# Author: David Zafman +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +# Test development and debugging +# Set to "yes" in order to ignore diff errors and save results to update test +getjson="no" + +jqfilter='.inconsistents' +sortkeys='import json; import sys ; JSON=sys.stdin.read() ; ud = json.loads(JSON) ; print json.dumps(ud, sort_keys=True, indent=2)' + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7121" # git grep '\<7121\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + export -n CEPH_CLI_TEST_DUP_COMMAND + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function create_scenario() { + local dir=$1 + local poolname=$2 + local TESTDATA=$3 + local osd=$4 + + SNAP=1 + rados -p $poolname mksnap snap${SNAP} + dd if=/dev/urandom of=$TESTDATA bs=256 count=${SNAP} + rados -p $poolname put obj1 $TESTDATA + rados -p $poolname put obj5 $TESTDATA + rados -p $poolname put obj3 $TESTDATA + for i in `seq 6 14` + do rados -p $poolname put obj${i} $TESTDATA + done + + SNAP=2 + rados -p $poolname mksnap snap${SNAP} + dd if=/dev/urandom of=$TESTDATA bs=256 count=${SNAP} + rados -p $poolname put obj5 $TESTDATA + + SNAP=3 + rados -p $poolname mksnap snap${SNAP} + dd if=/dev/urandom of=$TESTDATA bs=256 count=${SNAP} + rados -p $poolname put obj3 $TESTDATA + + SNAP=4 + rados -p $poolname mksnap snap${SNAP} + dd if=/dev/urandom of=$TESTDATA bs=256 count=${SNAP} + rados -p $poolname put obj5 $TESTDATA + rados -p $poolname put obj2 $TESTDATA + + SNAP=5 + rados -p $poolname mksnap snap${SNAP} + SNAP=6 + rados -p $poolname mksnap snap${SNAP} + dd if=/dev/urandom of=$TESTDATA bs=256 count=${SNAP} + rados -p $poolname put obj5 $TESTDATA + + SNAP=7 + rados -p $poolname mksnap snap${SNAP} + + rados -p $poolname rm obj4 + rados -p $poolname rm obj16 + rados -p $poolname rm obj2 + + kill_daemons $dir TERM osd || return 1 + + # Don't need to use ceph_objectstore_tool() function because osd stopped + + JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj1)" + ceph-objectstore-tool --data-path $dir/${osd} "$JSON" --force remove || return 1 + + JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --op list obj5 | grep \"snapid\":2)" + ceph-objectstore-tool --data-path $dir/${osd} "$JSON" remove || return 1 + + JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --op list obj5 | grep \"snapid\":1)" + OBJ5SAVE="$JSON" + # Starts with a snapmap + ceph-kvstore-tool bluestore-kv $dir/${osd} list 2> /dev/null > $dir/drk.log + grep "^M.*MAP_.*[.]1[.]obj5[.][.]$" $dir/drk.log || return 1 + ceph-objectstore-tool --data-path $dir/${osd} --rmtype nosnapmap "$JSON" remove || return 1 + # Check that snapmap is stil there + ceph-kvstore-tool bluestore-kv 
$dir/${osd} list 2> /dev/null > $dir/drk.log + grep "^M.*MAP_.*[.]1[.]obj5[.][.]$" $dir/drk.log || return 1 + rm -f $dir/drk.log + + JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --op list obj5 | grep \"snapid\":4)" + dd if=/dev/urandom of=$TESTDATA bs=256 count=18 + ceph-objectstore-tool --data-path $dir/${osd} "$JSON" set-bytes $TESTDATA || return 1 + + JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj3)" + dd if=/dev/urandom of=$TESTDATA bs=256 count=15 + ceph-objectstore-tool --data-path $dir/${osd} "$JSON" set-bytes $TESTDATA || return 1 + + JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --op list obj4 | grep \"snapid\":7)" + ceph-objectstore-tool --data-path $dir/${osd} "$JSON" remove || return 1 + + # Starts with a snapmap + ceph-kvstore-tool bluestore-kv $dir/${osd} list 2> /dev/null > $dir/drk.log + grep "^M.*MAP_.*[.]7[.]obj16[.][.]$" $dir/drk.log || return 1 + JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --op list obj16 | grep \"snapid\":7)" + ceph-objectstore-tool --data-path $dir/${osd} --rmtype snapmap "$JSON" remove || return 1 + # Check that snapmap is now removed + ceph-kvstore-tool bluestore-kv $dir/${osd} list 2> /dev/null > $dir/drk.log + ! grep "^M.*MAP_.*[.]7[.]obj16[.][.]$" $dir/drk.log || return 1 + rm -f $dir/drk.log + + JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj2)" + ceph-objectstore-tool --data-path $dir/${osd} "$JSON" rm-attr snapset || return 1 + + # Create a clone which isn't in snapset and doesn't have object info + JSON="$(echo "$OBJ5SAVE" | sed s/snapid\":1/snapid\":7/)" + dd if=/dev/urandom of=$TESTDATA bs=256 count=7 + ceph-objectstore-tool --data-path $dir/${osd} "$JSON" set-bytes $TESTDATA || return 1 + + JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj6)" + ceph-objectstore-tool --data-path $dir/${osd} "$JSON" clear-snapset || return 1 + JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj7)" + ceph-objectstore-tool --data-path $dir/${osd} "$JSON" clear-snapset corrupt || return 1 + JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj8)" + ceph-objectstore-tool --data-path $dir/${osd} "$JSON" clear-snapset seq || return 1 + JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj9)" + ceph-objectstore-tool --data-path $dir/${osd} "$JSON" clear-snapset clone_size || return 1 + JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj10)" + ceph-objectstore-tool --data-path $dir/${osd} "$JSON" clear-snapset clone_overlap || return 1 + JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj11)" + ceph-objectstore-tool --data-path $dir/${osd} "$JSON" clear-snapset clones || return 1 + JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj12)" + ceph-objectstore-tool --data-path $dir/${osd} "$JSON" clear-snapset head || return 1 + JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj13)" + ceph-objectstore-tool --data-path $dir/${osd} "$JSON" clear-snapset snaps || return 1 + JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj14)" + ceph-objectstore-tool --data-path $dir/${osd} "$JSON" clear-snapset size || return 1 + + echo "garbage" > $dir/bad + JSON="$(ceph-objectstore-tool --data-path $dir/${osd} --head --op list obj15)" + ceph-objectstore-tool --data-path $dir/${osd} "$JSON" set-attr snapset $dir/bad || return 1 + rm -f $dir/bad + return 0 +} + +function TEST_scrub_snaps() { + local 
dir=$1 + local poolname=test + local OBJS=16 + local OSDS=1 + + TESTDATA="testdata.$$" + + run_mon $dir a --osd_pool_default_size=$OSDS || return 1 + run_mgr $dir x || return 1 + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + # All scrubs done manually. Don't want any unexpected scheduled scrubs. + ceph osd set noscrub || return 1 + ceph osd set nodeep-scrub || return 1 + + # Create a pool with a single pg + create_pool $poolname 1 1 + wait_for_clean || return 1 + poolid=$(ceph osd dump | grep "^pool.*[']test[']" | awk '{ print $2 }') + + dd if=/dev/urandom of=$TESTDATA bs=1032 count=1 + for i in `seq 1 $OBJS` + do + rados -p $poolname put obj${i} $TESTDATA + done + + local primary=$(get_primary $poolname obj1) + + create_scenario $dir $poolname $TESTDATA $primary || return 1 + + rm -f $TESTDATA + + for osd in $(seq 0 $(expr $OSDS - 1)) + do + activate_osd $dir $osd || return 1 + done + + wait_for_clean || return 1 + + local pgid="${poolid}.0" + if ! pg_scrub "$pgid" ; then + return 1 + fi + + test "$(grep "_scan_snaps start" $dir/osd.${primary}.log | wc -l)" = "2" || return 1 + + rados list-inconsistent-pg $poolname > $dir/json || return 1 + # Check pg count + test $(jq '. | length' $dir/json) = "1" || return 1 + # Check pgid + test $(jq -r '.[0]' $dir/json) = $pgid || return 1 + + rados list-inconsistent-obj $pgid > $dir/json || return 1 + + # The injected snapshot errors with a single copy pool doesn't + # see object errors because all the issues are detected by + # comparing copies. + jq "$jqfilter" << EOF | python -c "$sortkeys" > $dir/checkcsjson +{ + "epoch": 17, + "inconsistents": [] +} +EOF + + jq "$jqfilter" $dir/json | python -c "$sortkeys" > $dir/csjson + multidiff $dir/checkcsjson $dir/csjson || test $getjson = "yes" || return 1 + + rados list-inconsistent-snapset $pgid > $dir/json || return 1 + + jq "$jqfilter" << EOF | python -c "$sortkeys" > $dir/checkcsjson +{ + "inconsistents": [ + { + "errors": [ + "headless" + ], + "snap": 1, + "locator": "", + "nspace": "", + "name": "obj1" + }, + { + "errors": [ + "size_mismatch" + ], + "snap": 1, + "locator": "", + "nspace": "", + "name": "obj10" + }, + { + "errors": [ + "headless" + ], + "snap": 1, + "locator": "", + "nspace": "", + "name": "obj11" + }, + { + "errors": [ + "size_mismatch" + ], + "snap": 1, + "locator": "", + "nspace": "", + "name": "obj14" + }, + { + "errors": [ + "headless" + ], + "snap": 1, + "locator": "", + "nspace": "", + "name": "obj6" + }, + { + "errors": [ + "headless" + ], + "snap": 1, + "locator": "", + "nspace": "", + "name": "obj7" + }, + { + "errors": [ + "size_mismatch" + ], + "snap": 1, + "locator": "", + "nspace": "", + "name": "obj9" + }, + { + "errors": [ + "headless" + ], + "snap": 4, + "locator": "", + "nspace": "", + "name": "obj2" + }, + { + "errors": [ + "size_mismatch" + ], + "snap": 4, + "locator": "", + "nspace": "", + "name": "obj5" + }, + { + "errors": [ + "headless" + ], + "snap": 7, + "locator": "", + "nspace": "", + "name": "obj2" + }, + { + "errors": [ + "info_missing", + "headless" + ], + "snap": 7, + "locator": "", + "nspace": "", + "name": "obj5" + }, + { + "name": "obj10", + "nspace": "", + "locator": "", + "snap": "head", + "snapset": { + "snap_context": { + "seq": 1, + "snaps": [ + 1 + ] + }, + "clones": [ + { + "snap": 1, + "size": 1032, + "overlap": "????", + "snaps": [ + 1 + ] + } + ] + }, + "errors": [] + }, + { + "extra clones": [ + 1 + ], + "errors": [ + "extra_clones" + ], + "snap": "head", + "locator": "", + "nspace": "", + "name": 
"obj11", + "snapset": { + "snap_context": { + "seq": 1, + "snaps": [ + 1 + ] + }, + "clones": [] + } + }, + { + "name": "obj14", + "nspace": "", + "locator": "", + "snap": "head", + "snapset": { + "snap_context": { + "seq": 1, + "snaps": [ + 1 + ] + }, + "clones": [ + { + "snap": 1, + "size": 1033, + "overlap": "[]", + "snaps": [ + 1 + ] + } + ] + }, + "errors": [] + }, + { + "errors": [ + "snapset_corrupted" + ], + "snap": "head", + "locator": "", + "nspace": "", + "name": "obj15" + }, + { + "extra clones": [ + 7, + 4 + ], + "errors": [ + "snapset_missing", + "extra_clones" + ], + "snap": "head", + "locator": "", + "nspace": "", + "name": "obj2" + }, + { + "errors": [ + "size_mismatch" + ], + "snap": "head", + "locator": "", + "nspace": "", + "name": "obj3", + "snapset": { + "snap_context": { + "seq": 3, + "snaps": [ + 3, + 2, + 1 + ] + }, + "clones": [ + { + "snap": 1, + "size": 1032, + "overlap": "[]", + "snaps": [ + 1 + ] + }, + { + "snap": 3, + "size": 256, + "overlap": "[]", + "snaps": [ + 3, + 2 + ] + } + ] + } + }, + { + "missing": [ + 7 + ], + "errors": [ + "clone_missing" + ], + "snap": "head", + "locator": "", + "nspace": "", + "name": "obj4", + "snapset": { + "snap_context": { + "seq": 7, + "snaps": [ + 7, + 6, + 5, + 4, + 3, + 2, + 1 + ] + }, + "clones": [ + { + "snap": 7, + "size": 1032, + "overlap": "[]", + "snaps": [ + 7, + 6, + 5, + 4, + 3, + 2, + 1 + ] + } + ] + } + }, + { + "missing": [ + 2, + 1 + ], + "extra clones": [ + 7 + ], + "errors": [ + "extra_clones", + "clone_missing" + ], + "snap": "head", + "locator": "", + "nspace": "", + "name": "obj5", + "snapset": { + "snap_context": { + "seq": 6, + "snaps": [ + 6, + 5, + 4, + 3, + 2, + 1 + ] + }, + "clones": [ + { + "snap": 1, + "size": 1032, + "overlap": "[]", + "snaps": [ + 1 + ] + }, + { + "snap": 2, + "size": 256, + "overlap": "[]", + "snaps": [ + 2 + ] + }, + { + "snap": 4, + "size": 512, + "overlap": "[]", + "snaps": [ + 4, + 3 + ] + }, + { + "snap": 6, + "size": 1024, + "overlap": "[]", + "snaps": [ + 6, + 5 + ] + } + ] + } + }, + { + "extra clones": [ + 1 + ], + "errors": [ + "extra_clones" + ], + "snap": "head", + "locator": "", + "nspace": "", + "name": "obj6", + "snapset": { + "snap_context": { + "seq": 1, + "snaps": [ + 1 + ] + }, + "clones": [] + } + }, + { + "extra clones": [ + 1 + ], + "errors": [ + "extra_clones" + ], + "snap": "head", + "locator": "", + "nspace": "", + "name": "obj7", + "snapset": { + "snap_context": { + "seq": 0, + "snaps": [] + }, + "clones": [] + } + }, + { + "errors": [ + "snapset_error" + ], + "snap": "head", + "locator": "", + "nspace": "", + "name": "obj8", + "snapset": { + "snap_context": { + "seq": 0, + "snaps": [ + 1 + ] + }, + "clones": [ + { + "snap": 1, + "size": 1032, + "overlap": "[]", + "snaps": [ + 1 + ] + } + ] + } + }, + { + "name": "obj9", + "nspace": "", + "locator": "", + "snap": "head", + "snapset": { + "snap_context": { + "seq": 1, + "snaps": [ + 1 + ] + }, + "clones": [ + { + "snap": 1, + "size": "????", + "overlap": "[]", + "snaps": [ + 1 + ] + } + ] + }, + "errors": [] + } + ], + "epoch": 20 +} +EOF + + jq "$jqfilter" $dir/json | python -c "$sortkeys" > $dir/csjson + multidiff $dir/checkcsjson $dir/csjson || test $getjson = "yes" || return 1 + if test $getjson = "yes" + then + jq '.' 
$dir/json > save1.json + fi + + if test "$LOCALRUN" = "yes" && which jsonschema > /dev/null; + then + jsonschema -i $dir/json $CEPH_ROOT/doc/rados/command/list-inconsistent-snap.json || return 1 + fi + + pidfiles=$(find $dir 2>/dev/null | grep 'osd[^/]*\.pid') + pids="" + for pidfile in ${pidfiles} + do + pids+="$(cat $pidfile) " + done + + ERRORS=0 + + for i in `seq 1 7` + do + rados -p $poolname rmsnap snap$i + done + sleep 5 + local -i loop=0 + while ceph pg dump pgs | grep -q snaptrim; + do + if ceph pg dump pgs | grep -q snaptrim_error; + then + break + fi + sleep 2 + loop+=1 + if (( $loop >= 10 )) ; then + ERRORS=$(expr $ERRORS + 1) + break + fi + done + ceph pg dump pgs + + for pid in $pids + do + if ! kill -0 $pid + then + echo "OSD Crash occurred" + ERRORS=$(expr $ERRORS + 1) + fi + done + + kill_daemons $dir || return 1 + + declare -a err_strings + err_strings[0]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj10:.* : is missing in clone_overlap" + err_strings[1]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj5:7 : no '_' attr" + err_strings[2]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj5:7 : is an unexpected clone" + err_strings[3]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*::obj5:4 : on disk size [(]4608[)] does not match object info size [(]512[)] adjusted for ondisk to [(]512[)]" + err_strings[4]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj5:head : expected clone .*:::obj5:2" + err_strings[5]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj5:head : expected clone .*:::obj5:1" + err_strings[6]="log_channel[(]cluster[)] log [[]INF[]] : scrub [0-9]*[.]0 .*:::obj5:head : 2 missing clone[(]s[)]" + err_strings[7]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj8:head : snaps.seq not set" + err_strings[8]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj7:1 : is an unexpected clone" + err_strings[9]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj3:head : on disk size [(]3840[)] does not match object info size [(]768[)] adjusted for ondisk to [(]768[)]" + err_strings[10]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj6:1 : is an unexpected clone" + err_strings[11]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj2:head : no 'snapset' attr" + err_strings[12]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj2:7 : clone ignored due to missing snapset" + err_strings[13]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj2:4 : clone ignored due to missing snapset" + err_strings[14]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj4:head : expected clone .*:::obj4:7" + err_strings[15]="log_channel[(]cluster[)] log [[]INF[]] : scrub [0-9]*[.]0 .*:::obj4:head : 1 missing clone[(]s[)]" + err_strings[16]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj1:1 : is an unexpected clone" + err_strings[17]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj9:1 : is missing in clone_size" + err_strings[18]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj11:1 : is an unexpected clone" + err_strings[19]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj14:1 : size 1032 != clone_size 1033" + err_strings[20]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub 20 errors" + err_strings[21]="log_channel[(]cluster[)] log [[]ERR[]] : scrub [0-9]*[.]0 .*:::obj15:head : 
can't decode 'snapset' attr buffer" + err_strings[22]="log_channel[(]cluster[)] log [[]ERR[]] : osd[.][0-9]* found snap mapper error on pg 1.0 oid 1:461f8b5e:::obj16:7 snaps missing in mapper, should be: 1,2,3,4,5,6,7 was r -2...repaired" + + for err_string in "${err_strings[@]}" + do + if ! grep "$err_string" $dir/osd.${primary}.log > /dev/null; + then + echo "Missing log message '$err_string'" + ERRORS=$(expr $ERRORS + 1) + fi + done + + if [ $ERRORS != "0" ]; + then + echo "TEST FAILED WITH $ERRORS ERRORS" + return 1 + fi + + echo "TEST PASSED" + return 0 +} + +function _scrub_snaps_multi() { + local dir=$1 + local poolname=test + local OBJS=16 + local OSDS=2 + local which=$2 + + TESTDATA="testdata.$$" + + run_mon $dir a --osd_pool_default_size=$OSDS || return 1 + run_mgr $dir x || return 1 + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + # All scrubs done manually. Don't want any unexpected scheduled scrubs. + ceph osd set noscrub || return 1 + ceph osd set nodeep-scrub || return 1 + + # Create a pool with a single pg + create_pool $poolname 1 1 + wait_for_clean || return 1 + poolid=$(ceph osd dump | grep "^pool.*[']test[']" | awk '{ print $2 }') + + dd if=/dev/urandom of=$TESTDATA bs=1032 count=1 + for i in `seq 1 $OBJS` + do + rados -p $poolname put obj${i} $TESTDATA + done + + local primary=$(get_primary $poolname obj1) + local replica=$(get_not_primary $poolname obj1) + + eval create_scenario $dir $poolname $TESTDATA \$$which || return 1 + + rm -f $TESTDATA + + for osd in $(seq 0 $(expr $OSDS - 1)) + do + activate_osd $dir $osd || return 1 + done + + wait_for_clean || return 1 + + local pgid="${poolid}.0" + if ! pg_scrub "$pgid" ; then + return 1 + fi + + test "$(grep "_scan_snaps start" $dir/osd.${primary}.log | wc -l)" -gt "3" || return 1 + test "$(grep "_scan_snaps start" $dir/osd.${replica}.log | wc -l)" -gt "3" || return 1 + + rados list-inconsistent-pg $poolname > $dir/json || return 1 + # Check pg count + test $(jq '. 
| length' $dir/json) = "1" || return 1 + # Check pgid + test $(jq -r '.[0]' $dir/json) = $pgid || return 1 + + rados list-inconsistent-obj $pgid --format=json-pretty + + rados list-inconsistent-snapset $pgid > $dir/json || return 1 + + # Since all of the snapshots on the primary is consistent there are no errors here + if [ $which = "replica" ]; + then + scruberrors="20" + jq "$jqfilter" << EOF | python -c "$sortkeys" > $dir/checkcsjson +{ + "epoch": 23, + "inconsistents": [] +} +EOF + +else + scruberrors="30" + jq "$jqfilter" << EOF | python -c "$sortkeys" > $dir/checkcsjson +{ + "epoch": 23, + "inconsistents": [ + { + "name": "obj10", + "nspace": "", + "locator": "", + "snap": 1, + "errors": [ + "size_mismatch" + ] + }, + { + "name": "obj11", + "nspace": "", + "locator": "", + "snap": 1, + "errors": [ + "headless" + ] + }, + { + "name": "obj14", + "nspace": "", + "locator": "", + "snap": 1, + "errors": [ + "size_mismatch" + ] + }, + { + "name": "obj6", + "nspace": "", + "locator": "", + "snap": 1, + "errors": [ + "headless" + ] + }, + { + "name": "obj7", + "nspace": "", + "locator": "", + "snap": 1, + "errors": [ + "headless" + ] + }, + { + "name": "obj9", + "nspace": "", + "locator": "", + "snap": 1, + "errors": [ + "size_mismatch" + ] + }, + { + "name": "obj5", + "nspace": "", + "locator": "", + "snap": 7, + "errors": [ + "info_missing", + "headless" + ] + }, + { + "name": "obj10", + "nspace": "", + "locator": "", + "snap": "head", + "snapset": { + "snap_context": { + "seq": 1, + "snaps": [ + 1 + ] + }, + "clones": [ + { + "snap": 1, + "size": 1032, + "overlap": "????", + "snaps": [ + 1 + ] + } + ] + }, + "errors": [] + }, + { + "name": "obj11", + "nspace": "", + "locator": "", + "snap": "head", + "snapset": { + "snap_context": { + "seq": 1, + "snaps": [ + 1 + ] + }, + "clones": [] + }, + "errors": [ + "extra_clones" + ], + "extra clones": [ + 1 + ] + }, + { + "name": "obj14", + "nspace": "", + "locator": "", + "snap": "head", + "snapset": { + "snap_context": { + "seq": 1, + "snaps": [ + 1 + ] + }, + "clones": [ + { + "snap": 1, + "size": 1033, + "overlap": "[]", + "snaps": [ + 1 + ] + } + ] + }, + "errors": [] + }, + { + "name": "obj5", + "nspace": "", + "locator": "", + "snap": "head", + "snapset": { + "snap_context": { + "seq": 6, + "snaps": [ + 6, + 5, + 4, + 3, + 2, + 1 + ] + }, + "clones": [ + { + "snap": 1, + "size": 1032, + "overlap": "[]", + "snaps": [ + 1 + ] + }, + { + "snap": 2, + "size": 256, + "overlap": "[]", + "snaps": [ + 2 + ] + }, + { + "snap": 4, + "size": 512, + "overlap": "[]", + "snaps": [ + 4, + 3 + ] + }, + { + "snap": 6, + "size": 1024, + "overlap": "[]", + "snaps": [ + 6, + 5 + ] + } + ] + }, + "errors": [ + "extra_clones" + ], + "extra clones": [ + 7 + ] + }, + { + "name": "obj6", + "nspace": "", + "locator": "", + "snap": "head", + "snapset": { + "snap_context": { + "seq": 1, + "snaps": [ + 1 + ] + }, + "clones": [] + }, + "errors": [ + "extra_clones" + ], + "extra clones": [ + 1 + ] + }, + { + "name": "obj7", + "nspace": "", + "locator": "", + "snap": "head", + "snapset": { + "snap_context": { + "seq": 0, + "snaps": [] + }, + "clones": [] + }, + "errors": [ + "extra_clones" + ], + "extra clones": [ + 1 + ] + }, + { + "name": "obj8", + "nspace": "", + "locator": "", + "snap": "head", + "snapset": { + "snap_context": { + "seq": 0, + "snaps": [ + 1 + ] + }, + "clones": [ + { + "snap": 1, + "size": 1032, + "overlap": "[]", + "snaps": [ + 1 + ] + } + ] + }, + "errors": [ + "snapset_error" + ] + }, + { + "name": "obj9", + "nspace": "", + "locator": "", + "snap": 
"head", + "snapset": { + "snap_context": { + "seq": 1, + "snaps": [ + 1 + ] + }, + "clones": [ + { + "snap": 1, + "size": "????", + "overlap": "[]", + "snaps": [ + 1 + ] + } + ] + }, + "errors": [] + } + ] +} +EOF +fi + + jq "$jqfilter" $dir/json | python -c "$sortkeys" > $dir/csjson + multidiff $dir/checkcsjson $dir/csjson || test $getjson = "yes" || return 1 + if test $getjson = "yes" + then + jq '.' $dir/json > save1.json + fi + + if test "$LOCALRUN" = "yes" && which jsonschema > /dev/null; + then + jsonschema -i $dir/json $CEPH_ROOT/doc/rados/command/list-inconsistent-snap.json || return 1 + fi + + pidfiles=$(find $dir 2>/dev/null | grep 'osd[^/]*\.pid') + pids="" + for pidfile in ${pidfiles} + do + pids+="$(cat $pidfile) " + done + + ERRORS=0 + + # When removing snapshots with a corrupt replica, it crashes. + # See http://tracker.ceph.com/issues/23875 + if [ $which = "primary" ]; + then + for i in `seq 1 7` + do + rados -p $poolname rmsnap snap$i + done + sleep 5 + local -i loop=0 + while ceph pg dump pgs | grep -q snaptrim; + do + if ceph pg dump pgs | grep -q snaptrim_error; + then + break + fi + sleep 2 + loop+=1 + if (( $loop >= 10 )) ; then + ERRORS=$(expr $ERRORS + 1) + break + fi + done + fi + ceph pg dump pgs + + for pid in $pids + do + if ! kill -0 $pid + then + echo "OSD Crash occurred" + ERRORS=$(expr $ERRORS + 1) + fi + done + + kill_daemons $dir || return 1 + + declare -a err_strings + err_strings[0]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] .*:::obj4:7 : missing" + err_strings[1]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] soid .*:::obj3:head : size 3840 != size 768 from auth oi" + err_strings[2]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] .*:::obj5:1 : missing" + err_strings[3]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] .*:::obj5:2 : missing" + err_strings[4]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] soid .*:::obj5:4 : size 4608 != size 512 from auth oi" + err_strings[5]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 soid .*:::obj5:7 : failed to pick suitable object info" + err_strings[6]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 shard [0-1] .*:::obj1:head : missing" + err_strings[7]="log_channel[(]cluster[)] log [[]ERR[]] : [0-9]*[.]0 scrub ${scruberrors} errors" + + for err_string in "${err_strings[@]}" + do + if ! grep "$err_string" $dir/osd.${primary}.log > /dev/null; + then + echo "Missing log message '$err_string'" + ERRORS=$(expr $ERRORS + 1) + fi + done + + # Check replica specific messages + declare -a rep_err_strings + osd=$(eval echo \$$which) + rep_err_strings[0]="log_channel[(]cluster[)] log [[]ERR[]] : osd[.][0-9]* found snap mapper error on pg 1.0 oid 1:461f8b5e:::obj16:7 snaps missing in mapper, should be: 1,2,3,4,5,6,7 was r -2...repaired" + for err_string in "${rep_err_strings[@]}" + do + if ! grep "$err_string" $dir/osd.${osd}.log > /dev/null; + then + echo "Missing log message '$err_string'" + ERRORS=$(expr $ERRORS + 1) + fi + done + + if [ $ERRORS != "0" ]; + then + echo "TEST FAILED WITH $ERRORS ERRORS" + return 1 + fi + + echo "TEST PASSED" + return 0 +} + +function TEST_scrub_snaps_replica() { + local dir=$1 + ORIG_ARGS=$CEPH_ARGS + CEPH_ARGS+=" --osd_scrub_chunk_min=3 --osd_scrub_chunk_max=3" + _scrub_snaps_multi $dir replica + err=$? 
+ CEPH_ARGS=$ORIG_ARGS + return $err +} + +function TEST_scrub_snaps_primary() { + local dir=$1 + ORIG_ARGS=$CEPH_ARGS + CEPH_ARGS+=" --osd_scrub_chunk_min=3 --osd_scrub_chunk_max=3" + _scrub_snaps_multi $dir primary + err=$? + CEPH_ARGS=$ORIG_ARGS + return $err +} + +main osd-scrub-snaps "$@" + +# Local Variables: +# compile-command: "cd build ; make -j4 && \ +# ../qa/run-standalone.sh osd-scrub-snaps.sh" +# End: diff --git a/qa/standalone/scrub/osd-scrub-test.sh b/qa/standalone/scrub/osd-scrub-test.sh new file mode 100755 index 00000000..7887969a --- /dev/null +++ b/qa/standalone/scrub/osd-scrub-test.sh @@ -0,0 +1,319 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2018 Red Hat +# +# Author: David Zafman +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7138" # git grep '\<7138\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + export -n CEPH_CLI_TEST_DUP_COMMAND + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + $func $dir || return 1 + done +} + +function TEST_scrub_test() { + local dir=$1 + local poolname=test + local OSDS=3 + local objects=15 + + TESTDATA="testdata.$$" + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=3 || return 1 + run_mgr $dir x || return 1 + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd || return 1 + done + + # Create a pool with a single pg + create_pool $poolname 1 1 + wait_for_clean || return 1 + poolid=$(ceph osd dump | grep "^pool.*[']${poolname}[']" | awk '{ print $2 }') + + dd if=/dev/urandom of=$TESTDATA bs=1032 count=1 + for i in `seq 1 $objects` + do + rados -p $poolname put obj${i} $TESTDATA + done + rm -f $TESTDATA + + local primary=$(get_primary $poolname obj1) + local otherosd=$(get_not_primary $poolname obj1) + if [ "$otherosd" = "2" ]; + then + local anotherosd="0" + else + local anotherosd="2" + fi + + objectstore_tool $dir $anotherosd obj1 set-bytes /etc/fstab + + local pgid="${poolid}.0" + pg_deep_scrub "$pgid" || return 1 + + ceph pg dump pgs | grep ^${pgid} | grep -q -- +inconsistent || return 1 + test "$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_scrub_errors')" = "2" || return 1 + + ceph osd out $primary + wait_for_clean || return 1 + + pg_deep_scrub "$pgid" || return 1 + + test "$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_scrub_errors')" = "2" || return 1 + test "$(ceph pg $pgid query | jq '.peer_info[0].stats.stat_sum.num_scrub_errors')" = "2" || return 1 + ceph pg dump pgs | grep ^${pgid} | grep -q -- +inconsistent || return 1 + + ceph osd in $primary + wait_for_clean || return 1 + + repair "$pgid" || return 1 + wait_for_clean || return 1 + + # This sets up the test after we've repaired with previous primary has old value + test "$(ceph pg $pgid query | jq '.peer_info[0].stats.stat_sum.num_scrub_errors')" = "2" || return 1 + ceph pg dump pgs | grep ^${pgid} | grep -vq -- +inconsistent || return 1 + 
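+ # Take the repaired former primary out so the PG re-peers; the stats reported afterwards should show zero scrub errors everywhere.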
+ ceph osd out $primary + wait_for_clean || return 1 + + test "$(ceph pg $pgid query | jq '.info.stats.stat_sum.num_scrub_errors')" = "0" || return 1 + test "$(ceph pg $pgid query | jq '.peer_info[0].stats.stat_sum.num_scrub_errors')" = "0" || return 1 + test "$(ceph pg $pgid query | jq '.peer_info[1].stats.stat_sum.num_scrub_errors')" = "0" || return 1 + ceph pg dump pgs | grep ^${pgid} | grep -vq -- +inconsistent || return 1 + + teardown $dir || return 1 +} + +# Grab year-month-day +DATESED="s/\([0-9]*-[0-9]*-[0-9]*\).*/\1/" +DATEFORMAT="%Y-%m-%d" + +function check_dump_scrubs() { + local primary=$1 + local sched_time_check="$2" + local deadline_check="$3" + + DS="$(CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${primary}) dump_scrubs)" + # use eval to drop double-quotes + eval SCHED_TIME=$(echo $DS | jq '.[0].sched_time') + test $(echo $SCHED_TIME | sed $DATESED) = $(date +${DATEFORMAT} -d "now + $sched_time_check") || return 1 + # use eval to drop double-quotes + eval DEADLINE=$(echo $DS | jq '.[0].deadline') + test $(echo $DEADLINE | sed $DATESED) = $(date +${DATEFORMAT} -d "now + $deadline_check") || return 1 +} + +function TEST_interval_changes() { + local poolname=test + local OSDS=2 + local objects=10 + # Don't assume how internal defaults are set + local day="$(expr 24 \* 60 \* 60)" + local week="$(expr $day \* 7)" + local min_interval=$day + local max_interval=$week + local WAIT_FOR_UPDATE=2 + + TESTDATA="testdata.$$" + + setup $dir || return 1 + # This min scrub interval results in 30 seconds backoff time + run_mon $dir a --osd_pool_default_size=$OSDS || return 1 + run_mgr $dir x || return 1 + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd --osd_scrub_min_interval=$min_interval --osd_scrub_max_interval=$max_interval --osd_scrub_interval_randomize_ratio=0 || return 1 + done + + # Create a pool with a single pg + create_pool $poolname 1 1 + wait_for_clean || return 1 + local poolid=$(ceph osd dump | grep "^pool.*[']${poolname}[']" | awk '{ print $2 }') + + dd if=/dev/urandom of=$TESTDATA bs=1032 count=1 + for i in `seq 1 $objects` + do + rados -p $poolname put obj${i} $TESTDATA + done + rm -f $TESTDATA + + local primary=$(get_primary $poolname obj1) + + # Check initial settings from above (min 1 day, min 1 week) + check_dump_scrubs $primary "1 day" "1 week" || return 1 + + # Change global osd_scrub_min_interval to 2 days + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${primary}) config set osd_scrub_min_interval $(expr $day \* 2) + sleep $WAIT_FOR_UPDATE + check_dump_scrubs $primary "2 days" "1 week" || return 1 + + # Change global osd_scrub_max_interval to 2 weeks + CEPH_ARGS='' ceph --admin-daemon $(get_asok_path osd.${primary}) config set osd_scrub_max_interval $(expr $week \* 2) + sleep $WAIT_FOR_UPDATE + check_dump_scrubs $primary "2 days" "2 week" || return 1 + + # Change pool osd_scrub_min_interval to 3 days + ceph osd pool set $poolname scrub_min_interval $(expr $day \* 3) + sleep $WAIT_FOR_UPDATE + check_dump_scrubs $primary "3 days" "2 week" || return 1 + + # Change pool osd_scrub_max_interval to 3 weeks + ceph osd pool set $poolname scrub_max_interval $(expr $week \* 3) + sleep $WAIT_FOR_UPDATE + check_dump_scrubs $primary "3 days" "3 week" || return 1 + + teardown $dir || return 1 +} + +function _scrub_abort() { + local dir=$1 + local poolname=test + local OSDS=3 + local objects=1000 + local type=$2 + + TESTDATA="testdata.$$" + if test $type = "scrub"; + then + stopscrub="noscrub" + check="noscrub" + else + stopscrub="nodeep-scrub" + 
check="nodeep_scrub" + fi + + + setup $dir || return 1 + run_mon $dir a --osd_pool_default_size=3 || return 1 + run_mgr $dir x || return 1 + for osd in $(seq 0 $(expr $OSDS - 1)) + do + run_osd $dir $osd --osd_pool_default_pg_autoscale_mode=off \ + --osd_deep_scrub_randomize_ratio=0.0 \ + --osd_scrub_sleep=5.0 \ + --osd_scrub_interval_randomize_ratio=0 || return 1 + done + + # Create a pool with a single pg + create_pool $poolname 1 1 + wait_for_clean || return 1 + poolid=$(ceph osd dump | grep "^pool.*[']${poolname}[']" | awk '{ print $2 }') + + dd if=/dev/urandom of=$TESTDATA bs=1032 count=1 + for i in `seq 1 $objects` + do + rados -p $poolname put obj${i} $TESTDATA + done + rm -f $TESTDATA + + local primary=$(get_primary $poolname obj1) + local pgid="${poolid}.0" + + CEPH_ARGS='' ceph daemon $(get_asok_path osd.$primary) trigger_$type $pgid + # deep-scrub won't start without scrub noticing + if [ "$type" = "deep_scrub" ]; + then + CEPH_ARGS='' ceph daemon $(get_asok_path osd.$primary) trigger_scrub $pgid + fi + + # Wait for scrubbing to start + set -o pipefail + found="no" + for i in $(seq 0 200) + do + flush_pg_stats + if ceph pg dump pgs | grep ^$pgid| grep -q "scrubbing" + then + found="yes" + #ceph pg dump pgs + break + fi + done + set +o pipefail + + if test $found = "no"; + then + echo "Scrubbing never started" + return 1 + fi + + ceph osd set $stopscrub + if [ "$type" = "deep_scrub" ]; + then + ceph osd set noscrub + fi + + # Wait for scrubbing to end + set -o pipefail + for i in $(seq 0 200) + do + flush_pg_stats + if ceph pg dump pgs | grep ^$pgid | grep -q "scrubbing" + then + continue + fi + #ceph pg dump pgs + break + done + set +o pipefail + + sleep 5 + + if ! grep "$check set, aborting" $dir/osd.${primary}.log + then + echo "Abort not seen in log" + return 1 + fi + + local last_scrub=$(get_last_scrub_stamp $pgid) + ceph config set osd "osd_scrub_sleep" "0.1" + + ceph osd unset $stopscrub + if [ "$type" = "deep_scrub" ]; + then + ceph osd unset noscrub + fi + TIMEOUT=$(($objects / 2)) + wait_for_scrub $pgid "$last_scrub" || return 1 + + teardown $dir || return 1 +} + +function TEST_scrub_abort() { + local dir=$1 + _scrub_abort $dir scrub +} + +function TEST_deep_scrub_abort() { + local dir=$1 + _scrub_abort $dir deep_scrub +} + +main osd-scrub-test "$@" + +# Local Variables: +# compile-command: "cd build ; make -j4 && \ +# ../qa/run-standalone.sh osd-scrub-test.sh" +# End: diff --git a/qa/standalone/scrub/osd-unexpected-clone.sh b/qa/standalone/scrub/osd-unexpected-clone.sh new file mode 100755 index 00000000..6895bfee --- /dev/null +++ b/qa/standalone/scrub/osd-unexpected-clone.sh @@ -0,0 +1,89 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2015 Intel +# Copyright (C) 2014, 2015 Red Hat +# +# Author: Xiaoxi Chen +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. 
+# + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export CEPH_MON="127.0.0.1:7144" # git grep '\<7144\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + export -n CEPH_CLI_TEST_DUP_COMMAND + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_recover_unexpected() { + local dir=$1 + + run_mon $dir a || return 1 + run_mgr $dir x || return 1 + run_osd $dir 0 || return 1 + run_osd $dir 1 || return 1 + run_osd $dir 2 || return 1 + + ceph osd pool create foo 1 + rados -p foo put foo /etc/passwd + rados -p foo mksnap snap + rados -p foo put foo /etc/group + + wait_for_clean || return 1 + + local osd=$(get_primary foo foo) + + JSON=`objectstore_tool $dir $osd --op list foo | grep snapid.:1` + echo "JSON is $JSON" + rm -f $dir/_ $dir/data + objectstore_tool $dir $osd "$JSON" get-attr _ > $dir/_ || return 1 + objectstore_tool $dir $osd "$JSON" get-bytes $dir/data || return 1 + + rados -p foo rmsnap snap + + sleep 5 + + objectstore_tool $dir $osd "$JSON" set-bytes $dir/data || return 1 + objectstore_tool $dir $osd "$JSON" set-attr _ $dir/_ || return 1 + + sleep 5 + + ceph pg repair 1.0 || return 1 + + sleep 10 + + ceph log last + + # make sure osds are still up + timeout 60 ceph tell osd.0 version || return 1 + timeout 60 ceph tell osd.1 version || return 1 + timeout 60 ceph tell osd.2 version || return 1 +} + + +main osd-unexpected-clone "$@" + +# Local Variables: +# compile-command: "cd ../.. ; make -j4 && test/osd/osd-bench.sh" +# End: diff --git a/qa/standalone/special/ceph_objectstore_tool.py b/qa/standalone/special/ceph_objectstore_tool.py new file mode 100755 index 00000000..111f4359 --- /dev/null +++ b/qa/standalone/special/ceph_objectstore_tool.py @@ -0,0 +1,2080 @@ +#!/usr/bin/env python + +from __future__ import print_function +from subprocess import call +try: + from subprocess import check_output +except ImportError: + def check_output(*popenargs, **kwargs): + import subprocess + # backported from python 2.7 stdlib + process = subprocess.Popen( + stdout=subprocess.PIPE, *popenargs, **kwargs) + output, unused_err = process.communicate() + retcode = process.poll() + if retcode: + cmd = kwargs.get("args") + if cmd is None: + cmd = popenargs[0] + error = subprocess.CalledProcessError(retcode, cmd) + error.output = output + raise error + return output + +import filecmp +import os +import subprocess +import math +import time +import sys +import re +import logging +import json +import tempfile +import platform + +try: + from subprocess import DEVNULL +except ImportError: + DEVNULL = open(os.devnull, "wb") + +logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING) + + +if sys.version_info[0] >= 3: + def decode(s): + return s.decode('utf-8') + + def check_output(*args, **kwargs): # noqa + return decode(subprocess.check_output(*args, **kwargs)) +else: + def decode(s): + return s + + + +def wait_for_health(): + print("Wait for health_ok...", end="") + tries = 0 + while call("{path}/ceph health 2> /dev/null | grep -v 'HEALTH_OK\|HEALTH_WARN' > /dev/null".format(path=CEPH_BIN), shell=True) == 0: + tries += 1 + if tries == 150: + raise Exception("Time exceeded to go to health") + time.sleep(1) + print("DONE") + + +def get_pool_id(name, nullfd): + cmd = "{path}/ceph osd pool stats 
{pool}".format(pool=name, path=CEPH_BIN).split() + # pool {pool} id # .... grab the 4 field + return check_output(cmd, stderr=nullfd).split()[3] + + +# return a list of unique PGS given an osd subdirectory +def get_osd_pgs(SUBDIR, ID): + PGS = [] + if ID: + endhead = re.compile("{id}.*_head$".format(id=ID)) + DIR = os.path.join(SUBDIR, "current") + PGS += [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and (ID is None or endhead.match(f))] + PGS = [re.sub("_head", "", p) for p in PGS if "_head" in p] + return PGS + + +# return a sorted list of unique PGs given a directory +def get_pgs(DIR, ID): + OSDS = [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and f.find("osd") == 0] + PGS = [] + for d in OSDS: + SUBDIR = os.path.join(DIR, d) + PGS += get_osd_pgs(SUBDIR, ID) + return sorted(set(PGS)) + + +# return a sorted list of PGS a subset of ALLPGS that contain objects with prefix specified +def get_objs(ALLPGS, prefix, DIR, ID): + OSDS = [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and f.find("osd") == 0] + PGS = [] + for d in OSDS: + DIRL2 = os.path.join(DIR, d) + SUBDIR = os.path.join(DIRL2, "current") + for p in ALLPGS: + PGDIR = p + "_head" + if not os.path.isdir(os.path.join(SUBDIR, PGDIR)): + continue + FINALDIR = os.path.join(SUBDIR, PGDIR) + # See if there are any objects there + if any(f for f in [val for _, _, fl in os.walk(FINALDIR) for val in fl] if f.startswith(prefix)): + PGS += [p] + return sorted(set(PGS)) + + +# return a sorted list of OSDS which have data from a given PG +def get_osds(PG, DIR): + ALLOSDS = [f for f in os.listdir(DIR) if os.path.isdir(os.path.join(DIR, f)) and f.find("osd") == 0] + OSDS = [] + for d in ALLOSDS: + DIRL2 = os.path.join(DIR, d) + SUBDIR = os.path.join(DIRL2, "current") + PGDIR = PG + "_head" + if not os.path.isdir(os.path.join(SUBDIR, PGDIR)): + continue + OSDS += [d] + return sorted(OSDS) + + +def get_lines(filename): + tmpfd = open(filename, "r") + line = True + lines = [] + while line: + line = tmpfd.readline().rstrip('\n') + if line: + lines += [line] + tmpfd.close() + os.unlink(filename) + return lines + + +def cat_file(level, filename): + if level < logging.getLogger().getEffectiveLevel(): + return + print("File: " + filename) + with open(filename, "r") as f: + while True: + line = f.readline().rstrip('\n') + if not line: + break + print(line) + print("") + + +def vstart(new, opt=""): + print("vstarting....", end="") + NEW = new and "-n" or "-N" + call("MON=1 OSD=4 MDS=0 MGR=1 CEPH_PORT=7400 MGR_PYTHON_PATH={path}/src/pybind/mgr {path}/src/vstart.sh --filestore --short -l {new} -d {opt} > /dev/null 2>&1".format(new=NEW, opt=opt, path=CEPH_ROOT), shell=True) + print("DONE") + + +def test_failure(cmd, errmsg, tty=False): + if tty: + try: + ttyfd = open("/dev/tty", "rwb") + except Exception as e: + logging.info(str(e)) + logging.info("SKIP " + cmd) + return 0 + TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid()) + tmpfd = open(TMPFILE, "wb") + + logging.debug(cmd) + if tty: + ret = call(cmd, shell=True, stdin=ttyfd, stdout=ttyfd, stderr=tmpfd) + ttyfd.close() + else: + ret = call(cmd, shell=True, stderr=tmpfd) + tmpfd.close() + if ret == 0: + logging.error(cmd) + logging.error("Should have failed, but got exit 0") + return 1 + lines = get_lines(TMPFILE) + matched = [ l for l in lines if errmsg in l ] + if any(matched): + logging.info("Correctly failed with message \"" + matched[0] + "\"") + return 0 + else: + logging.error("Command: " + cmd ) + logging.error("Bad messages to stderr 
\"" + str(lines) + "\"") + logging.error("Expected \"" + errmsg + "\"") + return 1 + + +def get_nspace(num): + if num == 0: + return "" + return "ns{num}".format(num=num) + + +def verify(DATADIR, POOL, NAME_PREFIX, db): + TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid()) + ERRORS = 0 + for rawnsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(NAME_PREFIX) == 0]: + nsfile = rawnsfile.split("__")[0] + clone = rawnsfile.split("__")[1] + nspace = nsfile.split("-")[0] + file = nsfile.split("-")[1] + # Skip clones + if clone != "head": + continue + path = os.path.join(DATADIR, rawnsfile) + try: + os.unlink(TMPFILE) + except: + pass + cmd = "{path}/rados -p {pool} -N '{nspace}' get {file} {out}".format(pool=POOL, file=file, out=TMPFILE, nspace=nspace, path=CEPH_BIN) + logging.debug(cmd) + call(cmd, shell=True, stdout=DEVNULL, stderr=DEVNULL) + cmd = "diff -q {src} {result}".format(src=path, result=TMPFILE) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + logging.error("{file} data not imported properly".format(file=file)) + ERRORS += 1 + try: + os.unlink(TMPFILE) + except: + pass + for key, val in db[nspace][file]["xattr"].items(): + cmd = "{path}/rados -p {pool} -N '{nspace}' getxattr {name} {key}".format(pool=POOL, name=file, key=key, nspace=nspace, path=CEPH_BIN) + logging.debug(cmd) + getval = check_output(cmd, shell=True, stderr=DEVNULL) + logging.debug("getxattr {key} {val}".format(key=key, val=getval)) + if getval != val: + logging.error("getxattr of key {key} returned wrong val: {get} instead of {orig}".format(key=key, get=getval, orig=val)) + ERRORS += 1 + continue + hdr = db[nspace][file].get("omapheader", "") + cmd = "{path}/rados -p {pool} -N '{nspace}' getomapheader {name} {file}".format(pool=POOL, name=file, nspace=nspace, file=TMPFILE, path=CEPH_BIN) + logging.debug(cmd) + ret = call(cmd, shell=True, stderr=DEVNULL) + if ret != 0: + logging.error("rados getomapheader returned {ret}".format(ret=ret)) + ERRORS += 1 + else: + getlines = get_lines(TMPFILE) + assert(len(getlines) == 0 or len(getlines) == 1) + if len(getlines) == 0: + gethdr = "" + else: + gethdr = getlines[0] + logging.debug("header: {hdr}".format(hdr=gethdr)) + if gethdr != hdr: + logging.error("getomapheader returned wrong val: {get} instead of {orig}".format(get=gethdr, orig=hdr)) + ERRORS += 1 + for key, val in db[nspace][file]["omap"].items(): + cmd = "{path}/rados -p {pool} -N '{nspace}' getomapval {name} {key} {file}".format(pool=POOL, name=file, key=key, nspace=nspace, file=TMPFILE, path=CEPH_BIN) + logging.debug(cmd) + ret = call(cmd, shell=True, stderr=DEVNULL) + if ret != 0: + logging.error("getomapval returned {ret}".format(ret=ret)) + ERRORS += 1 + continue + getlines = get_lines(TMPFILE) + if len(getlines) != 1: + logging.error("Bad data from getomapval {lines}".format(lines=getlines)) + ERRORS += 1 + continue + getval = getlines[0] + logging.debug("getomapval {key} {val}".format(key=key, val=getval)) + if getval != val: + logging.error("getomapval returned wrong val: {get} instead of {orig}".format(get=getval, orig=val)) + ERRORS += 1 + try: + os.unlink(TMPFILE) + except: + pass + return ERRORS + + +def check_journal(jsondict): + errors = 0 + if 'header' not in jsondict: + logging.error("Key 'header' not in dump-journal") + errors += 1 + elif 'max_size' not in jsondict['header']: + logging.error("Key 'max_size' not in dump-journal header") + errors += 1 + else: + print("\tJournal max_size = {size}".format(size=jsondict['header']['max_size'])) + if 'entries' not in 
jsondict: + logging.error("Key 'entries' not in dump-journal output") + errors += 1 + elif len(jsondict['entries']) == 0: + logging.info("No entries in journal found") + else: + errors += check_journal_entries(jsondict['entries']) + return errors + + +def check_journal_entries(entries): + errors = 0 + for enum in range(len(entries)): + if 'offset' not in entries[enum]: + logging.error("No 'offset' key in entry {e}".format(e=enum)) + errors += 1 + if 'seq' not in entries[enum]: + logging.error("No 'seq' key in entry {e}".format(e=enum)) + errors += 1 + if 'transactions' not in entries[enum]: + logging.error("No 'transactions' key in entry {e}".format(e=enum)) + errors += 1 + elif len(entries[enum]['transactions']) == 0: + logging.error("No transactions found in entry {e}".format(e=enum)) + errors += 1 + else: + errors += check_entry_transactions(entries[enum], enum) + return errors + + +def check_entry_transactions(entry, enum): + errors = 0 + for tnum in range(len(entry['transactions'])): + if 'trans_num' not in entry['transactions'][tnum]: + logging.error("Key 'trans_num' missing from entry {e} trans {t}".format(e=enum, t=tnum)) + errors += 1 + elif entry['transactions'][tnum]['trans_num'] != tnum: + ft = entry['transactions'][tnum]['trans_num'] + logging.error("Bad trans_num ({ft}) entry {e} trans {t}".format(ft=ft, e=enum, t=tnum)) + errors += 1 + if 'ops' not in entry['transactions'][tnum]: + logging.error("Key 'ops' missing from entry {e} trans {t}".format(e=enum, t=tnum)) + errors += 1 + else: + errors += check_transaction_ops(entry['transactions'][tnum]['ops'], enum, tnum) + return errors + + +def check_transaction_ops(ops, enum, tnum): + if len(ops) == 0: + logging.warning("No ops found in entry {e} trans {t}".format(e=enum, t=tnum)) + errors = 0 + for onum in range(len(ops)): + if 'op_num' not in ops[onum]: + logging.error("Key 'op_num' missing from entry {e} trans {t} op {o}".format(e=enum, t=tnum, o=onum)) + errors += 1 + elif ops[onum]['op_num'] != onum: + fo = ops[onum]['op_num'] + logging.error("Bad op_num ({fo}) from entry {e} trans {t} op {o}".format(fo=fo, e=enum, t=tnum, o=onum)) + errors += 1 + if 'op_name' not in ops[onum]: + logging.error("Key 'op_name' missing from entry {e} trans {t} op {o}".format(e=enum, t=tnum, o=onum)) + errors += 1 + return errors + + +def test_dump_journal(CFSD_PREFIX, osds): + ERRORS = 0 + pid = os.getpid() + TMPFILE = r"/tmp/tmp.{pid}".format(pid=pid) + + for osd in osds: + # Test --op dump-journal by loading json + cmd = (CFSD_PREFIX + "--op dump-journal --format json").format(osd=osd) + logging.debug(cmd) + tmpfd = open(TMPFILE, "wb") + ret = call(cmd, shell=True, stdout=tmpfd) + if ret != 0: + logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd)) + ERRORS += 1 + continue + tmpfd.close() + tmpfd = open(TMPFILE, "r") + jsondict = json.load(tmpfd) + tmpfd.close() + os.unlink(TMPFILE) + + journal_errors = check_journal(jsondict) + if journal_errors != 0: + logging.error(jsondict) + ERRORS += journal_errors + + return ERRORS + +CEPH_BUILD_DIR = os.environ.get('CEPH_BUILD_DIR') +CEPH_BIN = os.environ.get('CEPH_BIN') +CEPH_ROOT = os.environ.get('CEPH_ROOT') + +if not CEPH_BUILD_DIR: + CEPH_BUILD_DIR=os.getcwd() + os.putenv('CEPH_BUILD_DIR', CEPH_BUILD_DIR) + CEPH_BIN=os.path.join(CEPH_BUILD_DIR, 'bin') + os.putenv('CEPH_BIN', CEPH_BIN) + CEPH_ROOT=os.path.dirname(CEPH_BUILD_DIR) + os.putenv('CEPH_ROOT', CEPH_ROOT) + CEPH_LIB=os.path.join(CEPH_BUILD_DIR, 'lib') + os.putenv('CEPH_LIB', CEPH_LIB) + +try: + os.mkdir("td") 
+except: + pass # ok if this is already there +CEPH_DIR = os.path.join(CEPH_BUILD_DIR, os.path.join("td", "cot_dir")) +CEPH_CONF = os.path.join(CEPH_DIR, 'ceph.conf') + +def kill_daemons(): + call("{path}/init-ceph -c {conf} stop > /dev/null 2>&1".format(conf=CEPH_CONF, path=CEPH_BIN), shell=True) + + +def check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME): + repcount = 0 + ERRORS = 0 + for rawnsfile in [f for f in os.listdir(DATADIR) if f.split('-')[1].find(SPLIT_NAME) == 0]: + nsfile = rawnsfile.split("__")[0] + clone = rawnsfile.split("__")[1] + nspace = nsfile.split("-")[0] + file = nsfile.split("-")[1] + "__" + clone + # Skip clones + if clone != "head": + continue + path = os.path.join(DATADIR, rawnsfile) + tmpfd = open(TMPFILE, "wb") + cmd = "find {dir} -name '{file}_*_{nspace}_*'".format(dir=OSDDIR, file=file, nspace=nspace) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=tmpfd) + if ret: + logging.critical("INTERNAL ERROR") + return 1 + tmpfd.close() + obj_locs = get_lines(TMPFILE) + if len(obj_locs) == 0: + logging.error("Can't find imported object {name}".format(name=file)) + ERRORS += 1 + for obj_loc in obj_locs: + # For btrfs skip snap_* dirs + if re.search("/snap_[0-9]*/", obj_loc) is not None: + continue + repcount += 1 + cmd = "diff -q {src} {obj_loc}".format(src=path, obj_loc=obj_loc) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + logging.error("{file} data not imported properly into {obj}".format(file=file, obj=obj_loc)) + ERRORS += 1 + return ERRORS, repcount + + +def set_osd_weight(CFSD_PREFIX, osd_ids, osd_path, weight): + # change the weight of osd.0 to math.pi in the newest osdmap of given osd + osdmap_file = tempfile.NamedTemporaryFile(delete=True) + cmd = (CFSD_PREFIX + "--op get-osdmap --file {osdmap_file}").format(osd=osd_path, + osdmap_file=osdmap_file.name) + output = check_output(cmd, shell=True) + epoch = int(re.findall('#(\d+)', output)[0]) + + new_crush_file = tempfile.NamedTemporaryFile(delete=True) + old_crush_file = tempfile.NamedTemporaryFile(delete=True) + ret = call("{path}/osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name, + crush_file=old_crush_file.name, path=CEPH_BIN), + stdout=DEVNULL, + stderr=DEVNULL, + shell=True) + assert(ret == 0) + + for osd_id in osd_ids: + cmd = "{path}/crushtool -i {crush_file} --reweight-item osd.{osd} {weight} -o {new_crush_file}".format(osd=osd_id, + crush_file=old_crush_file.name, + weight=weight, + new_crush_file=new_crush_file.name, path=CEPH_BIN) + ret = call(cmd, stdout=DEVNULL, shell=True) + assert(ret == 0) + old_crush_file, new_crush_file = new_crush_file, old_crush_file + + # change them back, since we don't need to preapre for another round + old_crush_file, new_crush_file = new_crush_file, old_crush_file + old_crush_file.close() + + ret = call("{path}/osdmaptool --import-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name, + crush_file=new_crush_file.name, path=CEPH_BIN), + stdout=DEVNULL, + stderr=DEVNULL, + shell=True) + assert(ret == 0) + + # Minimum test of --dry-run by using it, but not checking anything + cmd = CFSD_PREFIX + "--op set-osdmap --file {osdmap_file} --epoch {epoch} --force --dry-run" + cmd = cmd.format(osd=osd_path, osdmap_file=osdmap_file.name, epoch=epoch) + ret = call(cmd, stdout=DEVNULL, shell=True) + assert(ret == 0) + + # osdmaptool increases the epoch of the changed osdmap, so we need to force the tool + # to use use a different epoch than the one in osdmap + cmd = CFSD_PREFIX + "--op set-osdmap 
--file {osdmap_file} --epoch {epoch} --force" + cmd = cmd.format(osd=osd_path, osdmap_file=osdmap_file.name, epoch=epoch) + ret = call(cmd, stdout=DEVNULL, shell=True) + + return ret == 0 + +def get_osd_weights(CFSD_PREFIX, osd_ids, osd_path): + osdmap_file = tempfile.NamedTemporaryFile(delete=True) + cmd = (CFSD_PREFIX + "--op get-osdmap --file {osdmap_file}").format(osd=osd_path, + osdmap_file=osdmap_file.name) + ret = call(cmd, stdout=DEVNULL, shell=True) + if ret != 0: + return None + # we have to read the weights from the crush map, even we can query the weights using + # osdmaptool, but please keep in mind, they are different: + # item weights in crush map versus weight associated with each osd in osdmap + crush_file = tempfile.NamedTemporaryFile(delete=True) + ret = call("{path}/osdmaptool --export-crush {crush_file} {osdmap_file}".format(osdmap_file=osdmap_file.name, + crush_file=crush_file.name, path=CEPH_BIN), + stdout=DEVNULL, + shell=True) + assert(ret == 0) + output = check_output("{path}/crushtool --tree -i {crush_file} | tail -n {num_osd}".format(crush_file=crush_file.name, + num_osd=len(osd_ids), path=CEPH_BIN), + stderr=DEVNULL, + shell=True) + weights = [] + for line in output.strip().split('\n'): + print(line) + linev = re.split('\s+', line) + if linev[0] == '': + linev.pop(0) + print('linev %s' % linev) + weights.append(float(linev[2])) + + return weights + + +def test_get_set_osdmap(CFSD_PREFIX, osd_ids, osd_paths): + print("Testing get-osdmap and set-osdmap") + errors = 0 + kill_daemons() + weight = 1 / math.e # just some magic number in [0, 1] + changed = [] + for osd_path in osd_paths: + if set_osd_weight(CFSD_PREFIX, osd_ids, osd_path, weight): + changed.append(osd_path) + else: + logging.warning("Failed to change the weights: {0}".format(osd_path)) + # i am pissed off if none of the store gets changed + if not changed: + errors += 1 + + for osd_path in changed: + weights = get_osd_weights(CFSD_PREFIX, osd_ids, osd_path) + if not weights: + errors += 1 + continue + if any(abs(w - weight) > 1e-5 for w in weights): + logging.warning("Weight is not changed: {0} != {1}".format(weights, weight)) + errors += 1 + return errors + +def test_get_set_inc_osdmap(CFSD_PREFIX, osd_path): + # incrementals are not used unless we need to build an MOSDMap to update + # OSD's peers, so an obvious way to test it is simply overwrite an epoch + # with a different copy, and read it back to see if it matches. 
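+ # Round trip, roughly: back up the previous epoch's incremental, overwrite it
+ # with the newest incremental (--force), dry-run a restore that must not change
+ # anything, read the epoch back and compare it with the overwriting copy, then
+ # restore the backup so the store is left as it was found.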
+ kill_daemons() + file_e2 = tempfile.NamedTemporaryFile(delete=True) + cmd = (CFSD_PREFIX + "--op get-inc-osdmap --file {file}").format(osd=osd_path, + file=file_e2.name) + output = check_output(cmd, shell=True) + epoch = int(re.findall('#(\d+)', output)[0]) + # backup e1 incremental before overwriting it + epoch -= 1 + file_e1_backup = tempfile.NamedTemporaryFile(delete=True) + cmd = CFSD_PREFIX + "--op get-inc-osdmap --epoch {epoch} --file {file}" + ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True) + if ret: return 1 + # overwrite e1 with e2 + cmd = CFSD_PREFIX + "--op set-inc-osdmap --force --epoch {epoch} --file {file}" + ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e2.name), shell=True) + if ret: return 1 + # Use dry-run to set back to e1 which shouldn't happen + cmd = CFSD_PREFIX + "--op set-inc-osdmap --dry-run --epoch {epoch} --file {file}" + ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True) + if ret: return 1 + # read from e1 + file_e1_read = tempfile.NamedTemporaryFile(delete=True) + cmd = CFSD_PREFIX + "--op get-inc-osdmap --epoch {epoch} --file {file}" + ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_read.name), shell=True) + if ret: return 1 + errors = 0 + try: + if not filecmp.cmp(file_e2.name, file_e1_read.name, shallow=False): + logging.error("{{get,set}}-inc-osdmap mismatch {0} != {1}".format(file_e2.name, file_e1_read.name)) + errors += 1 + finally: + # revert the change with file_e1_backup + cmd = CFSD_PREFIX + "--op set-inc-osdmap --epoch {epoch} --file {file}" + ret = call(cmd.format(osd=osd_path, epoch=epoch, file=file_e1_backup.name), shell=True) + if ret: + logging.error("Failed to revert the changed inc-osdmap") + errors += 1 + + return errors + + +def test_removeall(CFSD_PREFIX, db, OBJREPPGS, REP_POOL, CEPH_BIN, OSDDIR, REP_NAME, NUM_CLONED_REP_OBJECTS): + # Test removeall + TMPFILE = r"/tmp/tmp.{pid}".format(pid=os.getpid()) + nullfd = open(os.devnull, "w") + errors=0 + print("Test removeall") + kill_daemons() + test_force_remove = 0 + for nspace in db.keys(): + for basename in db[nspace].keys(): + JSON = db[nspace][basename]['json'] + for pg in OBJREPPGS: + OSDS = get_osds(pg, OSDDIR) + for osd in OSDS: + DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg)))) + fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f)) + and f.split("_")[0] == basename and f.split("_")[4] == nspace] + if not fnames: + continue + + if int(basename.split(REP_NAME)[1]) <= int(NUM_CLONED_REP_OBJECTS): + cmd = (CFSD_PREFIX + "'{json}' remove").format(osd=osd, json=JSON) + errors += test_failure(cmd, "Snapshots are present, use removeall to delete everything") + if not test_force_remove: + + cmd = (CFSD_PREFIX + " '{json}' set-attr snapset /dev/null").format(osd=osd, json=JSON) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + if ret != 0: + logging.error("Test set-up to corrupt snapset failed for {json}".format(json=JSON)) + errors += 1 + # Do the removeall since this test failed to set-up + else: + test_force_remove = 1 + + cmd = (CFSD_PREFIX + " '{json}' --force remove").format(osd=osd, json=JSON) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + if ret != 0: + logging.error("forced remove with corrupt snapset failed for {json}".format(json=JSON)) + errors += 1 + continue + + cmd = (CFSD_PREFIX + " --force --dry-run '{json}' remove").format(osd=osd, 
json=JSON) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + if ret != 0: + logging.error("remove with --force failed for {json}".format(json=JSON)) + errors += 1 + + cmd = (CFSD_PREFIX + " --dry-run '{json}' removeall").format(osd=osd, json=JSON) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + if ret != 0: + logging.error("removeall failed for {json}".format(json=JSON)) + errors += 1 + + cmd = (CFSD_PREFIX + " '{json}' removeall").format(osd=osd, json=JSON) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + if ret != 0: + logging.error("removeall failed for {json}".format(json=JSON)) + errors += 1 + + tmpfd = open(TMPFILE, "w") + cmd = (CFSD_PREFIX + "--op list --pgid {pg} --namespace {ns} {name}").format(osd=osd, pg=pg, ns=nspace, name=basename) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=tmpfd) + if ret != 0: + logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd)) + errors += 1 + tmpfd.close() + lines = get_lines(TMPFILE) + if len(lines) != 0: + logging.error("Removeall didn't remove all objects {ns}/{name} : {lines}".format(ns=nspace, name=basename, lines=lines)) + errors += 1 + vstart(new=False) + wait_for_health() + cmd = "{path}/rados -p {pool} rmsnap snap1".format(pool=REP_POOL, path=CEPH_BIN) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + if ret != 0: + logging.error("rados rmsnap failed") + errors += 1 + time.sleep(2) + wait_for_health() + return errors + + +def main(argv): + if sys.version_info[0] < 3: + sys.stdout = stdout = os.fdopen(sys.stdout.fileno(), 'wb', 0) + else: + stdout = sys.stdout.buffer + if len(argv) > 1 and argv[1] == "debug": + nullfd = stdout + else: + nullfd = DEVNULL + + call("rm -fr {dir}; mkdir -p {dir}".format(dir=CEPH_DIR), shell=True) + os.chdir(CEPH_DIR) + os.environ["CEPH_DIR"] = CEPH_DIR + OSDDIR = "dev" + REP_POOL = "rep_pool" + REP_NAME = "REPobject" + EC_POOL = "ec_pool" + EC_NAME = "ECobject" + if len(argv) > 0 and argv[0] == 'large': + PG_COUNT = 12 + NUM_REP_OBJECTS = 200 + NUM_CLONED_REP_OBJECTS = 50 + NUM_EC_OBJECTS = 12 + NUM_NSPACES = 4 + # Larger data sets for first object per namespace + DATALINECOUNT = 50000 + # Number of objects to do xattr/omap testing on + ATTR_OBJS = 10 + else: + PG_COUNT = 4 + NUM_REP_OBJECTS = 2 + NUM_CLONED_REP_OBJECTS = 2 + NUM_EC_OBJECTS = 2 + NUM_NSPACES = 2 + # Larger data sets for first object per namespace + DATALINECOUNT = 10 + # Number of objects to do xattr/omap testing on + ATTR_OBJS = 2 + ERRORS = 0 + pid = os.getpid() + TESTDIR = "/tmp/test.{pid}".format(pid=pid) + DATADIR = "/tmp/data.{pid}".format(pid=pid) + CFSD_PREFIX = CEPH_BIN + "/ceph-objectstore-tool --no-mon-config --data-path " + OSDDIR + "/{osd} " + PROFNAME = "testecprofile" + + os.environ['CEPH_CONF'] = CEPH_CONF + vstart(new=True) + wait_for_health() + + cmd = "{path}/ceph osd pool create {pool} {pg} {pg} replicated".format(pool=REP_POOL, pg=PG_COUNT, path=CEPH_BIN) + logging.debug(cmd) + call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + time.sleep(2) + REPID = get_pool_id(REP_POOL, nullfd) + + print("Created Replicated pool #{repid}".format(repid=REPID)) + + cmd = "{path}/ceph osd erasure-code-profile set {prof} crush-failure-domain=osd".format(prof=PROFNAME, path=CEPH_BIN) + logging.debug(cmd) + call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + cmd = "{path}/ceph osd erasure-code-profile get {prof}".format(prof=PROFNAME, path=CEPH_BIN) + logging.debug(cmd) + 
call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + cmd = "{path}/ceph osd pool create {pool} {pg} {pg} erasure {prof}".format(pool=EC_POOL, prof=PROFNAME, pg=PG_COUNT, path=CEPH_BIN) + logging.debug(cmd) + call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + ECID = get_pool_id(EC_POOL, nullfd) + + print("Created Erasure coded pool #{ecid}".format(ecid=ECID)) + + print("Creating {objs} objects in replicated pool".format(objs=(NUM_REP_OBJECTS*NUM_NSPACES))) + cmd = "mkdir -p {datadir}".format(datadir=DATADIR) + logging.debug(cmd) + call(cmd, shell=True) + + db = {} + + objects = range(1, NUM_REP_OBJECTS + 1) + nspaces = range(NUM_NSPACES) + for n in nspaces: + nspace = get_nspace(n) + + db[nspace] = {} + + for i in objects: + NAME = REP_NAME + "{num}".format(num=i) + LNAME = nspace + "-" + NAME + DDNAME = os.path.join(DATADIR, LNAME) + DDNAME += "__head" + + cmd = "rm -f " + DDNAME + logging.debug(cmd) + call(cmd, shell=True) + + if i == 1: + dataline = range(DATALINECOUNT) + else: + dataline = range(1) + fd = open(DDNAME, "w") + data = "This is the replicated data for " + LNAME + "\n" + for _ in dataline: + fd.write(data) + fd.close() + + cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN) + logging.debug(cmd) + ret = call(cmd, shell=True, stderr=nullfd) + if ret != 0: + logging.critical("Rados put command failed with {ret}".format(ret=ret)) + return 1 + + db[nspace][NAME] = {} + + if i < ATTR_OBJS + 1: + keys = range(i) + else: + keys = range(0) + db[nspace][NAME]["xattr"] = {} + for k in keys: + if k == 0: + continue + mykey = "key{i}-{k}".format(i=i, k=k) + myval = "val{i}-{k}".format(i=i, k=k) + cmd = "{path}/rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + logging.error("setxattr failed with {ret}".format(ret=ret)) + ERRORS += 1 + db[nspace][NAME]["xattr"][mykey] = myval + + # Create omap header in all objects but REPobject1 + if i < ATTR_OBJS + 1 and i != 1: + myhdr = "hdr{i}".format(i=i) + cmd = "{path}/rados -p {pool} -N '{nspace}' setomapheader {name} {hdr}".format(pool=REP_POOL, name=NAME, hdr=myhdr, nspace=nspace, path=CEPH_BIN) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + logging.critical("setomapheader failed with {ret}".format(ret=ret)) + ERRORS += 1 + db[nspace][NAME]["omapheader"] = myhdr + + db[nspace][NAME]["omap"] = {} + for k in keys: + if k == 0: + continue + mykey = "okey{i}-{k}".format(i=i, k=k) + myval = "oval{i}-{k}".format(i=i, k=k) + cmd = "{path}/rados -p {pool} -N '{nspace}' setomapval {name} {key} {val}".format(pool=REP_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + logging.critical("setomapval failed with {ret}".format(ret=ret)) + db[nspace][NAME]["omap"][mykey] = myval + + # Create some clones + cmd = "{path}/rados -p {pool} mksnap snap1".format(pool=REP_POOL, path=CEPH_BIN) + logging.debug(cmd) + call(cmd, shell=True) + + objects = range(1, NUM_CLONED_REP_OBJECTS + 1) + nspaces = range(NUM_NSPACES) + for n in nspaces: + nspace = get_nspace(n) + + for i in objects: + NAME = REP_NAME + "{num}".format(num=i) + LNAME = nspace + "-" + NAME + DDNAME = os.path.join(DATADIR, LNAME) + # First clone + CLONENAME = DDNAME + "__1" + DDNAME += "__head" + + cmd = "mv -f " + DDNAME + " " + CLONENAME + 
logging.debug(cmd) + call(cmd, shell=True) + + if i == 1: + dataline = range(DATALINECOUNT) + else: + dataline = range(1) + fd = open(DDNAME, "w") + data = "This is the replicated data after a snapshot for " + LNAME + "\n" + for _ in dataline: + fd.write(data) + fd.close() + + cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=REP_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN) + logging.debug(cmd) + ret = call(cmd, shell=True, stderr=nullfd) + if ret != 0: + logging.critical("Rados put command failed with {ret}".format(ret=ret)) + return 1 + + print("Creating {objs} objects in erasure coded pool".format(objs=(NUM_EC_OBJECTS*NUM_NSPACES))) + + objects = range(1, NUM_EC_OBJECTS + 1) + nspaces = range(NUM_NSPACES) + for n in nspaces: + nspace = get_nspace(n) + + for i in objects: + NAME = EC_NAME + "{num}".format(num=i) + LNAME = nspace + "-" + NAME + DDNAME = os.path.join(DATADIR, LNAME) + DDNAME += "__head" + + cmd = "rm -f " + DDNAME + logging.debug(cmd) + call(cmd, shell=True) + + if i == 1: + dataline = range(DATALINECOUNT) + else: + dataline = range(1) + fd = open(DDNAME, "w") + data = "This is the erasure coded data for " + LNAME + "\n" + for j in dataline: + fd.write(data) + fd.close() + + cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=EC_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN) + logging.debug(cmd) + ret = call(cmd, shell=True, stderr=nullfd) + if ret != 0: + logging.critical("Erasure coded pool creation failed with {ret}".format(ret=ret)) + return 1 + + db[nspace][NAME] = {} + + db[nspace][NAME]["xattr"] = {} + if i < ATTR_OBJS + 1: + keys = range(i) + else: + keys = range(0) + for k in keys: + if k == 0: + continue + mykey = "key{i}-{k}".format(i=i, k=k) + myval = "val{i}-{k}".format(i=i, k=k) + cmd = "{path}/rados -p {pool} -N '{nspace}' setxattr {name} {key} {val}".format(pool=EC_POOL, name=NAME, key=mykey, val=myval, nspace=nspace, path=CEPH_BIN) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + logging.error("setxattr failed with {ret}".format(ret=ret)) + ERRORS += 1 + db[nspace][NAME]["xattr"][mykey] = myval + + # Omap isn't supported in EC pools + db[nspace][NAME]["omap"] = {} + + logging.debug(db) + + kill_daemons() + + if ERRORS: + logging.critical("Unable to set up test") + return 1 + + ALLREPPGS = get_pgs(OSDDIR, REPID) + logging.debug(ALLREPPGS) + ALLECPGS = get_pgs(OSDDIR, ECID) + logging.debug(ALLECPGS) + + OBJREPPGS = get_objs(ALLREPPGS, REP_NAME, OSDDIR, REPID) + logging.debug(OBJREPPGS) + OBJECPGS = get_objs(ALLECPGS, EC_NAME, OSDDIR, ECID) + logging.debug(OBJECPGS) + + ONEPG = ALLREPPGS[0] + logging.debug(ONEPG) + osds = get_osds(ONEPG, OSDDIR) + ONEOSD = osds[0] + logging.debug(ONEOSD) + + print("Test invalid parameters") + # On export can't use stdout to a terminal + cmd = (CFSD_PREFIX + "--op export --pgid {pg}").format(osd=ONEOSD, pg=ONEPG) + ERRORS += test_failure(cmd, "stdout is a tty and no --file filename specified", tty=True) + + # On export can't use stdout to a terminal + cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file -").format(osd=ONEOSD, pg=ONEPG) + ERRORS += test_failure(cmd, "stdout is a tty and no --file filename specified", tty=True) + + # Prep a valid ec export file for import failure tests + ONEECPG = ALLECPGS[0] + osds = get_osds(ONEECPG, OSDDIR) + ONEECOSD = osds[0] + OTHERFILE = "/tmp/foo.{pid}".format(pid=pid) + cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=ONEECOSD, pg=ONEECPG, file=OTHERFILE) + 
logging.debug(cmd) + call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + + os.unlink(OTHERFILE) + + # Prep a valid export file for import failure tests + OTHERFILE = "/tmp/foo.{pid}".format(pid=pid) + cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=ONEOSD, pg=ONEPG, file=OTHERFILE) + logging.debug(cmd) + call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + + # On import can't specify a different pgid than the file + TMPPG="{pool}.80".format(pool=REPID) + cmd = (CFSD_PREFIX + "--op import --pgid 12.dd --file {file}").format(osd=ONEOSD, pg=TMPPG, file=OTHERFILE) + ERRORS += test_failure(cmd, "specified pgid 12.dd does not match actual pgid") + + os.unlink(OTHERFILE) + cmd = (CFSD_PREFIX + "--op import --file {FOO}").format(osd=ONEOSD, FOO=OTHERFILE) + ERRORS += test_failure(cmd, "file: {FOO}: No such file or directory".format(FOO=OTHERFILE)) + + cmd = "{path}/ceph-objectstore-tool --no-mon-config --data-path BAD_DATA_PATH --op list".format(osd=ONEOSD, path=CEPH_BIN) + ERRORS += test_failure(cmd, "data-path: BAD_DATA_PATH: No such file or directory") + + cmd = (CFSD_PREFIX + "--journal-path BAD_JOURNAL_PATH --op list").format(osd=ONEOSD) + ERRORS += test_failure(cmd, "journal-path: BAD_JOURNAL_PATH: No such file or directory") + + cmd = (CFSD_PREFIX + "--journal-path /bin --op list").format(osd=ONEOSD) + ERRORS += test_failure(cmd, "journal-path: /bin: (21) Is a directory") + + # On import can't use stdin from a terminal + cmd = (CFSD_PREFIX + "--op import --pgid {pg}").format(osd=ONEOSD, pg=ONEPG) + ERRORS += test_failure(cmd, "stdin is a tty and no --file filename specified", tty=True) + + # On import can't use stdin from a terminal + cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file -").format(osd=ONEOSD, pg=ONEPG) + ERRORS += test_failure(cmd, "stdin is a tty and no --file filename specified", tty=True) + + # Specify a bad --type + os.mkdir(OSDDIR + "/fakeosd") + cmd = ("{path}/ceph-objectstore-tool --no-mon-config --data-path " + OSDDIR + "/{osd} --type foobar --op list --pgid {pg}").format(osd="fakeosd", pg=ONEPG, path=CEPH_BIN) + ERRORS += test_failure(cmd, "Unable to create store of type foobar") + + # Don't specify a data-path + cmd = "{path}/ceph-objectstore-tool --no-mon-config --type memstore --op list --pgid {pg}".format(dir=OSDDIR, osd=ONEOSD, pg=ONEPG, path=CEPH_BIN) + ERRORS += test_failure(cmd, "Must provide --data-path") + + cmd = (CFSD_PREFIX + "--op remove --pgid 2.0").format(osd=ONEOSD) + ERRORS += test_failure(cmd, "Please use export-remove or you must use --force option") + + cmd = (CFSD_PREFIX + "--force --op remove").format(osd=ONEOSD) + ERRORS += test_failure(cmd, "Must provide pgid") + + # Don't secify a --op nor object command + cmd = CFSD_PREFIX.format(osd=ONEOSD) + ERRORS += test_failure(cmd, "Must provide --op or object command...") + + # Specify a bad --op command + cmd = (CFSD_PREFIX + "--op oops").format(osd=ONEOSD) + ERRORS += test_failure(cmd, "Must provide --op (info, log, remove, mkfs, fsck, repair, export, export-remove, import, list, fix-lost, list-pgs, dump-journal, dump-super, meta-list, get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete, reset-last-complete, dump-export, trim-pg-log, statfs)") + + # Provide just the object param not a command + cmd = (CFSD_PREFIX + "object").format(osd=ONEOSD) + ERRORS += test_failure(cmd, "Invalid syntax, missing command") + + # Provide an object name that doesn't exist + cmd = (CFSD_PREFIX + "NON_OBJECT get-bytes").format(osd=ONEOSD) + ERRORS += test_failure(cmd, 
"No object id 'NON_OBJECT' found") + + # Provide an invalid object command + cmd = (CFSD_PREFIX + "--pgid {pg} '' notacommand").format(osd=ONEOSD, pg=ONEPG) + ERRORS += test_failure(cmd, "Unknown object command 'notacommand'") + + cmd = (CFSD_PREFIX + "foo list-omap").format(osd=ONEOSD, pg=ONEPG) + ERRORS += test_failure(cmd, "No object id 'foo' found or invalid JSON specified") + + cmd = (CFSD_PREFIX + "'{{\"oid\":\"obj4\",\"key\":\"\",\"snapid\":-1,\"hash\":2826278768,\"max\":0,\"pool\":1,\"namespace\":\"\"}}' list-omap").format(osd=ONEOSD, pg=ONEPG) + ERRORS += test_failure(cmd, "Without --pgid the object '{\"oid\":\"obj4\",\"key\":\"\",\"snapid\":-1,\"hash\":2826278768,\"max\":0,\"pool\":1,\"namespace\":\"\"}' must be a JSON array") + + cmd = (CFSD_PREFIX + "'[]' list-omap").format(osd=ONEOSD, pg=ONEPG) + ERRORS += test_failure(cmd, "Object '[]' must be a JSON array with 2 elements") + + cmd = (CFSD_PREFIX + "'[\"1.0\"]' list-omap").format(osd=ONEOSD, pg=ONEPG) + ERRORS += test_failure(cmd, "Object '[\"1.0\"]' must be a JSON array with 2 elements") + + cmd = (CFSD_PREFIX + "'[\"1.0\", 5, 8, 9]' list-omap").format(osd=ONEOSD, pg=ONEPG) + ERRORS += test_failure(cmd, "Object '[\"1.0\", 5, 8, 9]' must be a JSON array with 2 elements") + + cmd = (CFSD_PREFIX + "'[1, 2]' list-omap").format(osd=ONEOSD, pg=ONEPG) + ERRORS += test_failure(cmd, "Object '[1, 2]' must be a JSON array with the first element a string") + + cmd = (CFSD_PREFIX + "'[\"1.3\",{{\"snapid\":\"not an int\"}}]' list-omap").format(osd=ONEOSD, pg=ONEPG) + ERRORS += test_failure(cmd, "Decode object JSON error: value type is 2 not 4") + + TMPFILE = r"/tmp/tmp.{pid}".format(pid=pid) + ALLPGS = OBJREPPGS + OBJECPGS + OSDS = get_osds(ALLPGS[0], OSDDIR) + osd = OSDS[0] + + print("Test all --op dump-journal") + ALLOSDS = [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0] + ERRORS += test_dump_journal(CFSD_PREFIX, ALLOSDS) + + # Test --op list and generate json for all objects + print("Test --op list variants") + + # retrieve all objects from all PGs + tmpfd = open(TMPFILE, "wb") + cmd = (CFSD_PREFIX + "--op list --format json").format(osd=osd) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=tmpfd) + if ret != 0: + logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd)) + ERRORS += 1 + tmpfd.close() + lines = get_lines(TMPFILE) + JSONOBJ = sorted(set(lines)) + (pgid, coll, jsondict) = json.loads(JSONOBJ[0])[0] + + # retrieve all objects in a given PG + tmpfd = open(OTHERFILE, "ab") + cmd = (CFSD_PREFIX + "--op list --pgid {pg} --format json").format(osd=osd, pg=pgid) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=tmpfd) + if ret != 0: + logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd)) + ERRORS += 1 + tmpfd.close() + lines = get_lines(OTHERFILE) + JSONOBJ = sorted(set(lines)) + (other_pgid, other_coll, other_jsondict) = json.loads(JSONOBJ[0])[0] + + if pgid != other_pgid or jsondict != other_jsondict or coll != other_coll: + logging.error("the first line of --op list is different " + "from the first line of --op list --pgid {pg}".format(pg=pgid)) + ERRORS += 1 + + # retrieve all objects with a given name in a given PG + tmpfd = open(OTHERFILE, "wb") + cmd = (CFSD_PREFIX + "--op list --pgid {pg} {object} --format json").format(osd=osd, pg=pgid, object=jsondict['oid']) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=tmpfd) + if ret != 0: + logging.error("Bad exit status {ret} from {cmd}".format(ret=ret, cmd=cmd)) + 
ERRORS += 1 + tmpfd.close() + lines = get_lines(OTHERFILE) + JSONOBJ = sorted(set(lines)) + (other_pgid, other_coll, other_jsondict) = json.loads(JSONOBJ[0])[0] + + if pgid != other_pgid or jsondict != other_jsondict or coll != other_coll: + logging.error("the first line of --op list is different " + "from the first line of --op list --pgid {pg} {object}".format(pg=pgid, object=jsondict['oid'])) + ERRORS += 1 + + print("Test --op list by generating json for all objects using default format") + for pg in ALLPGS: + OSDS = get_osds(pg, OSDDIR) + for osd in OSDS: + tmpfd = open(TMPFILE, "ab") + cmd = (CFSD_PREFIX + "--op list --pgid {pg}").format(osd=osd, pg=pg) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=tmpfd) + if ret != 0: + logging.error("Bad exit status {ret} from --op list request".format(ret=ret)) + ERRORS += 1 + + tmpfd.close() + lines = get_lines(TMPFILE) + JSONOBJ = sorted(set(lines)) + for JSON in JSONOBJ: + (pgid, jsondict) = json.loads(JSON) + # Skip clones for now + if jsondict['snapid'] != -2: + continue + db[jsondict['namespace']][jsondict['oid']]['json'] = json.dumps((pgid, jsondict)) + # print db[jsondict['namespace']][jsondict['oid']]['json'] + if jsondict['oid'].find(EC_NAME) == 0 and 'shard_id' not in jsondict: + logging.error("Malformed JSON {json}".format(json=JSON)) + ERRORS += 1 + + # Test get-bytes + print("Test get-bytes and set-bytes") + for nspace in db.keys(): + for basename in db[nspace].keys(): + file = os.path.join(DATADIR, nspace + "-" + basename + "__head") + JSON = db[nspace][basename]['json'] + GETNAME = "/tmp/getbytes.{pid}".format(pid=pid) + TESTNAME = "/tmp/testbytes.{pid}".format(pid=pid) + SETNAME = "/tmp/setbytes.{pid}".format(pid=pid) + BADNAME = "/tmp/badbytes.{pid}".format(pid=pid) + for pg in OBJREPPGS: + OSDS = get_osds(pg, OSDDIR) + for osd in OSDS: + DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg)))) + fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f)) + and f.split("_")[0] == basename and f.split("_")[4] == nspace] + if not fnames: + continue + try: + os.unlink(GETNAME) + except: + pass + cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-bytes {fname}").format(osd=osd, pg=pg, json=JSON, fname=GETNAME) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + logging.error("Bad exit status {ret}".format(ret=ret)) + ERRORS += 1 + continue + cmd = "diff -q {file} {getfile}".format(file=file, getfile=GETNAME) + ret = call(cmd, shell=True) + if ret != 0: + logging.error("Data from get-bytes differ") + logging.debug("Got:") + cat_file(logging.DEBUG, GETNAME) + logging.debug("Expected:") + cat_file(logging.DEBUG, file) + ERRORS += 1 + fd = open(SETNAME, "w") + data = "put-bytes going into {file}\n".format(file=file) + fd.write(data) + fd.close() + cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' set-bytes {sname}").format(osd=osd, pg=pg, json=JSON, sname=SETNAME) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + logging.error("Bad exit status {ret} from set-bytes".format(ret=ret)) + ERRORS += 1 + fd = open(TESTNAME, "wb") + cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-bytes -").format(osd=osd, pg=pg, json=JSON) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=fd) + fd.close() + if ret != 0: + logging.error("Bad exit status {ret} from get-bytes".format(ret=ret)) + ERRORS += 1 + cmd = "diff -q {setfile} {testfile}".format(setfile=SETNAME, testfile=TESTNAME) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + 
logging.error("Data after set-bytes differ") + logging.debug("Got:") + cat_file(logging.DEBUG, TESTNAME) + logging.debug("Expected:") + cat_file(logging.DEBUG, SETNAME) + ERRORS += 1 + + # Use set-bytes with --dry-run and make sure contents haven't changed + fd = open(BADNAME, "w") + data = "Bad data for --dry-run in {file}\n".format(file=file) + fd.write(data) + fd.close() + cmd = (CFSD_PREFIX + "--dry-run --pgid {pg} '{json}' set-bytes {sname}").format(osd=osd, pg=pg, json=JSON, sname=BADNAME) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + if ret != 0: + logging.error("Bad exit status {ret} from set-bytes --dry-run".format(ret=ret)) + ERRORS += 1 + fd = open(TESTNAME, "wb") + cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' get-bytes -").format(osd=osd, pg=pg, json=JSON) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=fd) + fd.close() + if ret != 0: + logging.error("Bad exit status {ret} from get-bytes".format(ret=ret)) + ERRORS += 1 + cmd = "diff -q {setfile} {testfile}".format(setfile=SETNAME, testfile=TESTNAME) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + logging.error("Data after set-bytes --dry-run changed!") + logging.debug("Got:") + cat_file(logging.DEBUG, TESTNAME) + logging.debug("Expected:") + cat_file(logging.DEBUG, SETNAME) + ERRORS += 1 + + fd = open(file, "rb") + cmd = (CFSD_PREFIX + "--pgid {pg} '{json}' set-bytes").format(osd=osd, pg=pg, json=JSON) + logging.debug(cmd) + ret = call(cmd, shell=True, stdin=fd) + if ret != 0: + logging.error("Bad exit status {ret} from set-bytes to restore object".format(ret=ret)) + ERRORS += 1 + fd.close() + + try: + os.unlink(GETNAME) + except: + pass + try: + os.unlink(TESTNAME) + except: + pass + try: + os.unlink(SETNAME) + except: + pass + try: + os.unlink(BADNAME) + except: + pass + + # Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap + print("Test get-attr, set-attr, rm-attr, get-omaphdr, set-omaphdr, get-omap, set-omap, rm-omap") + for nspace in db.keys(): + for basename in db[nspace].keys(): + file = os.path.join(DATADIR, nspace + "-" + basename + "__head") + JSON = db[nspace][basename]['json'] + for pg in OBJREPPGS: + OSDS = get_osds(pg, OSDDIR) + for osd in OSDS: + DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg)))) + fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f)) + and f.split("_")[0] == basename and f.split("_")[4] == nspace] + if not fnames: + continue + for key, val in db[nspace][basename]["xattr"].items(): + attrkey = "_" + key + cmd = (CFSD_PREFIX + " '{json}' get-attr {key}").format(osd=osd, json=JSON, key=attrkey) + logging.debug(cmd) + getval = check_output(cmd, shell=True) + if getval != val: + logging.error("get-attr of key {key} returned wrong val: {get} instead of {orig}".format(key=attrkey, get=getval, orig=val)) + ERRORS += 1 + continue + # set-attr to bogus value "foobar" + cmd = ("echo -n foobar | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + logging.error("Bad exit status {ret} from set-attr".format(ret=ret)) + ERRORS += 1 + continue + # Test set-attr with dry-run + cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd) + if ret != 0: + logging.error("Bad exit status {ret} 
from set-attr".format(ret=ret)) + ERRORS += 1 + continue + # Check the set-attr + cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey) + logging.debug(cmd) + getval = check_output(cmd, shell=True) + if ret != 0: + logging.error("Bad exit status {ret} from get-attr".format(ret=ret)) + ERRORS += 1 + continue + if getval != "foobar": + logging.error("Check of set-attr failed because we got {val}".format(val=getval)) + ERRORS += 1 + continue + # Test rm-attr + cmd = (CFSD_PREFIX + "'{json}' rm-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + logging.error("Bad exit status {ret} from rm-attr".format(ret=ret)) + ERRORS += 1 + continue + # Check rm-attr with dry-run + cmd = (CFSD_PREFIX + "--dry-run '{json}' rm-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd) + if ret != 0: + logging.error("Bad exit status {ret} from rm-attr".format(ret=ret)) + ERRORS += 1 + continue + cmd = (CFSD_PREFIX + "'{json}' get-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey) + logging.debug(cmd) + ret = call(cmd, shell=True, stderr=nullfd, stdout=nullfd) + if ret == 0: + logging.error("For rm-attr expect get-attr to fail, but it succeeded") + ERRORS += 1 + # Put back value + cmd = ("echo -n {val} | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-attr {key}").format(osd=osd, pg=pg, json=JSON, key=attrkey, val=val) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + logging.error("Bad exit status {ret} from set-attr".format(ret=ret)) + ERRORS += 1 + continue + + hdr = db[nspace][basename].get("omapheader", "") + cmd = (CFSD_PREFIX + "'{json}' get-omaphdr").format(osd=osd, json=JSON) + logging.debug(cmd) + gethdr = check_output(cmd, shell=True) + if gethdr != hdr: + logging.error("get-omaphdr was wrong: {get} instead of {orig}".format(get=gethdr, orig=hdr)) + ERRORS += 1 + continue + # set-omaphdr to bogus value "foobar" + cmd = ("echo -n foobar | " + CFSD_PREFIX + "'{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret)) + ERRORS += 1 + continue + # Check the set-omaphdr + cmd = (CFSD_PREFIX + "'{json}' get-omaphdr").format(osd=osd, pg=pg, json=JSON) + logging.debug(cmd) + gethdr = check_output(cmd, shell=True) + if ret != 0: + logging.error("Bad exit status {ret} from get-omaphdr".format(ret=ret)) + ERRORS += 1 + continue + if gethdr != "foobar": + logging.error("Check of set-omaphdr failed because we got {val}".format(val=getval)) + ERRORS += 1 + continue + # Test dry-run with set-omaphdr + cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run '{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd) + if ret != 0: + logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret)) + ERRORS += 1 + continue + # Put back value + cmd = ("echo -n {val} | " + CFSD_PREFIX + "'{json}' set-omaphdr").format(osd=osd, pg=pg, json=JSON, val=hdr) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + logging.error("Bad exit status {ret} from set-omaphdr".format(ret=ret)) + ERRORS += 1 + continue + + for omapkey, val in db[nspace][basename]["omap"].items(): + cmd = (CFSD_PREFIX + " '{json}' get-omap {key}").format(osd=osd, json=JSON, key=omapkey) + logging.debug(cmd) + getval = 
check_output(cmd, shell=True) + if getval != val: + logging.error("get-omap of key {key} returned wrong val: {get} instead of {orig}".format(key=omapkey, get=getval, orig=val)) + ERRORS += 1 + continue + # set-omap to bogus value "foobar" + cmd = ("echo -n foobar | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + logging.error("Bad exit status {ret} from set-omap".format(ret=ret)) + ERRORS += 1 + continue + # Check set-omap with dry-run + cmd = ("echo -n dryrunbroken | " + CFSD_PREFIX + "--dry-run --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd) + if ret != 0: + logging.error("Bad exit status {ret} from set-omap".format(ret=ret)) + ERRORS += 1 + continue + # Check the set-omap + cmd = (CFSD_PREFIX + " --pgid {pg} '{json}' get-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey) + logging.debug(cmd) + getval = check_output(cmd, shell=True) + if ret != 0: + logging.error("Bad exit status {ret} from get-omap".format(ret=ret)) + ERRORS += 1 + continue + if getval != "foobar": + logging.error("Check of set-omap failed because we got {val}".format(val=getval)) + ERRORS += 1 + continue + # Test rm-omap + cmd = (CFSD_PREFIX + "'{json}' rm-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + logging.error("Bad exit status {ret} from rm-omap".format(ret=ret)) + ERRORS += 1 + # Check rm-omap with dry-run + cmd = (CFSD_PREFIX + "--dry-run '{json}' rm-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd) + if ret != 0: + logging.error("Bad exit status {ret} from rm-omap".format(ret=ret)) + ERRORS += 1 + cmd = (CFSD_PREFIX + "'{json}' get-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey) + logging.debug(cmd) + ret = call(cmd, shell=True, stderr=nullfd, stdout=nullfd) + if ret == 0: + logging.error("For rm-omap expect get-omap to fail, but it succeeded") + ERRORS += 1 + # Put back value + cmd = ("echo -n {val} | " + CFSD_PREFIX + " --pgid {pg} '{json}' set-omap {key}").format(osd=osd, pg=pg, json=JSON, key=omapkey, val=val) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + logging.error("Bad exit status {ret} from set-omap".format(ret=ret)) + ERRORS += 1 + continue + + # Test dump + print("Test dump") + for nspace in db.keys(): + for basename in db[nspace].keys(): + file = os.path.join(DATADIR, nspace + "-" + basename + "__head") + JSON = db[nspace][basename]['json'] + jsondict = json.loads(JSON) + for pg in OBJREPPGS: + OSDS = get_osds(pg, OSDDIR) + for osd in OSDS: + DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg)))) + fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f)) + and f.split("_")[0] == basename and f.split("_")[4] == nspace] + if not fnames: + continue + if int(basename.split(REP_NAME)[1]) > int(NUM_CLONED_REP_OBJECTS): + continue + logging.debug("REPobject " + JSON) + cmd = (CFSD_PREFIX + " '{json}' dump | grep '\"snap\": 1,' > /dev/null").format(osd=osd, json=JSON) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + logging.error("Invalid dump for {json}".format(json=JSON)) + ERRORS += 1 + if 'shard_id' in jsondict[1]: + logging.debug("ECobject " + JSON) + for pg in OBJECPGS: + OSDS = get_osds(pg, OSDDIR) 
+ jsondict = json.loads(JSON) + for osd in OSDS: + DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg)))) + fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f)) + and f.split("_")[0] == basename and f.split("_")[4] == nspace] + if not fnames: + continue + if int(basename.split(EC_NAME)[1]) > int(NUM_EC_OBJECTS): + continue + # Fix shard_id since we only have one json instance for each object + jsondict[1]['shard_id'] = int(pg.split('s')[1]) + cmd = (CFSD_PREFIX + " '{json}' dump | grep '\"hinfo\": [{{]' > /dev/null").format(osd=osd, json=json.dumps((pg, jsondict[1]))) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + logging.error("Invalid dump for {json}".format(json=JSON)) + + print("Test list-attrs get-attr") + ATTRFILE = r"/tmp/attrs.{pid}".format(pid=pid) + VALFILE = r"/tmp/val.{pid}".format(pid=pid) + for nspace in db.keys(): + for basename in db[nspace].keys(): + file = os.path.join(DATADIR, nspace + "-" + basename) + JSON = db[nspace][basename]['json'] + jsondict = json.loads(JSON) + + if 'shard_id' in jsondict[1]: + logging.debug("ECobject " + JSON) + found = 0 + for pg in OBJECPGS: + OSDS = get_osds(pg, OSDDIR) + # Fix shard_id since we only have one json instance for each object + jsondict[1]['shard_id'] = int(pg.split('s')[1]) + JSON = json.dumps((pg, jsondict[1])) + for osd in OSDS: + cmd = (CFSD_PREFIX + " '{json}' get-attr hinfo_key").format(osd=osd, json=JSON) + logging.debug("TRY: " + cmd) + try: + out = check_output(cmd, shell=True, stderr=subprocess.STDOUT) + logging.debug("FOUND: {json} in {osd} has value '{val}'".format(osd=osd, json=JSON, val=out)) + found += 1 + except subprocess.CalledProcessError as e: + if "No such file or directory" not in e.output and "No data available" not in e.output: + raise + # Assuming k=2 m=1 for the default ec pool + if found != 3: + logging.error("{json} hinfo_key found {found} times instead of 3".format(json=JSON, found=found)) + ERRORS += 1 + + for pg in ALLPGS: + # Make sure rep obj with rep pg or ec obj with ec pg + if ('shard_id' in jsondict[1]) != (pg.find('s') > 0): + continue + if 'shard_id' in jsondict[1]: + # Fix shard_id since we only have one json instance for each object + jsondict[1]['shard_id'] = int(pg.split('s')[1]) + JSON = json.dumps((pg, jsondict[1])) + OSDS = get_osds(pg, OSDDIR) + for osd in OSDS: + DIR = os.path.join(OSDDIR, os.path.join(osd, os.path.join("current", "{pg}_head".format(pg=pg)))) + fnames = [f for f in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, f)) + and f.split("_")[0] == basename and f.split("_")[4] == nspace] + if not fnames: + continue + afd = open(ATTRFILE, "wb") + cmd = (CFSD_PREFIX + " '{json}' list-attrs").format(osd=osd, json=JSON) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=afd) + afd.close() + if ret != 0: + logging.error("list-attrs failed with {ret}".format(ret=ret)) + ERRORS += 1 + continue + keys = get_lines(ATTRFILE) + values = dict(db[nspace][basename]["xattr"]) + for key in keys: + if key == "_" or key == "snapset" or key == "hinfo_key": + continue + key = key.strip("_") + if key not in values: + logging.error("Unexpected key {key} present".format(key=key)) + ERRORS += 1 + continue + exp = values.pop(key) + vfd = open(VALFILE, "wb") + cmd = (CFSD_PREFIX + " '{json}' get-attr {key}").format(osd=osd, json=JSON, key="_" + key) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=vfd) + vfd.close() + if ret != 0: + logging.error("get-attr failed with {ret}".format(ret=ret)) 
+ ERRORS += 1 + continue + lines = get_lines(VALFILE) + val = lines[0] + if exp != val: + logging.error("For key {key} got value {got} instead of {expected}".format(key=key, got=val, expected=exp)) + ERRORS += 1 + if len(values) != 0: + logging.error("Not all keys found, remaining keys:") + print(values) + + print("Test --op meta-list") + tmpfd = open(TMPFILE, "wb") + cmd = (CFSD_PREFIX + "--op meta-list").format(osd=ONEOSD) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=tmpfd) + if ret != 0: + logging.error("Bad exit status {ret} from --op meta-list request".format(ret=ret)) + ERRORS += 1 + + print("Test get-bytes on meta") + tmpfd.close() + lines = get_lines(TMPFILE) + JSONOBJ = sorted(set(lines)) + for JSON in JSONOBJ: + (pgid, jsondict) = json.loads(JSON) + if pgid != "meta": + logging.error("pgid incorrect for --op meta-list {pgid}".format(pgid=pgid)) + ERRORS += 1 + if jsondict['namespace'] != "": + logging.error("namespace non null --op meta-list {ns}".format(ns=jsondict['namespace'])) + ERRORS += 1 + logging.info(JSON) + try: + os.unlink(GETNAME) + except: + pass + cmd = (CFSD_PREFIX + "'{json}' get-bytes {fname}").format(osd=ONEOSD, json=JSON, fname=GETNAME) + logging.debug(cmd) + ret = call(cmd, shell=True) + if ret != 0: + logging.error("Bad exit status {ret}".format(ret=ret)) + ERRORS += 1 + + try: + os.unlink(GETNAME) + except: + pass + try: + os.unlink(TESTNAME) + except: + pass + + print("Test pg info") + for pg in ALLREPPGS + ALLECPGS: + for osd in get_osds(pg, OSDDIR): + cmd = (CFSD_PREFIX + "--op info --pgid {pg} | grep '\"pgid\": \"{pg}\"'").format(osd=osd, pg=pg) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd) + if ret != 0: + logging.error("Getting info failed for pg {pg} from {osd} with {ret}".format(pg=pg, osd=osd, ret=ret)) + ERRORS += 1 + + print("Test pg logging") + if len(ALLREPPGS + ALLECPGS) == len(OBJREPPGS + OBJECPGS): + logging.warning("All PGs have objects, so no log without modify entries") + for pg in ALLREPPGS + ALLECPGS: + for osd in get_osds(pg, OSDDIR): + tmpfd = open(TMPFILE, "wb") + cmd = (CFSD_PREFIX + "--op log --pgid {pg}").format(osd=osd, pg=pg) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=tmpfd) + if ret != 0: + logging.error("Getting log failed for pg {pg} from {osd} with {ret}".format(pg=pg, osd=osd, ret=ret)) + ERRORS += 1 + HASOBJ = pg in OBJREPPGS + OBJECPGS + MODOBJ = False + for line in get_lines(TMPFILE): + if line.find("modify") != -1: + MODOBJ = True + break + if HASOBJ != MODOBJ: + logging.error("Bad log for pg {pg} from {osd}".format(pg=pg, osd=osd)) + MSG = (HASOBJ and [""] or ["NOT "])[0] + print("Log should {msg}have a modify entry".format(msg=MSG)) + ERRORS += 1 + + try: + os.unlink(TMPFILE) + except: + pass + + print("Test list-pgs") + for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]: + + CHECK_PGS = get_osd_pgs(os.path.join(OSDDIR, osd), None) + CHECK_PGS = sorted(CHECK_PGS) + + cmd = (CFSD_PREFIX + "--op list-pgs").format(osd=osd) + logging.debug(cmd) + TEST_PGS = check_output(cmd, shell=True).split("\n") + TEST_PGS = sorted(TEST_PGS)[1:] # Skip extra blank line + + if TEST_PGS != CHECK_PGS: + logging.error("list-pgs got wrong result for osd.{osd}".format(osd=osd)) + logging.error("Expected {pgs}".format(pgs=CHECK_PGS)) + logging.error("Got {pgs}".format(pgs=TEST_PGS)) + ERRORS += 1 + + EXP_ERRORS = 0 + print("Test pg export --dry-run") + pg = ALLREPPGS[0] + osd = get_osds(pg, OSDDIR)[0] + fname = 
"/tmp/fname.{pid}".format(pid=pid) + cmd = (CFSD_PREFIX + "--dry-run --op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + if ret != 0: + logging.error("Exporting --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret)) + EXP_ERRORS += 1 + elif os.path.exists(fname): + logging.error("Exporting --dry-run created file") + EXP_ERRORS += 1 + + cmd = (CFSD_PREFIX + "--dry-run --op export --pgid {pg} > {file}").format(osd=osd, pg=pg, file=fname) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + if ret != 0: + logging.error("Exporting --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret)) + EXP_ERRORS += 1 + else: + outdata = get_lines(fname) + if len(outdata) > 0: + logging.error("Exporting --dry-run to stdout not empty") + logging.error("Data: " + outdata) + EXP_ERRORS += 1 + + os.mkdir(TESTDIR) + for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]: + os.mkdir(os.path.join(TESTDIR, osd)) + print("Test pg export") + for pg in ALLREPPGS + ALLECPGS: + for osd in get_osds(pg, OSDDIR): + mydir = os.path.join(TESTDIR, osd) + fname = os.path.join(mydir, pg) + if pg == ALLREPPGS[0]: + cmd = (CFSD_PREFIX + "--op export --pgid {pg} > {file}").format(osd=osd, pg=pg, file=fname) + elif pg == ALLREPPGS[1]: + cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file - > {file}").format(osd=osd, pg=pg, file=fname) + else: + cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + if ret != 0: + logging.error("Exporting failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret)) + EXP_ERRORS += 1 + + ERRORS += EXP_ERRORS + + print("Test clear-data-digest") + for nspace in db.keys(): + for basename in db[nspace].keys(): + JSON = db[nspace][basename]['json'] + cmd = (CFSD_PREFIX + "'{json}' clear-data-digest").format(osd='osd0', json=JSON) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + if ret != 0: + logging.error("Clearing data digest failed for {json}".format(json=JSON)) + ERRORS += 1 + break + cmd = (CFSD_PREFIX + "'{json}' dump | grep '\"data_digest\": \"0xff'").format(osd='osd0', json=JSON) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + if ret != 0: + logging.error("Data digest not cleared for {json}".format(json=JSON)) + ERRORS += 1 + break + break + break + + print("Test pg removal") + RM_ERRORS = 0 + for pg in ALLREPPGS + ALLECPGS: + for osd in get_osds(pg, OSDDIR): + # This should do nothing + cmd = (CFSD_PREFIX + "--op remove --pgid {pg} --dry-run").format(pg=pg, osd=osd) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd) + if ret != 0: + logging.error("Removing --dry-run failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret)) + RM_ERRORS += 1 + cmd = (CFSD_PREFIX + "--force --op remove --pgid {pg}").format(pg=pg, osd=osd) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd) + if ret != 0: + logging.error("Removing failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret)) + RM_ERRORS += 1 + + ERRORS += RM_ERRORS + + IMP_ERRORS = 0 + if EXP_ERRORS == 0 and RM_ERRORS == 0: + print("Test pg import") + for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 
0]: + dir = os.path.join(TESTDIR, osd) + PGS = [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))] + for pg in PGS: + file = os.path.join(dir, pg) + # Make sure this doesn't crash + cmd = (CFSD_PREFIX + "--op dump-export --file {file}").format(osd=osd, file=file) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd) + if ret != 0: + logging.error("Dump-export failed from {file} with {ret}".format(file=file, ret=ret)) + IMP_ERRORS += 1 + # This should do nothing + cmd = (CFSD_PREFIX + "--op import --file {file} --dry-run").format(osd=osd, file=file) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd) + if ret != 0: + logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret)) + IMP_ERRORS += 1 + if pg == PGS[0]: + cmd = ("cat {file} |".format(file=file) + CFSD_PREFIX + "--op import").format(osd=osd) + elif pg == PGS[1]: + cmd = (CFSD_PREFIX + "--op import --file - --pgid {pg} < {file}").format(osd=osd, file=file, pg=pg) + else: + cmd = (CFSD_PREFIX + "--op import --file {file}").format(osd=osd, file=file) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd) + if ret != 0: + logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret)) + IMP_ERRORS += 1 + else: + logging.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES") + + ERRORS += IMP_ERRORS + logging.debug(cmd) + + if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0: + print("Verify replicated import data") + data_errors, _ = check_data(DATADIR, TMPFILE, OSDDIR, REP_NAME) + ERRORS += data_errors + else: + logging.warning("SKIPPING CHECKING IMPORT DATA DUE TO PREVIOUS FAILURES") + + print("Test all --op dump-journal again") + ALLOSDS = [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0] + ERRORS += test_dump_journal(CFSD_PREFIX, ALLOSDS) + + vstart(new=False) + wait_for_health() + + if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0: + print("Verify erasure coded import data") + ERRORS += verify(DATADIR, EC_POOL, EC_NAME, db) + # Check replicated data/xattr/omap using rados + print("Verify replicated import data using rados") + ERRORS += verify(DATADIR, REP_POOL, REP_NAME, db) + + if EXP_ERRORS == 0: + NEWPOOL = "rados-import-pool" + cmd = "{path}/ceph osd pool create {pool} 8".format(pool=NEWPOOL, path=CEPH_BIN) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + + print("Test rados import") + first = True + for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]: + dir = os.path.join(TESTDIR, osd) + for pg in [f for f in os.listdir(dir) if os.path.isfile(os.path.join(dir, f))]: + if pg.find("{id}.".format(id=REPID)) != 0: + continue + file = os.path.join(dir, pg) + if first: + first = False + # This should do nothing + cmd = "{path}/rados import -p {pool} --dry-run {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd) + if ret != 0: + logging.error("Rados import --dry-run failed from {file} with {ret}".format(file=file, ret=ret)) + ERRORS += 1 + cmd = "{path}/rados -p {pool} ls".format(pool=NEWPOOL, path=CEPH_BIN) + logging.debug(cmd) + data = check_output(cmd, shell=True) + if data: + logging.error("'{data}'".format(data=data)) + logging.error("Found objects after dry-run") + ERRORS += 1 + cmd = "{path}/rados import -p {pool} {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN) + logging.debug(cmd) + ret = call(cmd, shell=True, 
stdout=nullfd) + if ret != 0: + logging.error("Rados import failed from {file} with {ret}".format(file=file, ret=ret)) + ERRORS += 1 + cmd = "{path}/rados import -p {pool} --no-overwrite {file}".format(pool=NEWPOOL, file=file, path=CEPH_BIN) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd) + if ret != 0: + logging.error("Rados import --no-overwrite failed from {file} with {ret}".format(file=file, ret=ret)) + ERRORS += 1 + + ERRORS += verify(DATADIR, NEWPOOL, REP_NAME, db) + else: + logging.warning("SKIPPING IMPORT-RADOS TESTS DUE TO PREVIOUS FAILURES") + + # Clear directories of previous portion + call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True) + call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True) + os.mkdir(TESTDIR) + os.mkdir(DATADIR) + + # Cause SPLIT_POOL to split and test import with object/log filtering + print("Testing import all objects after a split") + SPLIT_POOL = "split_pool" + PG_COUNT = 1 + SPLIT_OBJ_COUNT = 5 + SPLIT_NSPACE_COUNT = 2 + SPLIT_NAME = "split" + cmd = "{path}/ceph osd pool create {pool} {pg} {pg} replicated".format(pool=SPLIT_POOL, pg=PG_COUNT, path=CEPH_BIN) + logging.debug(cmd) + call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + SPLITID = get_pool_id(SPLIT_POOL, nullfd) + pool_size = int(check_output("{path}/ceph osd pool get {pool} size".format(pool=SPLIT_POOL, path=CEPH_BIN), shell=True, stderr=nullfd).split(" ")[1]) + EXP_ERRORS = 0 + RM_ERRORS = 0 + IMP_ERRORS = 0 + + objects = range(1, SPLIT_OBJ_COUNT + 1) + nspaces = range(SPLIT_NSPACE_COUNT) + for n in nspaces: + nspace = get_nspace(n) + + for i in objects: + NAME = SPLIT_NAME + "{num}".format(num=i) + LNAME = nspace + "-" + NAME + DDNAME = os.path.join(DATADIR, LNAME) + DDNAME += "__head" + + cmd = "rm -f " + DDNAME + logging.debug(cmd) + call(cmd, shell=True) + + if i == 1: + dataline = range(DATALINECOUNT) + else: + dataline = range(1) + fd = open(DDNAME, "w") + data = "This is the split data for " + LNAME + "\n" + for _ in dataline: + fd.write(data) + fd.close() + + cmd = "{path}/rados -p {pool} -N '{nspace}' put {name} {ddname}".format(pool=SPLIT_POOL, name=NAME, ddname=DDNAME, nspace=nspace, path=CEPH_BIN) + logging.debug(cmd) + ret = call(cmd, shell=True, stderr=nullfd) + if ret != 0: + logging.critical("Rados put command failed with {ret}".format(ret=ret)) + return 1 + + wait_for_health() + kill_daemons() + + for osd in [f for f in os.listdir(OSDDIR) if os.path.isdir(os.path.join(OSDDIR, f)) and f.find("osd") == 0]: + os.mkdir(os.path.join(TESTDIR, osd)) + + pg = "{pool}.0".format(pool=SPLITID) + EXPORT_PG = pg + + export_osds = get_osds(pg, OSDDIR) + for osd in export_osds: + mydir = os.path.join(TESTDIR, osd) + fname = os.path.join(mydir, pg) + cmd = (CFSD_PREFIX + "--op export --pgid {pg} --file {file}").format(osd=osd, pg=pg, file=fname) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + if ret != 0: + logging.error("Exporting failed for pg {pg} on {osd} with {ret}".format(pg=pg, osd=osd, ret=ret)) + EXP_ERRORS += 1 + + ERRORS += EXP_ERRORS + + if EXP_ERRORS == 0: + vstart(new=False) + wait_for_health() + + cmd = "{path}/ceph osd pool set {pool} pg_num 2".format(pool=SPLIT_POOL, path=CEPH_BIN) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd, stderr=nullfd) + time.sleep(5) + wait_for_health() + + kill_daemons() + + # Now 2 PGs, poolid.0 and poolid.1 + # make note of pgs before we remove the pgs... 
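+        #
+        # Outline of the split/import check below (descriptive comment only):
+        #   1. force-remove both child PGs (SPLITID.0 and SPLITID.1) from every OSD
+        #   2. re-import the pre-split export of SPLITID.0 on each OSD, relying on the
+        #      tool to filter out objects and log entries that now belong to the other child
+        #   3. after the cluster restarts, check_data() expects
+        #      SPLIT_OBJ_COUNT * SPLIT_NSPACE_COUNT * pool_size replicas in total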
+ osds = get_osds("{pool}.0".format(pool=SPLITID), OSDDIR); + for seed in range(2): + pg = "{pool}.{seed}".format(pool=SPLITID, seed=seed) + + for osd in osds: + cmd = (CFSD_PREFIX + "--force --op remove --pgid {pg}").format(pg=pg, osd=osd) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd) + + which = 0 + for osd in osds: + # This is weird. The export files are based on only the EXPORT_PG + # and where that pg was before the split. Use 'which' to use all + # export copies in import. + mydir = os.path.join(TESTDIR, export_osds[which]) + fname = os.path.join(mydir, EXPORT_PG) + which += 1 + cmd = (CFSD_PREFIX + "--op import --pgid {pg} --file {file}").format(osd=osd, pg=EXPORT_PG, file=fname) + logging.debug(cmd) + ret = call(cmd, shell=True, stdout=nullfd) + if ret != 0: + logging.error("Import failed from {file} with {ret}".format(file=file, ret=ret)) + IMP_ERRORS += 1 + + ERRORS += IMP_ERRORS + + # Start up again to make sure imports didn't corrupt anything + if IMP_ERRORS == 0: + print("Verify split import data") + data_errors, count = check_data(DATADIR, TMPFILE, OSDDIR, SPLIT_NAME) + ERRORS += data_errors + if count != (SPLIT_OBJ_COUNT * SPLIT_NSPACE_COUNT * pool_size): + logging.error("Incorrect number of replicas seen {count}".format(count=count)) + ERRORS += 1 + vstart(new=False) + wait_for_health() + + call("/bin/rm -rf {dir}".format(dir=TESTDIR), shell=True) + call("/bin/rm -rf {dir}".format(dir=DATADIR), shell=True) + + ERRORS += test_removeall(CFSD_PREFIX, db, OBJREPPGS, REP_POOL, CEPH_BIN, OSDDIR, REP_NAME, NUM_CLONED_REP_OBJECTS) + + # vstart() starts 4 OSDs + ERRORS += test_get_set_osdmap(CFSD_PREFIX, list(range(4)), ALLOSDS) + ERRORS += test_get_set_inc_osdmap(CFSD_PREFIX, ALLOSDS[0]) + + kill_daemons() + CORES = [f for f in os.listdir(CEPH_DIR) if f.startswith("core.")] + if CORES: + CORE_DIR = os.path.join("/tmp", "cores.{pid}".format(pid=os.getpid())) + os.mkdir(CORE_DIR) + call("/bin/mv {ceph_dir}/core.* {core_dir}".format(ceph_dir=CEPH_DIR, core_dir=CORE_DIR), shell=True) + logging.error("Failure due to cores found") + logging.error("See {core_dir} for cores".format(core_dir=CORE_DIR)) + ERRORS += len(CORES) + + if ERRORS == 0: + print("TEST PASSED") + return 0 + else: + print("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS)) + return 1 + + +def remove_btrfs_subvolumes(path): + if platform.system() == "FreeBSD": + return + result = subprocess.Popen("stat -f -c '%%T' %s" % path, shell=True, stdout=subprocess.PIPE) + for line in result.stdout: + filesystem = decode(line).rstrip('\n') + if filesystem == "btrfs": + result = subprocess.Popen("sudo btrfs subvolume list %s" % path, shell=True, stdout=subprocess.PIPE) + for line in result.stdout: + subvolume = decode(line).split()[8] + # extracting the relative volume name + m = re.search(".*(%s.*)" % path, subvolume) + if m: + found = m.group(1) + call("sudo btrfs subvolume delete %s" % found, shell=True) + + +if __name__ == "__main__": + status = 1 + try: + status = main(sys.argv[1:]) + finally: + kill_daemons() + os.chdir(CEPH_BUILD_DIR) + remove_btrfs_subvolumes(CEPH_DIR) + call("/bin/rm -fr {dir}".format(dir=CEPH_DIR), shell=True) + sys.exit(status) diff --git a/qa/standalone/special/test-failure.sh b/qa/standalone/special/test-failure.sh new file mode 100755 index 00000000..cede887d --- /dev/null +++ b/qa/standalone/special/test-failure.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +set -ex + +source $CEPH_ROOT/qa/standalone/ceph-helpers.sh + +function run() { + local dir=$1 + shift + + export 
CEPH_MON="127.0.0.1:7202" # git grep '\<7202\>' : there must be only one + export CEPH_ARGS + CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none " + CEPH_ARGS+="--mon-host=$CEPH_MON " + + local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')} + for func in $funcs ; do + setup $dir || return 1 + $func $dir || return 1 + teardown $dir || return 1 + done +} + +function TEST_failure_log() { + local dir=$1 + + cat > $dir/test_failure.log << EOF +This is a fake log file +* +* +* +* +* +This ends the fake log file +EOF + + # Test fails + return 1 +} + +function TEST_failure_core_only() { + local dir=$1 + + run_mon $dir a || return 1 + kill_daemons $dir SEGV mon 5 + return 0 +} + +main test_failure "$@" diff --git a/qa/suites/.qa b/qa/suites/.qa new file mode 120000 index 00000000..b870225a --- /dev/null +++ b/qa/suites/.qa @@ -0,0 +1 @@ +../ \ No newline at end of file diff --git a/qa/suites/big/.qa b/qa/suites/big/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/big/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/big/rados-thrash/% b/qa/suites/big/rados-thrash/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/big/rados-thrash/.qa b/qa/suites/big/rados-thrash/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/big/rados-thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/big/rados-thrash/ceph/.qa b/qa/suites/big/rados-thrash/ceph/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/big/rados-thrash/ceph/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/big/rados-thrash/ceph/ceph.yaml b/qa/suites/big/rados-thrash/ceph/ceph.yaml new file mode 100644 index 00000000..2030acb9 --- /dev/null +++ b/qa/suites/big/rados-thrash/ceph/ceph.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/big/rados-thrash/clusters/.qa b/qa/suites/big/rados-thrash/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/big/rados-thrash/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/big/rados-thrash/clusters/big.yaml b/qa/suites/big/rados-thrash/clusters/big.yaml new file mode 100644 index 00000000..fd8c2174 --- /dev/null +++ b/qa/suites/big/rados-thrash/clusters/big.yaml @@ -0,0 +1,68 @@ +roles: +- [osd.0, osd.1, osd.2, client.0, mon.a, mgr.x] +- [osd.3, osd.4, osd.5, client.1, mon.b, mgr.y] +- [osd.6, osd.7, osd.8, client.2, mon.c, mgr.z] +- [osd.9, osd.10, osd.11, client.3, mon.d] +- [osd.12, osd.13, osd.14, client.4, mon.e] +- [osd.15, osd.16, osd.17, client.5] +- [osd.18, osd.19, osd.20, client.6] +- [osd.21, osd.22, osd.23, client.7] +- [osd.24, osd.25, osd.26, client.8] +- [osd.27, osd.28, osd.29, client.9] +- [osd.30, osd.31, osd.32, client.10] +- [osd.33, osd.34, osd.35, client.11] +- [osd.36, osd.37, osd.38, client.12] +- [osd.39, osd.40, osd.41, client.13] +- [osd.42, osd.43, osd.44, client.14] +- [osd.45, osd.46, osd.47, client.15] +- [osd.48, osd.49, osd.50, client.16] +- [osd.51, osd.52, osd.53, client.17] +- [osd.54, osd.55, osd.56, client.18] +- [osd.57, osd.58, osd.59, client.19] +- [osd.60, osd.61, osd.62, client.20] +- [osd.63, osd.64, osd.65, client.21] +- [osd.66, osd.67, osd.68, client.22] +- [osd.69, osd.70, osd.71, client.23] +- [osd.72, osd.73, osd.74, client.24] +- [osd.75, osd.76, osd.77, client.25] +- [osd.78, osd.79, osd.80, client.26] +- [osd.81, osd.82, osd.83, client.27] +- [osd.84, osd.85, 
osd.86, client.28] +- [osd.87, osd.88, osd.89, client.29] +- [osd.90, osd.91, osd.92, client.30] +- [osd.93, osd.94, osd.95, client.31] +- [osd.96, osd.97, osd.98, client.32] +- [osd.99, osd.100, osd.101, client.33] +- [osd.102, osd.103, osd.104, client.34] +- [osd.105, osd.106, osd.107, client.35] +- [osd.108, osd.109, osd.110, client.36] +- [osd.111, osd.112, osd.113, client.37] +- [osd.114, osd.115, osd.116, client.38] +- [osd.117, osd.118, osd.119, client.39] +- [osd.120, osd.121, osd.122, client.40] +- [osd.123, osd.124, osd.125, client.41] +- [osd.126, osd.127, osd.128, client.42] +- [osd.129, osd.130, osd.131, client.43] +- [osd.132, osd.133, osd.134, client.44] +- [osd.135, osd.136, osd.137, client.45] +- [osd.138, osd.139, osd.140, client.46] +- [osd.141, osd.142, osd.143, client.47] +- [osd.144, osd.145, osd.146, client.48] +- [osd.147, osd.148, osd.149, client.49] +- [osd.150, osd.151, osd.152, client.50] +#- [osd.153, osd.154, osd.155, client.51] +#- [osd.156, osd.157, osd.158, client.52] +#- [osd.159, osd.160, osd.161, client.53] +#- [osd.162, osd.163, osd.164, client.54] +#- [osd.165, osd.166, osd.167, client.55] +#- [osd.168, osd.169, osd.170, client.56] +#- [osd.171, osd.172, osd.173, client.57] +#- [osd.174, osd.175, osd.176, client.58] +#- [osd.177, osd.178, osd.179, client.59] +#- [osd.180, osd.181, osd.182, client.60] +#- [osd.183, osd.184, osd.185, client.61] +#- [osd.186, osd.187, osd.188, client.62] +#- [osd.189, osd.190, osd.191, client.63] +#- [osd.192, osd.193, osd.194, client.64] +#- [osd.195, osd.196, osd.197, client.65] +#- [osd.198, osd.199, osd.200, client.66] diff --git a/qa/suites/big/rados-thrash/clusters/medium.yaml b/qa/suites/big/rados-thrash/clusters/medium.yaml new file mode 100644 index 00000000..ecded01b --- /dev/null +++ b/qa/suites/big/rados-thrash/clusters/medium.yaml @@ -0,0 +1,22 @@ +roles: +- [osd.0, osd.1, osd.2, client.0, mon.a, mgr.x] +- [osd.3, osd.4, osd.5, client.1, mon.b, mgr.y] +- [osd.6, osd.7, osd.8, client.2, mon.c, mgr.z] +- [osd.9, osd.10, osd.11, client.3, mon.d] +- [osd.12, osd.13, osd.14, client.4, mon.e] +- [osd.15, osd.16, osd.17, client.5] +- [osd.18, osd.19, osd.20, client.6] +- [osd.21, osd.22, osd.23, client.7] +- [osd.24, osd.25, osd.26, client.8] +- [osd.27, osd.28, osd.29, client.9] +- [osd.30, osd.31, osd.32, client.10] +- [osd.33, osd.34, osd.35, client.11] +- [osd.36, osd.37, osd.38, client.12] +- [osd.39, osd.40, osd.41, client.13] +- [osd.42, osd.43, osd.44, client.14] +- [osd.45, osd.46, osd.47, client.15] +- [osd.48, osd.49, osd.50, client.16] +- [osd.51, osd.52, osd.53, client.17] +- [osd.54, osd.55, osd.56, client.18] +- [osd.57, osd.58, osd.59, client.19] +- [osd.60, osd.61, osd.62, client.20] diff --git a/qa/suites/big/rados-thrash/clusters/small.yaml b/qa/suites/big/rados-thrash/clusters/small.yaml new file mode 100644 index 00000000..d0aecd00 --- /dev/null +++ b/qa/suites/big/rados-thrash/clusters/small.yaml @@ -0,0 +1,6 @@ +roles: +- [osd.0, osd.1, osd.2, client.0, mon.a, mgr.x] +- [osd.3, osd.4, osd.5, client.1, mon.b, mgr.y] +- [osd.6, osd.7, osd.8, client.2, mon.c, mgr.z] +- [osd.9, osd.10, osd.11, client.3, mon.d] +- [osd.12, osd.13, osd.14, client.4, mon.e] diff --git a/qa/suites/big/rados-thrash/objectstore b/qa/suites/big/rados-thrash/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/big/rados-thrash/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/big/rados-thrash/openstack.yaml 
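
For orientation: the empty '%' file in qa/suites/big/rados-thrash/ tells the teuthology suite builder to take the cross product of the sibling facets (ceph/, clusters/, objectstore, thrashers/, workloads/) and merge one fragment from each into a single job. A minimal sketch of roughly what one merged job would contain, assuming the small.yaml cluster, the default.yaml thrasher and the snaps-few-objects.yaml workload from the fragments above and below are the ones chosen; this is illustrative only, not literal scheduler output:

# Illustrative merge of ceph/ceph.yaml, clusters/small.yaml,
# thrashers/default.yaml and workloads/snaps-few-objects.yaml.
roles:
- [osd.0, osd.1, osd.2, client.0, mon.a, mgr.x]
- [osd.3, osd.4, osd.5, client.1, mon.b, mgr.y]
- [osd.6, osd.7, osd.8, client.2, mon.c, mgr.z]
- [osd.9, osd.10, osd.11, client.3, mon.d]
- [osd.12, osd.13, osd.14, client.4, mon.e]
overrides:
  ceph:
    log-whitelist:
    - but it is still running
    - objects unfound and apparently lost
tasks:
- install:
- ceph:
- thrashosds:
    timeout: 1200
    chance_pgnum_grow: 1
    chance_pgnum_shrink: 1
    chance_pgpnum_fix: 1
- rados:
    ops: 4000
    max_seconds: 3600
    objects: 50
    op_weights:
      read: 100
      write: 100
      delete: 50
      snap_create: 50
      snap_remove: 50
      rollback: 50
      copy_from: 50
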
b/qa/suites/big/rados-thrash/openstack.yaml new file mode 100644 index 00000000..4d6edcd0 --- /dev/null +++ b/qa/suites/big/rados-thrash/openstack.yaml @@ -0,0 +1,8 @@ +openstack: + - machine: + disk: 40 # GB + ram: 8000 # MB + cpus: 1 + volumes: # attached to each instance + count: 3 + size: 10 # GB diff --git a/qa/suites/big/rados-thrash/thrashers/.qa b/qa/suites/big/rados-thrash/thrashers/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/big/rados-thrash/thrashers/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/big/rados-thrash/thrashers/default.yaml b/qa/suites/big/rados-thrash/thrashers/default.yaml new file mode 100644 index 00000000..8f2b2667 --- /dev/null +++ b/qa/suites/big/rados-thrash/thrashers/default.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgnum_shrink: 1 + chance_pgpnum_fix: 1 diff --git a/qa/suites/big/rados-thrash/workloads/.qa b/qa/suites/big/rados-thrash/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/big/rados-thrash/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/big/rados-thrash/workloads/snaps-few-objects.yaml b/qa/suites/big/rados-thrash/workloads/snaps-few-objects.yaml new file mode 100644 index 00000000..b73bb678 --- /dev/null +++ b/qa/suites/big/rados-thrash/workloads/snaps-few-objects.yaml @@ -0,0 +1,13 @@ +tasks: +- rados: + ops: 4000 + max_seconds: 3600 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 diff --git a/qa/suites/buildpackages/.qa b/qa/suites/buildpackages/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/buildpackages/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/buildpackages/any/% b/qa/suites/buildpackages/any/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/buildpackages/any/.qa b/qa/suites/buildpackages/any/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/buildpackages/any/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/buildpackages/any/distros b/qa/suites/buildpackages/any/distros new file mode 120000 index 00000000..0e1f1303 --- /dev/null +++ b/qa/suites/buildpackages/any/distros @@ -0,0 +1 @@ +.qa/distros/all \ No newline at end of file diff --git a/qa/suites/buildpackages/any/tasks/.qa b/qa/suites/buildpackages/any/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/buildpackages/any/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/buildpackages/any/tasks/release.yaml b/qa/suites/buildpackages/any/tasks/release.yaml new file mode 100644 index 00000000..d7a3b62c --- /dev/null +++ b/qa/suites/buildpackages/any/tasks/release.yaml @@ -0,0 +1,8 @@ +# --suite buildpackages/any --ceph v10.0.1 --filter centos_7,ubuntu_14.04 +roles: + - [client.0] +tasks: + - install: + - exec: + client.0: + - ceph --version | grep 'version ' diff --git a/qa/suites/buildpackages/tests/% b/qa/suites/buildpackages/tests/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/buildpackages/tests/.qa b/qa/suites/buildpackages/tests/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/buildpackages/tests/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline 
at end of file diff --git a/qa/suites/buildpackages/tests/distros b/qa/suites/buildpackages/tests/distros new file mode 120000 index 00000000..0e1f1303 --- /dev/null +++ b/qa/suites/buildpackages/tests/distros @@ -0,0 +1 @@ +.qa/distros/all \ No newline at end of file diff --git a/qa/suites/buildpackages/tests/tasks/.qa b/qa/suites/buildpackages/tests/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/buildpackages/tests/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/buildpackages/tests/tasks/release.yaml b/qa/suites/buildpackages/tests/tasks/release.yaml new file mode 100644 index 00000000..05e87789 --- /dev/null +++ b/qa/suites/buildpackages/tests/tasks/release.yaml @@ -0,0 +1,20 @@ +# --suite buildpackages/tests --ceph v10.0.1 --filter centos_7.2,ubuntu_14.04 +overrides: + ansible.cephlab: + playbook: users.yml + buildpackages: + good_machine: + disk: 20 # GB + ram: 2000 # MB + cpus: 2 + min_machine: + disk: 10 # GB + ram: 1000 # MB + cpus: 1 +roles: + - [client.0] +tasks: + - install: + - exec: + client.0: + - ceph --version | grep 'version ' diff --git a/qa/suites/ceph-ansible/.qa b/qa/suites/ceph-ansible/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/ceph-ansible/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/ceph-ansible/smoke/.qa b/qa/suites/ceph-ansible/smoke/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/ceph-ansible/smoke/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/ceph-ansible/smoke/basic/% b/qa/suites/ceph-ansible/smoke/basic/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/ceph-ansible/smoke/basic/.qa b/qa/suites/ceph-ansible/smoke/basic/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/ceph-ansible/smoke/basic/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/ceph-ansible/smoke/basic/0-clusters/.qa b/qa/suites/ceph-ansible/smoke/basic/0-clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/ceph-ansible/smoke/basic/0-clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/ceph-ansible/smoke/basic/0-clusters/3-node.yaml b/qa/suites/ceph-ansible/smoke/basic/0-clusters/3-node.yaml new file mode 100644 index 00000000..86dd366b --- /dev/null +++ b/qa/suites/ceph-ansible/smoke/basic/0-clusters/3-node.yaml @@ -0,0 +1,12 @@ +meta: +- desc: | + 3-node cluster + install and run ceph-ansible on a mon.a node alone with ceph +roles: +- [mon.a, mds.a, osd.0, osd.1, osd.2] +- [mon.b, mgr.x, osd.3, osd.4, osd.5] +- [mon.c, mgr.y, osd.6, osd.7, osd.8, client.0] +openstack: +- volumes: # attached to each instance + count: 3 + size: 10 # GB diff --git a/qa/suites/ceph-ansible/smoke/basic/0-clusters/4-node.yaml b/qa/suites/ceph-ansible/smoke/basic/0-clusters/4-node.yaml new file mode 100644 index 00000000..b1754432 --- /dev/null +++ b/qa/suites/ceph-ansible/smoke/basic/0-clusters/4-node.yaml @@ -0,0 +1,13 @@ +meta: +- desc: | + 4-node cluster + install and run ceph-ansible on installer.0 stand alone node +roles: +- [mon.a, mds.a, osd.0, osd.1, osd.2] +- [mon.b, mgr.x, osd.3, osd.4, osd.5] +- [mon.c, mgr.y, osd.6, osd.7, osd.8, client.0] +- [installer.0] +openstack: +- volumes: # attached to each instance + count: 3 + size: 10 # GB diff --git a/qa/suites/ceph-ansible/smoke/basic/1-distros/.qa b/qa/suites/ceph-ansible/smoke/basic/1-distros/.qa new file 
mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/ceph-ansible/smoke/basic/1-distros/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/ceph-ansible/smoke/basic/1-distros/centos_latest.yaml b/qa/suites/ceph-ansible/smoke/basic/1-distros/centos_latest.yaml new file mode 120000 index 00000000..bd9854e7 --- /dev/null +++ b/qa/suites/ceph-ansible/smoke/basic/1-distros/centos_latest.yaml @@ -0,0 +1 @@ +.qa/distros/supported/centos_latest.yaml \ No newline at end of file diff --git a/qa/suites/ceph-ansible/smoke/basic/1-distros/ubuntu_latest.yaml b/qa/suites/ceph-ansible/smoke/basic/1-distros/ubuntu_latest.yaml new file mode 120000 index 00000000..3a09f9ab --- /dev/null +++ b/qa/suites/ceph-ansible/smoke/basic/1-distros/ubuntu_latest.yaml @@ -0,0 +1 @@ +.qa/distros/supported/ubuntu_latest.yaml \ No newline at end of file diff --git a/qa/suites/ceph-ansible/smoke/basic/2-ceph/.qa b/qa/suites/ceph-ansible/smoke/basic/2-ceph/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/ceph-ansible/smoke/basic/2-ceph/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/ceph-ansible/smoke/basic/2-ceph/ceph_ansible.yaml b/qa/suites/ceph-ansible/smoke/basic/2-ceph/ceph_ansible.yaml new file mode 100644 index 00000000..b13ab61a --- /dev/null +++ b/qa/suites/ceph-ansible/smoke/basic/2-ceph/ceph_ansible.yaml @@ -0,0 +1,35 @@ +meta: +- desc: "Build the ceph cluster using ceph-ansible" + +overrides: + ceph_ansible: + ansible-version: '2.9' + branch: stable-4.0 + vars: + ceph_conf_overrides: + global: + osd default pool size: 2 + mon pg warn min per osd: 2 + osd pool default pg num: 64 + osd pool default pgp num: 64 + mon_max_pg_per_osd: 1024 + ceph_test: true + ceph_stable_release: mimic + osd_scenario: lvm + journal_size: 1024 + osd_auto_discovery: false + ceph_origin: repository + ceph_repository: dev + ceph_mgr_modules: + - status + - restful + cephfs_pools: + - name: "cephfs_data" + pg_num: "64" + - name: "cephfs_metadata" + pg_num: "64" + dashboard_enabled: false +tasks: +- ssh-keys: +- ceph_ansible: +- install.ship_utilities: diff --git a/qa/suites/ceph-ansible/smoke/basic/3-config/.qa b/qa/suites/ceph-ansible/smoke/basic/3-config/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/ceph-ansible/smoke/basic/3-config/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/ceph-ansible/smoke/basic/3-config/bluestore_with_dmcrypt.yaml b/qa/suites/ceph-ansible/smoke/basic/3-config/bluestore_with_dmcrypt.yaml new file mode 100644 index 00000000..604e757a --- /dev/null +++ b/qa/suites/ceph-ansible/smoke/basic/3-config/bluestore_with_dmcrypt.yaml @@ -0,0 +1,8 @@ +meta: +- desc: "use bluestore + dmcrypt option" + +overrides: + ceph_ansible: + vars: + osd_objectstore: bluestore + dmcrypt: True diff --git a/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_off.yaml b/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_off.yaml new file mode 100644 index 00000000..4bbd1c7c --- /dev/null +++ b/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_off.yaml @@ -0,0 +1,7 @@ +meta: +- desc: "without dmcrypt" + +overrides: + ceph_ansible: + vars: + dmcrypt: False diff --git a/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_on.yaml b/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_on.yaml new file mode 100644 index 00000000..12d63d32 --- /dev/null +++ b/qa/suites/ceph-ansible/smoke/basic/3-config/dmcrypt_on.yaml @@ -0,0 +1,7 @@ +meta: +- desc: "use dmcrypt option" + 
+overrides: + ceph_ansible: + vars: + dmcrypt: True diff --git a/qa/suites/ceph-ansible/smoke/basic/4-tasks/.qa b/qa/suites/ceph-ansible/smoke/basic/4-tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/ceph-ansible/smoke/basic/4-tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/ceph-ansible/smoke/basic/4-tasks/ceph-admin-commands.yaml b/qa/suites/ceph-ansible/smoke/basic/4-tasks/ceph-admin-commands.yaml new file mode 100644 index 00000000..33642d5c --- /dev/null +++ b/qa/suites/ceph-ansible/smoke/basic/4-tasks/ceph-admin-commands.yaml @@ -0,0 +1,7 @@ +meta: +- desc: "Run ceph-admin-commands.sh" +tasks: +- workunit: + clients: + client.0: + - ceph-tests/ceph-admin-commands.sh diff --git a/qa/suites/ceph-ansible/smoke/basic/4-tasks/rbd_import_export.yaml b/qa/suites/ceph-ansible/smoke/basic/4-tasks/rbd_import_export.yaml new file mode 100644 index 00000000..9495934e --- /dev/null +++ b/qa/suites/ceph-ansible/smoke/basic/4-tasks/rbd_import_export.yaml @@ -0,0 +1,7 @@ +meta: +- desc: "Run the rbd import/export tests" +tasks: +- workunit: + clients: + client.0: + - rbd/import_export.sh diff --git a/qa/suites/ceph-ansible/smoke/basic/4-tasks/rest.yaml b/qa/suites/ceph-ansible/smoke/basic/4-tasks/rest.yaml new file mode 100644 index 00000000..8e389134 --- /dev/null +++ b/qa/suites/ceph-ansible/smoke/basic/4-tasks/rest.yaml @@ -0,0 +1,15 @@ +tasks: +- exec: + mgr.x: + - systemctl stop ceph-mgr.target + - sleep 5 + - ceph -s +- exec: + mon.a: + - ceph restful create-key admin + - ceph restful create-self-signed-cert + - ceph restful restart +- workunit: + clients: + client.0: + - rest/test-restful.sh diff --git a/qa/suites/ceph-deploy/% b/qa/suites/ceph-deploy/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/ceph-deploy/.qa b/qa/suites/ceph-deploy/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/ceph-deploy/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/ceph-deploy/cluster/.qa b/qa/suites/ceph-deploy/cluster/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/ceph-deploy/cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/ceph-deploy/cluster/4node.yaml b/qa/suites/ceph-deploy/cluster/4node.yaml new file mode 100644 index 00000000..bf4a7f98 --- /dev/null +++ b/qa/suites/ceph-deploy/cluster/4node.yaml @@ -0,0 +1,15 @@ +overrides: + ansible.cephlab: + vars: + quick_lvs_to_create: 4 +openstack: + - machine: + disk: 10 + volumes: + count: 4 + size: 20 +roles: +- [mon.a, mgr.y, osd.0, osd.1] +- [mon.b, osd.2, osd.3] +- [mon.c, osd.4, osd.5] +- [mgr.x, client.0] diff --git a/qa/suites/ceph-deploy/config/.qa b/qa/suites/ceph-deploy/config/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/ceph-deploy/config/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/ceph-deploy/config/ceph_volume_bluestore.yaml b/qa/suites/ceph-deploy/config/ceph_volume_bluestore.yaml new file mode 100644 index 00000000..e484e612 --- /dev/null +++ b/qa/suites/ceph-deploy/config/ceph_volume_bluestore.yaml @@ -0,0 +1,7 @@ +overrides: + ceph-deploy: + use-ceph-volume: True + bluestore: True + conf: + osd: + bluestore fsck on mount: true diff --git a/qa/suites/ceph-deploy/config/ceph_volume_bluestore_dmcrypt.yaml b/qa/suites/ceph-deploy/config/ceph_volume_bluestore_dmcrypt.yaml new file mode 100644 index 00000000..d424b642 --- /dev/null +++ 
b/qa/suites/ceph-deploy/config/ceph_volume_bluestore_dmcrypt.yaml @@ -0,0 +1,8 @@ +overrides: + ceph-deploy: + use-ceph-volume: True + bluestore: True + dmcrypt: True + conf: + osd: + bluestore fsck on mount: true diff --git a/qa/suites/ceph-deploy/config/ceph_volume_dmcrypt_off.yaml b/qa/suites/ceph-deploy/config/ceph_volume_dmcrypt_off.yaml new file mode 100644 index 00000000..09701441 --- /dev/null +++ b/qa/suites/ceph-deploy/config/ceph_volume_dmcrypt_off.yaml @@ -0,0 +1,3 @@ +overrides: + ceph-deploy: + use-ceph-volume: True diff --git a/qa/suites/ceph-deploy/config/ceph_volume_filestore.yaml b/qa/suites/ceph-deploy/config/ceph_volume_filestore.yaml new file mode 100644 index 00000000..d7185631 --- /dev/null +++ b/qa/suites/ceph-deploy/config/ceph_volume_filestore.yaml @@ -0,0 +1,4 @@ +overrides: + ceph-deploy: + use-ceph-volume: True + filestore: True diff --git a/qa/suites/ceph-deploy/distros/.qa b/qa/suites/ceph-deploy/distros/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/ceph-deploy/distros/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/ceph-deploy/distros/centos_latest.yaml b/qa/suites/ceph-deploy/distros/centos_latest.yaml new file mode 120000 index 00000000..bd9854e7 --- /dev/null +++ b/qa/suites/ceph-deploy/distros/centos_latest.yaml @@ -0,0 +1 @@ +.qa/distros/supported/centos_latest.yaml \ No newline at end of file diff --git a/qa/suites/ceph-deploy/distros/ubuntu_latest.yaml b/qa/suites/ceph-deploy/distros/ubuntu_latest.yaml new file mode 120000 index 00000000..3a09f9ab --- /dev/null +++ b/qa/suites/ceph-deploy/distros/ubuntu_latest.yaml @@ -0,0 +1 @@ +.qa/distros/supported/ubuntu_latest.yaml \ No newline at end of file diff --git a/qa/suites/ceph-deploy/python_versions/.qa b/qa/suites/ceph-deploy/python_versions/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/ceph-deploy/python_versions/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/ceph-deploy/python_versions/python_2.yaml b/qa/suites/ceph-deploy/python_versions/python_2.yaml new file mode 100644 index 00000000..51c865bf --- /dev/null +++ b/qa/suites/ceph-deploy/python_versions/python_2.yaml @@ -0,0 +1,3 @@ +overrides: + ceph-deploy: + python_version: "2" diff --git a/qa/suites/ceph-deploy/python_versions/python_3.yaml b/qa/suites/ceph-deploy/python_versions/python_3.yaml new file mode 100644 index 00000000..22deecae --- /dev/null +++ b/qa/suites/ceph-deploy/python_versions/python_3.yaml @@ -0,0 +1,3 @@ +overrides: + ceph-deploy: + python_version: "3" diff --git a/qa/suites/ceph-deploy/tasks/.qa b/qa/suites/ceph-deploy/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/ceph-deploy/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/ceph-deploy/tasks/ceph-admin-commands.yaml b/qa/suites/ceph-deploy/tasks/ceph-admin-commands.yaml new file mode 100644 index 00000000..b7dbfe1a --- /dev/null +++ b/qa/suites/ceph-deploy/tasks/ceph-admin-commands.yaml @@ -0,0 +1,12 @@ +meta: +- desc: "test basic ceph admin commands" +tasks: +- ssh_keys: +- print: "**** done ssh_keys" +- ceph-deploy: +- print: "**** done ceph-deploy" +- workunit: + clients: + client.0: + - ceph-tests/ceph-admin-commands.sh +- print: "**** done ceph-tests/ceph-admin-commands.sh" diff --git a/qa/suites/ceph-deploy/tasks/rbd_import_export.yaml b/qa/suites/ceph-deploy/tasks/rbd_import_export.yaml new file mode 100644 index 00000000..1c09735a --- /dev/null +++ 
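
Each ceph-deploy facet fragment above contributes only a small overrides or tasks stanza; a scheduled job is their union. A hedged sketch of one possible combination, assuming config/ceph_volume_bluestore_dmcrypt.yaml, python_versions/python_3.yaml and tasks/ceph-admin-commands.yaml are the fragments picked (illustrative, not generated output):

# Illustrative union of the three ceph-deploy fragments named above.
overrides:
  ceph-deploy:
    use-ceph-volume: True
    bluestore: True
    dmcrypt: True
    python_version: "3"
    conf:
      osd:
        bluestore fsck on mount: true
tasks:
- ssh_keys:
- print: "**** done ssh_keys"
- ceph-deploy:
- print: "**** done ceph-deploy"
- workunit:
    clients:
      client.0:
      - ceph-tests/ceph-admin-commands.sh
- print: "**** done ceph-tests/ceph-admin-commands.sh"
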
b/qa/suites/ceph-deploy/tasks/rbd_import_export.yaml @@ -0,0 +1,9 @@ +meta: +- desc: "Setup cluster using ceph-deploy, Run the rbd import/export tests" +tasks: +- ssh-keys: +- ceph-deploy: +- workunit: + clients: + client.0: + - rbd/import_export.sh diff --git a/qa/suites/cephmetrics/% b/qa/suites/cephmetrics/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/cephmetrics/.qa b/qa/suites/cephmetrics/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/cephmetrics/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/cephmetrics/0-clusters/.qa b/qa/suites/cephmetrics/0-clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/cephmetrics/0-clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/cephmetrics/0-clusters/3-node.yaml b/qa/suites/cephmetrics/0-clusters/3-node.yaml new file mode 100644 index 00000000..3935e7cc --- /dev/null +++ b/qa/suites/cephmetrics/0-clusters/3-node.yaml @@ -0,0 +1,11 @@ +meta: +- desc: "4-node cluster" +roles: +- [mon.a, mds.a, osd.0, osd.1, osd.2] +- [mon.b, mgr.x, osd.3, osd.4, osd.5] +- [mon.c, mgr.y, osd.6, osd.7, osd.8, client.0] +- [cephmetrics.0] +openstack: +- volumes: # attached to each instance + count: 3 + size: 10 # GB diff --git a/qa/suites/cephmetrics/1-distros/.qa b/qa/suites/cephmetrics/1-distros/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/cephmetrics/1-distros/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/cephmetrics/1-distros/centos_latest.yaml b/qa/suites/cephmetrics/1-distros/centos_latest.yaml new file mode 120000 index 00000000..bd9854e7 --- /dev/null +++ b/qa/suites/cephmetrics/1-distros/centos_latest.yaml @@ -0,0 +1 @@ +.qa/distros/supported/centos_latest.yaml \ No newline at end of file diff --git a/qa/suites/cephmetrics/1-distros/ubuntu_latest.yaml b/qa/suites/cephmetrics/1-distros/ubuntu_latest.yaml new file mode 120000 index 00000000..3a09f9ab --- /dev/null +++ b/qa/suites/cephmetrics/1-distros/ubuntu_latest.yaml @@ -0,0 +1 @@ +.qa/distros/supported/ubuntu_latest.yaml \ No newline at end of file diff --git a/qa/suites/cephmetrics/2-ceph/.qa b/qa/suites/cephmetrics/2-ceph/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/cephmetrics/2-ceph/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/cephmetrics/2-ceph/ceph_ansible.yaml b/qa/suites/cephmetrics/2-ceph/ceph_ansible.yaml new file mode 100644 index 00000000..309f5060 --- /dev/null +++ b/qa/suites/cephmetrics/2-ceph/ceph_ansible.yaml @@ -0,0 +1,32 @@ +meta: +- desc: "Build the ceph cluster using ceph-ansible" + +overrides: + ceph_ansible: + vars: + ceph_conf_overrides: + global: + osd default pool size: 2 + mon pg warn min per osd: 2 + osd pool default pg num: 64 + osd pool default pgp num: 64 + mon_max_pg_per_osd: 1024 + ceph_test: true + ceph_stable_release: luminous + osd_scenario: collocated + journal_size: 1024 + osd_auto_discovery: false + ceph_origin: repository + ceph_repository: dev + ceph_mgr_modules: + - status + - restful + cephfs_pools: + - name: "cephfs_data" + pg_num: "64" + - name: "cephfs_metadata" + pg_num: "64" +tasks: +- ssh-keys: +- ceph_ansible: +- install.ship_utilities: diff --git a/qa/suites/cephmetrics/3-ceph-config/.qa b/qa/suites/cephmetrics/3-ceph-config/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/cephmetrics/3-ceph-config/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at 
end of file diff --git a/qa/suites/cephmetrics/3-ceph-config/bluestore_with_dmcrypt.yaml b/qa/suites/cephmetrics/3-ceph-config/bluestore_with_dmcrypt.yaml new file mode 100644 index 00000000..16db8ab2 --- /dev/null +++ b/qa/suites/cephmetrics/3-ceph-config/bluestore_with_dmcrypt.yaml @@ -0,0 +1,8 @@ +meta: +- desc: "use bluestore + dmcrypt" + +overrides: + ceph_ansible: + vars: + osd_objectstore: bluestore + dmcrypt: True diff --git a/qa/suites/cephmetrics/3-ceph-config/bluestore_without_dmcrypt.yaml b/qa/suites/cephmetrics/3-ceph-config/bluestore_without_dmcrypt.yaml new file mode 100644 index 00000000..fc879fc8 --- /dev/null +++ b/qa/suites/cephmetrics/3-ceph-config/bluestore_without_dmcrypt.yaml @@ -0,0 +1,8 @@ +meta: +- desc: "use bluestore without dmcrypt" + +overrides: + ceph_ansible: + vars: + osd_objectstore: bluestore + dmcrypt: False diff --git a/qa/suites/cephmetrics/3-ceph-config/dmcrypt_off.yaml b/qa/suites/cephmetrics/3-ceph-config/dmcrypt_off.yaml new file mode 100644 index 00000000..4bbd1c7c --- /dev/null +++ b/qa/suites/cephmetrics/3-ceph-config/dmcrypt_off.yaml @@ -0,0 +1,7 @@ +meta: +- desc: "without dmcrypt" + +overrides: + ceph_ansible: + vars: + dmcrypt: False diff --git a/qa/suites/cephmetrics/3-ceph-config/dmcrypt_on.yaml b/qa/suites/cephmetrics/3-ceph-config/dmcrypt_on.yaml new file mode 100644 index 00000000..519ad1d7 --- /dev/null +++ b/qa/suites/cephmetrics/3-ceph-config/dmcrypt_on.yaml @@ -0,0 +1,7 @@ +meta: +- desc: "with dmcrypt" + +overrides: + ceph_ansible: + vars: + dmcrypt: True diff --git a/qa/suites/cephmetrics/4-epel/.qa b/qa/suites/cephmetrics/4-epel/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/cephmetrics/4-epel/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/cephmetrics/4-epel/no_epel.yaml b/qa/suites/cephmetrics/4-epel/no_epel.yaml new file mode 100644 index 00000000..1538fd7f --- /dev/null +++ b/qa/suites/cephmetrics/4-epel/no_epel.yaml @@ -0,0 +1,7 @@ +meta: + - desc: "Without EPEL" +overrides: + cephmetrics: + group_vars: + all: + use_epel: false diff --git a/qa/suites/cephmetrics/4-epel/use_epel.yaml b/qa/suites/cephmetrics/4-epel/use_epel.yaml new file mode 100644 index 00000000..d496a43e --- /dev/null +++ b/qa/suites/cephmetrics/4-epel/use_epel.yaml @@ -0,0 +1,7 @@ +meta: + - desc: "Using EPEL" +overrides: + cephmetrics: + group_vars: + all: + use_epel: true diff --git a/qa/suites/cephmetrics/5-containers/.qa b/qa/suites/cephmetrics/5-containers/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/cephmetrics/5-containers/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/cephmetrics/5-containers/containerized.yaml b/qa/suites/cephmetrics/5-containers/containerized.yaml new file mode 100644 index 00000000..686de08a --- /dev/null +++ b/qa/suites/cephmetrics/5-containers/containerized.yaml @@ -0,0 +1,10 @@ +meta: + - desc: "Containerized prometheus and grafana" +overrides: + cephmetrics: + group_vars: + all: + prometheus: + containerized: true + grafana: + containerized: true diff --git a/qa/suites/cephmetrics/5-containers/no_containers.yaml b/qa/suites/cephmetrics/5-containers/no_containers.yaml new file mode 100644 index 00000000..29c69093 --- /dev/null +++ b/qa/suites/cephmetrics/5-containers/no_containers.yaml @@ -0,0 +1,10 @@ +meta: + - desc: "Packaged prometheus and grafana" +overrides: + cephmetrics: + group_vars: + all: + prometheus: + containerized: false + grafana: + containerized: false diff --git 
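
The cephmetrics facets above only toggle Ansible group_vars; the 6-tasks fragment just below supplies the actual cephmetrics task. A rough sketch of the override block a job would end up with when use_epel.yaml and containerized.yaml are selected (illustrative only):

# Illustrative merge of 4-epel/use_epel.yaml and 5-containers/containerized.yaml.
overrides:
  cephmetrics:
    group_vars:
      all:
        use_epel: true
        prometheus:
          containerized: true
        grafana:
          containerized: true
tasks:
- cephmetrics:
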
a/qa/suites/cephmetrics/6-tasks/.qa b/qa/suites/cephmetrics/6-tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/cephmetrics/6-tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/cephmetrics/6-tasks/cephmetrics.yaml b/qa/suites/cephmetrics/6-tasks/cephmetrics.yaml new file mode 100644 index 00000000..15f90394 --- /dev/null +++ b/qa/suites/cephmetrics/6-tasks/cephmetrics.yaml @@ -0,0 +1,4 @@ +meta: +- desc: "Deploy cephmetrics and run integration tests" +tasks: +- cephmetrics: diff --git a/qa/suites/dummy/% b/qa/suites/dummy/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/dummy/.qa b/qa/suites/dummy/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/dummy/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/dummy/all/.qa b/qa/suites/dummy/all/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/dummy/all/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/dummy/all/nop.yaml b/qa/suites/dummy/all/nop.yaml new file mode 100644 index 00000000..0f00ffc8 --- /dev/null +++ b/qa/suites/dummy/all/nop.yaml @@ -0,0 +1,6 @@ +roles: + - [mon.a, mgr.x, mds.a, osd.0, osd.1, client.0] + +tasks: + - nop: + diff --git a/qa/suites/experimental/.qa b/qa/suites/experimental/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/experimental/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/experimental/multimds/% b/qa/suites/experimental/multimds/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/experimental/multimds/.qa b/qa/suites/experimental/multimds/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/experimental/multimds/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/experimental/multimds/clusters/.qa b/qa/suites/experimental/multimds/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/experimental/multimds/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/experimental/multimds/clusters/7-multimds.yaml b/qa/suites/experimental/multimds/clusters/7-multimds.yaml new file mode 100644 index 00000000..7b2763f1 --- /dev/null +++ b/qa/suites/experimental/multimds/clusters/7-multimds.yaml @@ -0,0 +1,8 @@ +roles: +- [mon.a, mgr.x, mds.a, mds.d] +- [mon.b, mgr.y, mds.b, mds.e] +- [mon.c, mgr.z, mds.c, mds.f] +- [osd.0] +- [osd.1] +- [osd.2] +- [client.0] diff --git a/qa/suites/experimental/multimds/tasks/.qa b/qa/suites/experimental/multimds/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/experimental/multimds/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/experimental/multimds/tasks/fsstress_thrash_subtrees.yaml b/qa/suites/experimental/multimds/tasks/fsstress_thrash_subtrees.yaml new file mode 100644 index 00000000..bee01a83 --- /dev/null +++ b/qa/suites/experimental/multimds/tasks/fsstress_thrash_subtrees.yaml @@ -0,0 +1,15 @@ +tasks: +- install: +- ceph: + conf: + mds: + mds thrash exports: 1 + mds debug subtrees: 1 + mds debug scatterstat: 1 + mds verify scatter: 1 +- ceph-fuse: +- workunit: + clients: + client.0: + - suites/fsstress.sh + diff --git a/qa/suites/fs/.qa b/qa/suites/fs/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git 
a/qa/suites/fs/32bits/% b/qa/suites/fs/32bits/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/32bits/.qa b/qa/suites/fs/32bits/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/32bits/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/32bits/begin.yaml b/qa/suites/fs/32bits/begin.yaml new file mode 120000 index 00000000..311d404f --- /dev/null +++ b/qa/suites/fs/32bits/begin.yaml @@ -0,0 +1 @@ +.qa/cephfs/begin.yaml \ No newline at end of file diff --git a/qa/suites/fs/32bits/clusters/.qa b/qa/suites/fs/32bits/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/32bits/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/32bits/clusters/fixed-2-ucephfs.yaml b/qa/suites/fs/32bits/clusters/fixed-2-ucephfs.yaml new file mode 120000 index 00000000..b0c41a89 --- /dev/null +++ b/qa/suites/fs/32bits/clusters/fixed-2-ucephfs.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/fixed-2-ucephfs.yaml \ No newline at end of file diff --git a/qa/suites/fs/32bits/conf b/qa/suites/fs/32bits/conf new file mode 120000 index 00000000..16e8cc44 --- /dev/null +++ b/qa/suites/fs/32bits/conf @@ -0,0 +1 @@ +.qa/cephfs/conf \ No newline at end of file diff --git a/qa/suites/fs/32bits/mount/.qa b/qa/suites/fs/32bits/mount/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/32bits/mount/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/32bits/mount/fuse.yaml b/qa/suites/fs/32bits/mount/fuse.yaml new file mode 120000 index 00000000..0e55da9f --- /dev/null +++ b/qa/suites/fs/32bits/mount/fuse.yaml @@ -0,0 +1 @@ +.qa/cephfs/mount/fuse.yaml \ No newline at end of file diff --git a/qa/suites/fs/32bits/objectstore-ec b/qa/suites/fs/32bits/objectstore-ec new file mode 120000 index 00000000..affe2949 --- /dev/null +++ b/qa/suites/fs/32bits/objectstore-ec @@ -0,0 +1 @@ +.qa/cephfs/objectstore-ec \ No newline at end of file diff --git a/qa/suites/fs/32bits/overrides/+ b/qa/suites/fs/32bits/overrides/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/32bits/overrides/.qa b/qa/suites/fs/32bits/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/32bits/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/32bits/overrides/faked-ino.yaml b/qa/suites/fs/32bits/overrides/faked-ino.yaml new file mode 100644 index 00000000..102df684 --- /dev/null +++ b/qa/suites/fs/32bits/overrides/faked-ino.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + client use faked inos: true diff --git a/qa/suites/fs/32bits/overrides/frag_enable.yaml b/qa/suites/fs/32bits/overrides/frag_enable.yaml new file mode 120000 index 00000000..34a39a36 --- /dev/null +++ b/qa/suites/fs/32bits/overrides/frag_enable.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/qa/suites/fs/32bits/overrides/whitelist_health.yaml b/qa/suites/fs/32bits/overrides/whitelist_health.yaml new file mode 120000 index 00000000..74f39a49 --- /dev/null +++ b/qa/suites/fs/32bits/overrides/whitelist_health.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_health.yaml \ No newline at end of file diff --git a/qa/suites/fs/32bits/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/32bits/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000..b4528c0f --- /dev/null +++ 
b/qa/suites/fs/32bits/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/fs/32bits/supported-random-distros$ b/qa/suites/fs/32bits/supported-random-distros$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/fs/32bits/supported-random-distros$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/fs/32bits/tasks/.qa b/qa/suites/fs/32bits/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/32bits/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_fsstress.yaml new file mode 120000 index 00000000..c2e859ff --- /dev/null +++ b/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_fsstress.yaml @@ -0,0 +1 @@ +.qa/cephfs/tasks/cfuse_workunit_suites_fsstress.yaml \ No newline at end of file diff --git a/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_pjd.yaml b/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_pjd.yaml new file mode 100644 index 00000000..37e315f7 --- /dev/null +++ b/qa/suites/fs/32bits/tasks/cfuse_workunit_suites_pjd.yaml @@ -0,0 +1,12 @@ +overrides: + ceph: + conf: + client: + fuse set user groups: true + fuse default permissions: false +tasks: +- workunit: + timeout: 6h + clients: + all: + - suites/pjd.sh diff --git a/qa/suites/fs/basic_functional/% b/qa/suites/fs/basic_functional/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/basic_functional/.qa b/qa/suites/fs/basic_functional/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/basic_functional/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/basic_functional/begin.yaml b/qa/suites/fs/basic_functional/begin.yaml new file mode 120000 index 00000000..311d404f --- /dev/null +++ b/qa/suites/fs/basic_functional/begin.yaml @@ -0,0 +1 @@ +.qa/cephfs/begin.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic_functional/clusters/.qa b/qa/suites/fs/basic_functional/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/basic_functional/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/basic_functional/clusters/1-mds-4-client-coloc.yaml b/qa/suites/fs/basic_functional/clusters/1-mds-4-client-coloc.yaml new file mode 120000 index 00000000..e5444ae2 --- /dev/null +++ b/qa/suites/fs/basic_functional/clusters/1-mds-4-client-coloc.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/1-mds-4-client-coloc.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic_functional/conf b/qa/suites/fs/basic_functional/conf new file mode 120000 index 00000000..16e8cc44 --- /dev/null +++ b/qa/suites/fs/basic_functional/conf @@ -0,0 +1 @@ +.qa/cephfs/conf \ No newline at end of file diff --git a/qa/suites/fs/basic_functional/mount/.qa b/qa/suites/fs/basic_functional/mount/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/basic_functional/mount/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/basic_functional/mount/fuse.yaml b/qa/suites/fs/basic_functional/mount/fuse.yaml new file mode 120000 index 00000000..0e55da9f --- /dev/null +++ b/qa/suites/fs/basic_functional/mount/fuse.yaml @@ -0,0 +1 @@ +.qa/cephfs/mount/fuse.yaml \ No newline at end of file diff --git 
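
The fs/32bits fragments above layer purely client-side settings: overrides/faked-ino.yaml and tasks/cfuse_workunit_suites_pjd.yaml combine into something like the following, with frag_enable and the whitelist_* symlinks adding further mds and log settings from qa/cephfs/overrides on top of it (a hedged sketch, not a file from the tree):

# Illustrative combination of overrides/faked-ino.yaml and
# tasks/cfuse_workunit_suites_pjd.yaml from the fs/32bits suite above.
overrides:
  ceph:
    conf:
      client:
        client use faked inos: true
        fuse set user groups: true
        fuse default permissions: false
tasks:
- workunit:
    timeout: 6h
    clients:
      all:
      - suites/pjd.sh
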
a/qa/suites/fs/basic_functional/objectstore/.qa b/qa/suites/fs/basic_functional/objectstore/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/basic_functional/objectstore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/basic_functional/objectstore/bluestore-bitmap.yaml b/qa/suites/fs/basic_functional/objectstore/bluestore-bitmap.yaml new file mode 120000 index 00000000..a59cf517 --- /dev/null +++ b/qa/suites/fs/basic_functional/objectstore/bluestore-bitmap.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-bitmap.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic_functional/objectstore/bluestore-ec-root.yaml b/qa/suites/fs/basic_functional/objectstore/bluestore-ec-root.yaml new file mode 120000 index 00000000..4edebd68 --- /dev/null +++ b/qa/suites/fs/basic_functional/objectstore/bluestore-ec-root.yaml @@ -0,0 +1 @@ +.qa/cephfs/objectstore-ec/bluestore-ec-root.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic_functional/overrides/+ b/qa/suites/fs/basic_functional/overrides/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/basic_functional/overrides/.qa b/qa/suites/fs/basic_functional/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/basic_functional/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/basic_functional/overrides/frag_enable.yaml b/qa/suites/fs/basic_functional/overrides/frag_enable.yaml new file mode 120000 index 00000000..34a39a36 --- /dev/null +++ b/qa/suites/fs/basic_functional/overrides/frag_enable.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic_functional/overrides/no_client_pidfile.yaml b/qa/suites/fs/basic_functional/overrides/no_client_pidfile.yaml new file mode 120000 index 00000000..8888f332 --- /dev/null +++ b/qa/suites/fs/basic_functional/overrides/no_client_pidfile.yaml @@ -0,0 +1 @@ +.qa/overrides/no_client_pidfile.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic_functional/overrides/whitelist_health.yaml b/qa/suites/fs/basic_functional/overrides/whitelist_health.yaml new file mode 120000 index 00000000..74f39a49 --- /dev/null +++ b/qa/suites/fs/basic_functional/overrides/whitelist_health.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_health.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic_functional/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/basic_functional/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000..b4528c0f --- /dev/null +++ b/qa/suites/fs/basic_functional/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic_functional/supported-random-distros$ b/qa/suites/fs/basic_functional/supported-random-distros$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/fs/basic_functional/supported-random-distros$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/fs/basic_functional/tasks/.qa b/qa/suites/fs/basic_functional/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/basic_functional/tasks/admin.yaml b/qa/suites/fs/basic_functional/tasks/admin.yaml new file mode 100644 index 
00000000..ef40ef93 --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/admin.yaml @@ -0,0 +1,11 @@ + +overrides: + ceph: + conf: + global: + lockdep: true + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_admin diff --git a/qa/suites/fs/basic_functional/tasks/alternate-pool.yaml b/qa/suites/fs/basic_functional/tasks/alternate-pool.yaml new file mode 100644 index 00000000..94d5cc6f --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/alternate-pool.yaml @@ -0,0 +1,20 @@ + +overrides: + ceph: + log-whitelist: + - bad backtrace + - object missing on disk + - error reading table object + - error reading sessionmap + - unmatched fragstat + - unmatched rstat + - was unreadable, recreating it now + - Scrub error on inode + - Metadata damage detected + - MDS_FAILED + - MDS_DAMAGE + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_recovery_pool diff --git a/qa/suites/fs/basic_functional/tasks/asok_dump_tree.yaml b/qa/suites/fs/basic_functional/tasks/asok_dump_tree.yaml new file mode 100644 index 00000000..7fa56147 --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/asok_dump_tree.yaml @@ -0,0 +1,4 @@ +tasks: +- cephfs_test_runner: + modules: + - tasks.cephfs.test_dump_tree diff --git a/qa/suites/fs/basic_functional/tasks/auto-repair.yaml b/qa/suites/fs/basic_functional/tasks/auto-repair.yaml new file mode 100644 index 00000000..90d0e7bc --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/auto-repair.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + log-whitelist: + - force file system read-only + - bad backtrace + - MDS in read-only mode + - \(MDS_READ_ONLY\) + + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_auto_repair diff --git a/qa/suites/fs/basic_functional/tasks/backtrace.yaml b/qa/suites/fs/basic_functional/tasks/backtrace.yaml new file mode 100644 index 00000000..d740a5f6 --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/backtrace.yaml @@ -0,0 +1,5 @@ + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_backtrace diff --git a/qa/suites/fs/basic_functional/tasks/cap-flush.yaml b/qa/suites/fs/basic_functional/tasks/cap-flush.yaml new file mode 100644 index 00000000..f063654a --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/cap-flush.yaml @@ -0,0 +1,8 @@ +overrides: + ceph: + log-ignorelist: + - Replacing daemon mds.a +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_cap_flush diff --git a/qa/suites/fs/basic_functional/tasks/cephfs-shell.yaml b/qa/suites/fs/basic_functional/tasks/cephfs-shell.yaml new file mode 100644 index 00000000..93c1eb54 --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/cephfs-shell.yaml @@ -0,0 +1,8 @@ +# Right now, cephfs-shell is only available as a package on Ubuntu +# This overrides the random distribution that's chosen in the other yaml fragments. 
+os_type: ubuntu +os_version: "18.04" +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_cephfs_shell diff --git a/qa/suites/fs/basic_functional/tasks/cephfs_scrub_tests.yaml b/qa/suites/fs/basic_functional/tasks/cephfs_scrub_tests.yaml new file mode 100644 index 00000000..a8661214 --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/cephfs_scrub_tests.yaml @@ -0,0 +1,19 @@ +overrides: + ceph: + log-whitelist: + - Replacing daemon mds + - Scrub error on inode + - Behind on trimming + - Metadata damage detected + - bad backtrace on inode + - overall HEALTH_ + - \(MDS_TRIM\) + conf: + mds: + mds log max segments: 1 + mds cache max size: 1000 +tasks: +- cephfs_test_runner: + modules: + - tasks.cephfs.test_scrub_checks + - tasks.cephfs.test_scrub diff --git a/qa/suites/fs/basic_functional/tasks/cfuse_workunit_quota.yaml b/qa/suites/fs/basic_functional/tasks/cfuse_workunit_quota.yaml new file mode 100644 index 00000000..7ac8714c --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/cfuse_workunit_quota.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - fs/quota diff --git a/qa/suites/fs/basic_functional/tasks/client-limits.yaml b/qa/suites/fs/basic_functional/tasks/client-limits.yaml new file mode 100644 index 00000000..635d0b6d --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/client-limits.yaml @@ -0,0 +1,19 @@ + +overrides: + ceph: + log-whitelist: + - responding to mclientcaps\(revoke\) + - not advance its oldest_client_tid + - failing to advance its oldest client/flush tid + - Too many inodes in cache + - failing to respond to cache pressure + - slow requests are blocked + - failing to respond to capability release + - MDS cache is too large + - \(MDS_CLIENT_OLDEST_TID\) + - \(MDS_CACHE_OVERSIZED\) + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_client_limits diff --git a/qa/suites/fs/basic_functional/tasks/client-readahad.yaml b/qa/suites/fs/basic_functional/tasks/client-readahad.yaml new file mode 100644 index 00000000..1d178e52 --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/client-readahad.yaml @@ -0,0 +1,4 @@ +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_readahead diff --git a/qa/suites/fs/basic_functional/tasks/client-recovery.yaml b/qa/suites/fs/basic_functional/tasks/client-recovery.yaml new file mode 100644 index 00000000..d1cef802 --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/client-recovery.yaml @@ -0,0 +1,17 @@ + +# The task interferes with the network, so we need +# to permit OSDs to complain about that. 
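
The fs/basic_functional task fragments in this area all share one shape: whitelist the health and cluster-log messages the test deliberately provokes, then hand a single tasks.cephfs.* module to cephfs_test_runner. A schematic sketch of that shape; the module name and whitelist entry here are placeholders, not a real fragment:

# Schematic only: tasks.cephfs.test_example and the whitelist entry are
# placeholders standing in for the real fragments shown above and below.
overrides:
  ceph:
    log-whitelist:
    - message the test intentionally triggers
tasks:
- cephfs_test_runner:
    fail_on_skip: false
    modules:
    - tasks.cephfs.test_example
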
+overrides: + ceph: + log-whitelist: + - evicting unresponsive client + - but it is still running + - slow request + - MDS_CLIENT_LATE_RELEASE + - t responding to mclientcaps + +tasks: + - cephfs_test_runner: + fail_on_skip: false + modules: + - tasks.cephfs.test_client_recovery diff --git a/qa/suites/fs/basic_functional/tasks/damage.yaml b/qa/suites/fs/basic_functional/tasks/damage.yaml new file mode 100644 index 00000000..9ae738f0 --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/damage.yaml @@ -0,0 +1,27 @@ + +overrides: + ceph: + log-whitelist: + - bad backtrace + - object missing on disk + - error reading table object + - error reading sessionmap + - Error loading MDS rank + - missing journal object + - Error recovering journal + - error decoding table object + - failed to read JournalPointer + - Corrupt directory entry + - Corrupt fnode header + - corrupt sessionmap header + - Corrupt dentry + - Scrub error on inode + - Metadata damage detected + - MDS_READ_ONLY + - force file system read-only + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_damage + diff --git a/qa/suites/fs/basic_functional/tasks/data-scan.yaml b/qa/suites/fs/basic_functional/tasks/data-scan.yaml new file mode 100644 index 00000000..0a2eb0d4 --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/data-scan.yaml @@ -0,0 +1,20 @@ + +overrides: + ceph: + log-whitelist: + - bad backtrace + - object missing on disk + - error reading table object + - error reading sessionmap + - unmatched fragstat + - unmatched rstat + - was unreadable, recreating it now + - Scrub error on inode + - Metadata damage detected + - inconsistent rstat on inode + - Error recovering journal + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_data_scan diff --git a/qa/suites/fs/basic_functional/tasks/forward-scrub.yaml b/qa/suites/fs/basic_functional/tasks/forward-scrub.yaml new file mode 100644 index 00000000..b92cf105 --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/forward-scrub.yaml @@ -0,0 +1,14 @@ + +overrides: + ceph: + log-whitelist: + - inode wrongly marked free + - bad backtrace on inode + - inode table repaired for inode + - Scrub error on inode + - Metadata damage detected + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_forward_scrub diff --git a/qa/suites/fs/basic_functional/tasks/fragment.yaml b/qa/suites/fs/basic_functional/tasks/fragment.yaml new file mode 100644 index 00000000..482caad8 --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/fragment.yaml @@ -0,0 +1,5 @@ + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_fragment diff --git a/qa/suites/fs/basic_functional/tasks/journal-repair.yaml b/qa/suites/fs/basic_functional/tasks/journal-repair.yaml new file mode 100644 index 00000000..66f819d0 --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/journal-repair.yaml @@ -0,0 +1,14 @@ + +overrides: + ceph: + log-whitelist: + - bad backtrace on directory inode + - error reading table object + - Metadata damage detected + - slow requests are blocked + - Behind on trimming + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_journal_repair diff --git a/qa/suites/fs/basic_functional/tasks/libcephfs_python.yaml b/qa/suites/fs/basic_functional/tasks/libcephfs_python.yaml new file mode 100644 index 00000000..e5cbb14b --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/libcephfs_python.yaml @@ -0,0 +1,10 @@ +overrides: + ceph-fuse: + disabled: true + kclient: + disabled: true +tasks: +- workunit: + clients: + client.0: + - 
fs/test_python.sh diff --git a/qa/suites/fs/basic_functional/tasks/mds-flush.yaml b/qa/suites/fs/basic_functional/tasks/mds-flush.yaml new file mode 100644 index 00000000..d59a8ad5 --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/mds-flush.yaml @@ -0,0 +1,5 @@ + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_flush diff --git a/qa/suites/fs/basic_functional/tasks/mds-full.yaml b/qa/suites/fs/basic_functional/tasks/mds-full.yaml new file mode 100644 index 00000000..7e57dc6b --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/mds-full.yaml @@ -0,0 +1,37 @@ + +overrides: + ceph: + cephfs_ec_profile: + - disabled + log-whitelist: + - OSD full dropping all updates + - OSD near full + - pausewr flag + - failsafe engaged, dropping updates + - failsafe disengaged, no longer dropping + - is full \(reached quota + - POOL_FULL + - POOL_BACKFILLFULL + conf: + mon: + mon osd nearfull ratio: 0.6 + mon osd backfillfull ratio: 0.6 + mon osd full ratio: 0.7 + osd: + osd mon report interval: 5 + osd objectstore: memstore + osd failsafe full ratio: 1.0 + memstore device bytes: 200000000 + client.0: + debug client: 20 + debug objecter: 20 + debug objectcacher: 20 + client.1: + debug client: 20 + debug objecter: 20 + debug objectcacher: 20 + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_full diff --git a/qa/suites/fs/basic_functional/tasks/mds_creation_retry.yaml b/qa/suites/fs/basic_functional/tasks/mds_creation_retry.yaml new file mode 100644 index 00000000..fd23aa8b --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/mds_creation_retry.yaml @@ -0,0 +1,6 @@ +tasks: +-mds_creation_failure: +- workunit: + clients: + all: [fs/misc/trivial_sync.sh] + diff --git a/qa/suites/fs/basic_functional/tasks/openfiletable.yaml b/qa/suites/fs/basic_functional/tasks/openfiletable.yaml new file mode 100644 index 00000000..ad90e8bc --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/openfiletable.yaml @@ -0,0 +1,5 @@ + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_openfiletable diff --git a/qa/suites/fs/basic_functional/tasks/pool-perm.yaml b/qa/suites/fs/basic_functional/tasks/pool-perm.yaml new file mode 100644 index 00000000..f220626d --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/pool-perm.yaml @@ -0,0 +1,5 @@ + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_pool_perm diff --git a/qa/suites/fs/basic_functional/tasks/quota.yaml b/qa/suites/fs/basic_functional/tasks/quota.yaml new file mode 100644 index 00000000..89b10ce2 --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/quota.yaml @@ -0,0 +1,5 @@ + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_quota diff --git a/qa/suites/fs/basic_functional/tasks/sessionmap/+ b/qa/suites/fs/basic_functional/tasks/sessionmap/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/basic_functional/tasks/sessionmap/.qa b/qa/suites/fs/basic_functional/tasks/sessionmap/.qa new file mode 120000 index 00000000..fea2489f --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/sessionmap/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/fs/basic_functional/tasks/sessionmap/sessionmap.yaml b/qa/suites/fs/basic_functional/tasks/sessionmap/sessionmap.yaml new file mode 100644 index 00000000..1d72301b --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/sessionmap/sessionmap.yaml @@ -0,0 +1,10 @@ + +overrides: + ceph: + log-whitelist: + - client session with non-allowable root + +tasks: + - cephfs_test_runner: + modules: + - 
tasks.cephfs.test_sessionmap diff --git a/qa/suites/fs/basic_functional/tasks/strays.yaml b/qa/suites/fs/basic_functional/tasks/strays.yaml new file mode 100644 index 00000000..2809fc14 --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/strays.yaml @@ -0,0 +1,5 @@ + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_strays diff --git a/qa/suites/fs/basic_functional/tasks/test_journal_migration.yaml b/qa/suites/fs/basic_functional/tasks/test_journal_migration.yaml new file mode 100644 index 00000000..183ef388 --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/test_journal_migration.yaml @@ -0,0 +1,5 @@ + +tasks: +- cephfs_test_runner: + modules: + - tasks.cephfs.test_journal_migration diff --git a/qa/suites/fs/basic_functional/tasks/volume-client/% b/qa/suites/fs/basic_functional/tasks/volume-client/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/basic_functional/tasks/volume-client/.qa b/qa/suites/fs/basic_functional/tasks/volume-client/.qa new file mode 120000 index 00000000..fea2489f --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/volume-client/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/fs/basic_functional/tasks/volume-client/task/.qa b/qa/suites/fs/basic_functional/tasks/volume-client/task/.qa new file mode 120000 index 00000000..fea2489f --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/volume-client/task/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/fs/basic_functional/tasks/volume-client/task/test/+ b/qa/suites/fs/basic_functional/tasks/volume-client/task/test/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/basic_functional/tasks/volume-client/task/test/.qa b/qa/suites/fs/basic_functional/tasks/volume-client/task/test/.qa new file mode 120000 index 00000000..fea2489f --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/volume-client/task/test/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/fs/basic_functional/tasks/volume-client/task/test/test.yaml b/qa/suites/fs/basic_functional/tasks/volume-client/task/test/test.yaml new file mode 100644 index 00000000..2ad97a00 --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/volume-client/task/test/test.yaml @@ -0,0 +1,8 @@ +overrides: + ceph: + log-whitelist: + - MON_DOWN +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_volume_client diff --git a/qa/suites/fs/basic_functional/tasks/volumes.yaml b/qa/suites/fs/basic_functional/tasks/volumes.yaml new file mode 100644 index 00000000..1315980e --- /dev/null +++ b/qa/suites/fs/basic_functional/tasks/volumes.yaml @@ -0,0 +1,20 @@ +overrides: + ceph: + conf: + mgr: + debug client: 10 + log-whitelist: + - OSD full dropping all updates + - OSD near full + - pausewr flag + - failsafe engaged, dropping updates + - failsafe disengaged, no longer dropping + - is full \(reached quota + - POOL_FULL + - POOL_BACKFILLFULL + +tasks: + - cephfs_test_runner: + fail_on_skip: false + modules: + - tasks.cephfs.test_volumes diff --git a/qa/suites/fs/basic_workload/% b/qa/suites/fs/basic_workload/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/basic_workload/.qa b/qa/suites/fs/basic_workload/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/basic_workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/begin.yaml b/qa/suites/fs/basic_workload/begin.yaml new file mode 120000 index 00000000..311d404f --- 
/dev/null +++ b/qa/suites/fs/basic_workload/begin.yaml @@ -0,0 +1 @@ +.qa/cephfs/begin.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/clusters/.qa b/qa/suites/fs/basic_workload/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/basic_workload/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/clusters/fixed-2-ucephfs.yaml b/qa/suites/fs/basic_workload/clusters/fixed-2-ucephfs.yaml new file mode 120000 index 00000000..b0c41a89 --- /dev/null +++ b/qa/suites/fs/basic_workload/clusters/fixed-2-ucephfs.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/fixed-2-ucephfs.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/conf b/qa/suites/fs/basic_workload/conf new file mode 120000 index 00000000..16e8cc44 --- /dev/null +++ b/qa/suites/fs/basic_workload/conf @@ -0,0 +1 @@ +.qa/cephfs/conf \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/inline/.qa b/qa/suites/fs/basic_workload/inline/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/basic_workload/inline/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/inline/no.yaml b/qa/suites/fs/basic_workload/inline/no.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/basic_workload/inline/yes.yaml b/qa/suites/fs/basic_workload/inline/yes.yaml new file mode 100644 index 00000000..da8677a5 --- /dev/null +++ b/qa/suites/fs/basic_workload/inline/yes.yaml @@ -0,0 +1,4 @@ +tasks: +- exec: + client.0: + - sudo ceph fs set cephfs inline_data true --yes-i-really-mean-it diff --git a/qa/suites/fs/basic_workload/mount/.qa b/qa/suites/fs/basic_workload/mount/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/basic_workload/mount/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/mount/fuse.yaml b/qa/suites/fs/basic_workload/mount/fuse.yaml new file mode 120000 index 00000000..0e55da9f --- /dev/null +++ b/qa/suites/fs/basic_workload/mount/fuse.yaml @@ -0,0 +1 @@ +.qa/cephfs/mount/fuse.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/objectstore-ec b/qa/suites/fs/basic_workload/objectstore-ec new file mode 120000 index 00000000..affe2949 --- /dev/null +++ b/qa/suites/fs/basic_workload/objectstore-ec @@ -0,0 +1 @@ +.qa/cephfs/objectstore-ec \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/omap_limit/.qa b/qa/suites/fs/basic_workload/omap_limit/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/basic_workload/omap_limit/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/omap_limit/10.yaml b/qa/suites/fs/basic_workload/omap_limit/10.yaml new file mode 100644 index 00000000..0cd2c6f8 --- /dev/null +++ b/qa/suites/fs/basic_workload/omap_limit/10.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + osd: + osd_max_omap_entries_per_request: 10 \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/omap_limit/10000.yaml b/qa/suites/fs/basic_workload/omap_limit/10000.yaml new file mode 100644 index 00000000..0c7e4cf9 --- /dev/null +++ b/qa/suites/fs/basic_workload/omap_limit/10000.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + osd: + osd_max_omap_entries_per_request: 10000 \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/overrides/+ b/qa/suites/fs/basic_workload/overrides/+ new file mode 100644 
index 00000000..e69de29b diff --git a/qa/suites/fs/basic_workload/overrides/.qa b/qa/suites/fs/basic_workload/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/basic_workload/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/overrides/frag_enable.yaml b/qa/suites/fs/basic_workload/overrides/frag_enable.yaml new file mode 120000 index 00000000..34a39a36 --- /dev/null +++ b/qa/suites/fs/basic_workload/overrides/frag_enable.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/overrides/session_timeout.yaml b/qa/suites/fs/basic_workload/overrides/session_timeout.yaml new file mode 120000 index 00000000..fce0318c --- /dev/null +++ b/qa/suites/fs/basic_workload/overrides/session_timeout.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/session_timeout.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/overrides/whitelist_health.yaml b/qa/suites/fs/basic_workload/overrides/whitelist_health.yaml new file mode 120000 index 00000000..74f39a49 --- /dev/null +++ b/qa/suites/fs/basic_workload/overrides/whitelist_health.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_health.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/basic_workload/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000..b4528c0f --- /dev/null +++ b/qa/suites/fs/basic_workload/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/supported-random-distros$ b/qa/suites/fs/basic_workload/supported-random-distros$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/fs/basic_workload/supported-random-distros$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/tasks/.qa b/qa/suites/fs/basic_workload/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/basic_workload/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/tasks/cfuse_workunit_kernel_untar_build.yaml b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_kernel_untar_build.yaml new file mode 100644 index 00000000..1e71bb40 --- /dev/null +++ b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_kernel_untar_build.yaml @@ -0,0 +1,14 @@ +overrides: + ceph: + conf: + client: + fuse_default_permissions: 0 +tasks: +- check-counter: + counters: + mds: + - "mds.dir_split" +- workunit: + clients: + all: + - kernel_untar_build.sh diff --git a/qa/suites/fs/basic_workload/tasks/cfuse_workunit_misc.yaml b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_misc.yaml new file mode 100644 index 00000000..d6c8140a --- /dev/null +++ b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_misc.yaml @@ -0,0 +1,10 @@ +tasks: +- check-counter: + counters: + mds: + - "mds.dir_split" +- workunit: + clients: + all: + - fs/misc + diff --git a/qa/suites/fs/basic_workload/tasks/cfuse_workunit_misc_test_o_trunc.yaml b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_misc_test_o_trunc.yaml new file mode 100644 index 00000000..c9de5c38 --- /dev/null +++ b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_misc_test_o_trunc.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - fs/test_o_trunc.sh diff --git 
a/qa/suites/fs/basic_workload/tasks/cfuse_workunit_norstats.yaml b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_norstats.yaml new file mode 100644 index 00000000..ea018c99 --- /dev/null +++ b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_norstats.yaml @@ -0,0 +1,15 @@ +tasks: +- check-counter: + counters: + mds: + - "mds.dir_split" +- workunit: + clients: + all: + - fs/norstats + +overrides: + ceph: + conf: + client: + client dirsize rbytes: false diff --git a/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_blogbench.yaml b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_blogbench.yaml new file mode 120000 index 00000000..8702f4f3 --- /dev/null +++ b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_blogbench.yaml @@ -0,0 +1 @@ +.qa/cephfs/tasks/cfuse_workunit_suites_blogbench.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_dbench.yaml b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_dbench.yaml new file mode 120000 index 00000000..b0f876c3 --- /dev/null +++ b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_dbench.yaml @@ -0,0 +1 @@ +.qa/cephfs/tasks/cfuse_workunit_suites_dbench.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_ffsb.yaml b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_ffsb.yaml new file mode 120000 index 00000000..01e889b2 --- /dev/null +++ b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_ffsb.yaml @@ -0,0 +1 @@ +.qa/cephfs/tasks/cfuse_workunit_suites_ffsb.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsstress.yaml new file mode 120000 index 00000000..c2e859ff --- /dev/null +++ b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsstress.yaml @@ -0,0 +1 @@ +.qa/cephfs/tasks/cfuse_workunit_suites_fsstress.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsx.yaml b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsx.yaml new file mode 100644 index 00000000..b16cfb17 --- /dev/null +++ b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsx.yaml @@ -0,0 +1,9 @@ +tasks: +- check-counter: + counters: + mds: + - "mds.dir_split" +- workunit: + clients: + all: + - suites/fsx.sh diff --git a/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsync.yaml b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsync.yaml new file mode 100644 index 00000000..7efa1adb --- /dev/null +++ b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_fsync.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/fsync-tester.sh diff --git a/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_iogen.yaml b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_iogen.yaml new file mode 100644 index 00000000..8d4c2710 --- /dev/null +++ b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_iogen.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + clients: + all: + - suites/iogen.sh + diff --git a/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_iozone.yaml b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_iozone.yaml new file mode 100644 index 00000000..9270f3c5 --- /dev/null +++ b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_iozone.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/iozone.sh diff --git a/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_pjd.yaml 
b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_pjd.yaml new file mode 100644 index 00000000..37e315f7 --- /dev/null +++ b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_pjd.yaml @@ -0,0 +1,12 @@ +overrides: + ceph: + conf: + client: + fuse set user groups: true + fuse default permissions: false +tasks: +- workunit: + timeout: 6h + clients: + all: + - suites/pjd.sh diff --git a/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_truncate_delay.yaml b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_truncate_delay.yaml new file mode 100644 index 00000000..b47b5656 --- /dev/null +++ b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_suites_truncate_delay.yaml @@ -0,0 +1,14 @@ +overrides: + ceph: + conf: + client: + ms_inject_delay_probability: 1 + ms_inject_delay_type: osd + ms_inject_delay_max: 5 + client_oc_max_dirty_age: 1 +tasks: +- exec: + client.0: + - cd $TESTDIR/mnt.* && dd if=/dev/zero of=./foo count=100 + - sleep 2 + - cd $TESTDIR/mnt.* && truncate --size 0 ./foo diff --git a/qa/suites/fs/basic_workload/tasks/cfuse_workunit_trivial_sync.yaml b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_trivial_sync.yaml new file mode 120000 index 00000000..a1df0327 --- /dev/null +++ b/qa/suites/fs/basic_workload/tasks/cfuse_workunit_trivial_sync.yaml @@ -0,0 +1 @@ +.qa/cephfs/tasks/cfuse_workunit_trivial_sync.yaml \ No newline at end of file diff --git a/qa/suites/fs/basic_workload/tasks/libcephfs_interface_tests.yaml b/qa/suites/fs/basic_workload/tasks/libcephfs_interface_tests.yaml new file mode 120000 index 00000000..84a88b59 --- /dev/null +++ b/qa/suites/fs/basic_workload/tasks/libcephfs_interface_tests.yaml @@ -0,0 +1 @@ +.qa/cephfs/tasks/libcephfs_interface_tests.yaml \ No newline at end of file diff --git a/qa/suites/fs/bugs/.qa b/qa/suites/fs/bugs/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/bugs/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/bugs/client_trim_caps/% b/qa/suites/fs/bugs/client_trim_caps/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/bugs/client_trim_caps/.qa b/qa/suites/fs/bugs/client_trim_caps/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/bugs/client_trim_caps/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/bugs/client_trim_caps/begin.yaml b/qa/suites/fs/bugs/client_trim_caps/begin.yaml new file mode 120000 index 00000000..311d404f --- /dev/null +++ b/qa/suites/fs/bugs/client_trim_caps/begin.yaml @@ -0,0 +1 @@ +.qa/cephfs/begin.yaml \ No newline at end of file diff --git a/qa/suites/fs/bugs/client_trim_caps/clusters/.qa b/qa/suites/fs/bugs/client_trim_caps/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/bugs/client_trim_caps/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/bugs/client_trim_caps/clusters/small-cluster.yaml b/qa/suites/fs/bugs/client_trim_caps/clusters/small-cluster.yaml new file mode 100644 index 00000000..5cd97a3a --- /dev/null +++ b/qa/suites/fs/bugs/client_trim_caps/clusters/small-cluster.yaml @@ -0,0 +1,11 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, osd.2, mds.a, mds.b, client.0] +openstack: +- volumes: # attached to each instance + count: 2 + size: 10 # GB +- machine: + disk: 100 # GB +log-rotate: + ceph-mds: 10G + ceph-osd: 10G diff --git a/qa/suites/fs/bugs/client_trim_caps/conf b/qa/suites/fs/bugs/client_trim_caps/conf new file mode 120000 index 00000000..16e8cc44 
--- /dev/null +++ b/qa/suites/fs/bugs/client_trim_caps/conf @@ -0,0 +1 @@ +.qa/cephfs/conf \ No newline at end of file diff --git a/qa/suites/fs/bugs/client_trim_caps/objectstore/.qa b/qa/suites/fs/bugs/client_trim_caps/objectstore/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/bugs/client_trim_caps/objectstore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/bugs/client_trim_caps/objectstore/bluestore-bitmap.yaml b/qa/suites/fs/bugs/client_trim_caps/objectstore/bluestore-bitmap.yaml new file mode 120000 index 00000000..a59cf517 --- /dev/null +++ b/qa/suites/fs/bugs/client_trim_caps/objectstore/bluestore-bitmap.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-bitmap.yaml \ No newline at end of file diff --git a/qa/suites/fs/bugs/client_trim_caps/overrides/+ b/qa/suites/fs/bugs/client_trim_caps/overrides/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/bugs/client_trim_caps/overrides/.qa b/qa/suites/fs/bugs/client_trim_caps/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/bugs/client_trim_caps/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/bugs/client_trim_caps/overrides/frag_enable.yaml b/qa/suites/fs/bugs/client_trim_caps/overrides/frag_enable.yaml new file mode 120000 index 00000000..34a39a36 --- /dev/null +++ b/qa/suites/fs/bugs/client_trim_caps/overrides/frag_enable.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/qa/suites/fs/bugs/client_trim_caps/overrides/no_client_pidfile.yaml b/qa/suites/fs/bugs/client_trim_caps/overrides/no_client_pidfile.yaml new file mode 120000 index 00000000..8888f332 --- /dev/null +++ b/qa/suites/fs/bugs/client_trim_caps/overrides/no_client_pidfile.yaml @@ -0,0 +1 @@ +.qa/overrides/no_client_pidfile.yaml \ No newline at end of file diff --git a/qa/suites/fs/bugs/client_trim_caps/overrides/whitelist_health.yaml b/qa/suites/fs/bugs/client_trim_caps/overrides/whitelist_health.yaml new file mode 120000 index 00000000..74f39a49 --- /dev/null +++ b/qa/suites/fs/bugs/client_trim_caps/overrides/whitelist_health.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_health.yaml \ No newline at end of file diff --git a/qa/suites/fs/bugs/client_trim_caps/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/bugs/client_trim_caps/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000..b4528c0f --- /dev/null +++ b/qa/suites/fs/bugs/client_trim_caps/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/fs/bugs/client_trim_caps/tasks/.qa b/qa/suites/fs/bugs/client_trim_caps/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/bugs/client_trim_caps/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/bugs/client_trim_caps/tasks/trim-i22073.yaml b/qa/suites/fs/bugs/client_trim_caps/tasks/trim-i22073.yaml new file mode 100644 index 00000000..a86e918e --- /dev/null +++ b/qa/suites/fs/bugs/client_trim_caps/tasks/trim-i22073.yaml @@ -0,0 +1,20 @@ +# Note this test is unlikely to exercise the code as expected in the future: +# "It's too tricky to arrange inodes in session->caps. we don't know if it +# still works in the future." 
-Zheng + +overrides: + ceph: + log-whitelist: + - MDS cache is too large + - \(MDS_CACHE_OVERSIZED\) +tasks: +- exec: + mon.a: + - "ceph tell mds.* config set mds_min_caps_per_client 1" +- background_exec: + mon.a: + - "sleep 30 && ceph tell mds.* config set mds_cache_memory_limit 1" +- exec: + client.0: + - ceph_test_trim_caps + - ceph_test_ino_release_cb diff --git a/qa/suites/fs/multiclient/% b/qa/suites/fs/multiclient/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/multiclient/.qa b/qa/suites/fs/multiclient/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/multiclient/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/multiclient/begin.yaml b/qa/suites/fs/multiclient/begin.yaml new file mode 120000 index 00000000..311d404f --- /dev/null +++ b/qa/suites/fs/multiclient/begin.yaml @@ -0,0 +1 @@ +.qa/cephfs/begin.yaml \ No newline at end of file diff --git a/qa/suites/fs/multiclient/clusters/.qa b/qa/suites/fs/multiclient/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/multiclient/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/multiclient/clusters/1-mds-2-client.yaml b/qa/suites/fs/multiclient/clusters/1-mds-2-client.yaml new file mode 120000 index 00000000..9f4f161a --- /dev/null +++ b/qa/suites/fs/multiclient/clusters/1-mds-2-client.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/1-mds-2-client.yaml \ No newline at end of file diff --git a/qa/suites/fs/multiclient/clusters/1-mds-3-client.yaml b/qa/suites/fs/multiclient/clusters/1-mds-3-client.yaml new file mode 120000 index 00000000..6b25e07c --- /dev/null +++ b/qa/suites/fs/multiclient/clusters/1-mds-3-client.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/1-mds-3-client.yaml \ No newline at end of file diff --git a/qa/suites/fs/multiclient/conf b/qa/suites/fs/multiclient/conf new file mode 120000 index 00000000..16e8cc44 --- /dev/null +++ b/qa/suites/fs/multiclient/conf @@ -0,0 +1 @@ +.qa/cephfs/conf \ No newline at end of file diff --git a/qa/suites/fs/multiclient/distros/.qa b/qa/suites/fs/multiclient/distros/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/multiclient/distros/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/multiclient/distros/ubuntu_latest.yaml b/qa/suites/fs/multiclient/distros/ubuntu_latest.yaml new file mode 120000 index 00000000..3a09f9ab --- /dev/null +++ b/qa/suites/fs/multiclient/distros/ubuntu_latest.yaml @@ -0,0 +1 @@ +.qa/distros/supported/ubuntu_latest.yaml \ No newline at end of file diff --git a/qa/suites/fs/multiclient/mount/.qa b/qa/suites/fs/multiclient/mount/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/multiclient/mount/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/multiclient/mount/fuse.yaml b/qa/suites/fs/multiclient/mount/fuse.yaml new file mode 120000 index 00000000..0e55da9f --- /dev/null +++ b/qa/suites/fs/multiclient/mount/fuse.yaml @@ -0,0 +1 @@ +.qa/cephfs/mount/fuse.yaml \ No newline at end of file diff --git a/qa/suites/fs/multiclient/mount/kclient.yaml.disabled b/qa/suites/fs/multiclient/mount/kclient.yaml.disabled new file mode 100644 index 00000000..f00f16ae --- /dev/null +++ b/qa/suites/fs/multiclient/mount/kclient.yaml.disabled @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false +tasks: +- kclient: diff --git a/qa/suites/fs/multiclient/objectstore-ec 
b/qa/suites/fs/multiclient/objectstore-ec new file mode 120000 index 00000000..affe2949 --- /dev/null +++ b/qa/suites/fs/multiclient/objectstore-ec @@ -0,0 +1 @@ +.qa/cephfs/objectstore-ec \ No newline at end of file diff --git a/qa/suites/fs/multiclient/overrides/+ b/qa/suites/fs/multiclient/overrides/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/multiclient/overrides/.qa b/qa/suites/fs/multiclient/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/multiclient/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/multiclient/overrides/frag_enable.yaml b/qa/suites/fs/multiclient/overrides/frag_enable.yaml new file mode 120000 index 00000000..34a39a36 --- /dev/null +++ b/qa/suites/fs/multiclient/overrides/frag_enable.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/qa/suites/fs/multiclient/overrides/whitelist_health.yaml b/qa/suites/fs/multiclient/overrides/whitelist_health.yaml new file mode 120000 index 00000000..74f39a49 --- /dev/null +++ b/qa/suites/fs/multiclient/overrides/whitelist_health.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_health.yaml \ No newline at end of file diff --git a/qa/suites/fs/multiclient/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/multiclient/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000..b4528c0f --- /dev/null +++ b/qa/suites/fs/multiclient/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/fs/multiclient/tasks/.qa b/qa/suites/fs/multiclient/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/multiclient/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/multiclient/tasks/cephfs_misc_tests.yaml b/qa/suites/fs/multiclient/tasks/cephfs_misc_tests.yaml new file mode 100644 index 00000000..564989d6 --- /dev/null +++ b/qa/suites/fs/multiclient/tasks/cephfs_misc_tests.yaml @@ -0,0 +1,13 @@ +tasks: +- cephfs_test_runner: + modules: + - tasks.cephfs.test_misc + +overrides: + ceph: + log-whitelist: + - evicting unresponsive client + - POOL_APP_NOT_ENABLED + - has not responded to cap revoke by MDS for over + - MDS_CLIENT_LATE_RELEASE + - responding to mclientcaps diff --git a/qa/suites/fs/multiclient/tasks/fsx-mpi.yaml.disabled b/qa/suites/fs/multiclient/tasks/fsx-mpi.yaml.disabled new file mode 100644 index 00000000..fc099694 --- /dev/null +++ b/qa/suites/fs/multiclient/tasks/fsx-mpi.yaml.disabled @@ -0,0 +1,17 @@ +# make sure we get the same MPI version on all hosts +tasks: +- pexec: + clients: + - cd $TESTDIR + - wget http://download.ceph.com/qa/fsx-mpi.c + - mpicc fsx-mpi.c -o fsx-mpi + - rm fsx-mpi.c + - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt +- ssh_keys: +- mpi: + exec: sudo $TESTDIR/fsx-mpi -o 1MB -N 50000 -p 10000 -l 1048576 $TESTDIR/gmnt/test + workdir: $TESTDIR/gmnt +- pexec: + all: + - rm $TESTDIR/gmnt + - rm $TESTDIR/fsx-mpi diff --git a/qa/suites/fs/multiclient/tasks/ior-shared-file.yaml b/qa/suites/fs/multiclient/tasks/ior-shared-file.yaml new file mode 100644 index 00000000..d401cff4 --- /dev/null +++ b/qa/suites/fs/multiclient/tasks/ior-shared-file.yaml @@ -0,0 +1,23 @@ +# make sure we get the same MPI version on all hosts +tasks: +- pexec: + clients: + - cd $TESTDIR + - wget http://download.ceph.com/qa/ior.tbz2 + - tar xvfj ior.tbz2 + - cd ior + - ./configure + - 
make + - make install DESTDIR=$TESTDIR/binary/ + - cd $TESTDIR/ + - rm ior.tbz2 + - rm -r ior + - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt +- ssh_keys: +- mpi: + exec: $TESTDIR/binary/usr/local/bin/ior -e -w -r -W -b 10m -a POSIX -o $TESTDIR/gmnt/ior.testfile +- pexec: + all: + - rm -f $TESTDIR/gmnt/ior.testfile + - rm -f $TESTDIR/gmnt + - rm -rf $TESTDIR/binary diff --git a/qa/suites/fs/multiclient/tasks/mdtest.yaml b/qa/suites/fs/multiclient/tasks/mdtest.yaml new file mode 100644 index 00000000..ba8ecde4 --- /dev/null +++ b/qa/suites/fs/multiclient/tasks/mdtest.yaml @@ -0,0 +1,20 @@ +# make sure we get the same MPI version on all hosts +tasks: +- pexec: + clients: + - cd $TESTDIR + - wget http://download.ceph.com/qa/mdtest-1.9.3.tgz + - mkdir mdtest-1.9.3 + - cd mdtest-1.9.3 + - tar xvfz $TESTDIR/mdtest-1.9.3.tgz + - rm $TESTDIR/mdtest-1.9.3.tgz + - MPI_CC=mpicc make + - ln -s $TESTDIR/mnt.* $TESTDIR/gmnt +- ssh_keys: +- mpi: + exec: $TESTDIR/mdtest-1.9.3/mdtest -d $TESTDIR/gmnt -I 20 -z 5 -b 2 -R +- pexec: + all: + - rm -f $TESTDIR/gmnt + - rm -rf $TESTDIR/mdtest-1.9.3 + - rm -rf $TESTDIR/._mdtest-1.9.3 diff --git a/qa/suites/fs/multifs/% b/qa/suites/fs/multifs/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/multifs/.qa b/qa/suites/fs/multifs/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/multifs/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/multifs/begin.yaml b/qa/suites/fs/multifs/begin.yaml new file mode 120000 index 00000000..311d404f --- /dev/null +++ b/qa/suites/fs/multifs/begin.yaml @@ -0,0 +1 @@ +.qa/cephfs/begin.yaml \ No newline at end of file diff --git a/qa/suites/fs/multifs/clusters/.qa b/qa/suites/fs/multifs/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/multifs/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/multifs/clusters/1a3s-mds-2c-client.yaml b/qa/suites/fs/multifs/clusters/1a3s-mds-2c-client.yaml new file mode 120000 index 00000000..c190ea92 --- /dev/null +++ b/qa/suites/fs/multifs/clusters/1a3s-mds-2c-client.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/1a3s-mds-2c-client.yaml \ No newline at end of file diff --git a/qa/suites/fs/multifs/conf b/qa/suites/fs/multifs/conf new file mode 120000 index 00000000..16e8cc44 --- /dev/null +++ b/qa/suites/fs/multifs/conf @@ -0,0 +1 @@ +.qa/cephfs/conf \ No newline at end of file diff --git a/qa/suites/fs/multifs/mount/.qa b/qa/suites/fs/multifs/mount/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/multifs/mount/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/multifs/mount/fuse.yaml b/qa/suites/fs/multifs/mount/fuse.yaml new file mode 120000 index 00000000..0e55da9f --- /dev/null +++ b/qa/suites/fs/multifs/mount/fuse.yaml @@ -0,0 +1 @@ +.qa/cephfs/mount/fuse.yaml \ No newline at end of file diff --git a/qa/suites/fs/multifs/objectstore-ec b/qa/suites/fs/multifs/objectstore-ec new file mode 120000 index 00000000..affe2949 --- /dev/null +++ b/qa/suites/fs/multifs/objectstore-ec @@ -0,0 +1 @@ +.qa/cephfs/objectstore-ec \ No newline at end of file diff --git a/qa/suites/fs/multifs/overrides/+ b/qa/suites/fs/multifs/overrides/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/multifs/overrides/.qa b/qa/suites/fs/multifs/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/multifs/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at 
end of file diff --git a/qa/suites/fs/multifs/overrides/frag_enable.yaml b/qa/suites/fs/multifs/overrides/frag_enable.yaml new file mode 120000 index 00000000..34a39a36 --- /dev/null +++ b/qa/suites/fs/multifs/overrides/frag_enable.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/qa/suites/fs/multifs/overrides/mon-debug.yaml b/qa/suites/fs/multifs/overrides/mon-debug.yaml new file mode 100644 index 00000000..24b454c0 --- /dev/null +++ b/qa/suites/fs/multifs/overrides/mon-debug.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + mon: + debug mon: 20 diff --git a/qa/suites/fs/multifs/overrides/whitelist_health.yaml b/qa/suites/fs/multifs/overrides/whitelist_health.yaml new file mode 120000 index 00000000..74f39a49 --- /dev/null +++ b/qa/suites/fs/multifs/overrides/whitelist_health.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_health.yaml \ No newline at end of file diff --git a/qa/suites/fs/multifs/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/multifs/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000..b4528c0f --- /dev/null +++ b/qa/suites/fs/multifs/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/fs/multifs/supported-random-distros$ b/qa/suites/fs/multifs/supported-random-distros$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/fs/multifs/supported-random-distros$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/fs/multifs/tasks/.qa b/qa/suites/fs/multifs/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/multifs/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/multifs/tasks/failover.yaml b/qa/suites/fs/multifs/tasks/failover.yaml new file mode 100644 index 00000000..0e111a53 --- /dev/null +++ b/qa/suites/fs/multifs/tasks/failover.yaml @@ -0,0 +1,14 @@ +overrides: + ceph: + log-whitelist: + - not responding, replacing + - \(MDS_INSUFFICIENT_STANDBY\) + - \(MDS_ALL_DOWN\) + - \(MDS_UP_LESS_THAN_MAX\) + ceph-fuse: + disabled: true +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_failover + diff --git a/qa/suites/fs/permission/% b/qa/suites/fs/permission/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/permission/.qa b/qa/suites/fs/permission/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/permission/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/permission/begin.yaml b/qa/suites/fs/permission/begin.yaml new file mode 120000 index 00000000..311d404f --- /dev/null +++ b/qa/suites/fs/permission/begin.yaml @@ -0,0 +1 @@ +.qa/cephfs/begin.yaml \ No newline at end of file diff --git a/qa/suites/fs/permission/clusters/.qa b/qa/suites/fs/permission/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/permission/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/permission/clusters/fixed-2-ucephfs.yaml b/qa/suites/fs/permission/clusters/fixed-2-ucephfs.yaml new file mode 120000 index 00000000..b0c41a89 --- /dev/null +++ b/qa/suites/fs/permission/clusters/fixed-2-ucephfs.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/fixed-2-ucephfs.yaml \ No newline at end of file diff --git a/qa/suites/fs/permission/conf b/qa/suites/fs/permission/conf new file mode 
120000 index 00000000..16e8cc44 --- /dev/null +++ b/qa/suites/fs/permission/conf @@ -0,0 +1 @@ +.qa/cephfs/conf \ No newline at end of file diff --git a/qa/suites/fs/permission/mount/.qa b/qa/suites/fs/permission/mount/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/permission/mount/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/permission/mount/fuse.yaml b/qa/suites/fs/permission/mount/fuse.yaml new file mode 120000 index 00000000..0e55da9f --- /dev/null +++ b/qa/suites/fs/permission/mount/fuse.yaml @@ -0,0 +1 @@ +.qa/cephfs/mount/fuse.yaml \ No newline at end of file diff --git a/qa/suites/fs/permission/objectstore-ec b/qa/suites/fs/permission/objectstore-ec new file mode 120000 index 00000000..affe2949 --- /dev/null +++ b/qa/suites/fs/permission/objectstore-ec @@ -0,0 +1 @@ +.qa/cephfs/objectstore-ec \ No newline at end of file diff --git a/qa/suites/fs/permission/overrides/+ b/qa/suites/fs/permission/overrides/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/permission/overrides/.qa b/qa/suites/fs/permission/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/permission/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/permission/overrides/frag_enable.yaml b/qa/suites/fs/permission/overrides/frag_enable.yaml new file mode 120000 index 00000000..34a39a36 --- /dev/null +++ b/qa/suites/fs/permission/overrides/frag_enable.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/qa/suites/fs/permission/overrides/whitelist_health.yaml b/qa/suites/fs/permission/overrides/whitelist_health.yaml new file mode 120000 index 00000000..74f39a49 --- /dev/null +++ b/qa/suites/fs/permission/overrides/whitelist_health.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_health.yaml \ No newline at end of file diff --git a/qa/suites/fs/permission/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/permission/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000..b4528c0f --- /dev/null +++ b/qa/suites/fs/permission/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/fs/permission/supported-random-distros$ b/qa/suites/fs/permission/supported-random-distros$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/fs/permission/supported-random-distros$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/fs/permission/tasks/.qa b/qa/suites/fs/permission/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/permission/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/permission/tasks/cfuse_workunit_misc.yaml b/qa/suites/fs/permission/tasks/cfuse_workunit_misc.yaml new file mode 100644 index 00000000..618498e6 --- /dev/null +++ b/qa/suites/fs/permission/tasks/cfuse_workunit_misc.yaml @@ -0,0 +1,12 @@ +overrides: + ceph: + conf: + client: + fuse default permissions: false + client acl type: posix_acl +tasks: +- workunit: + clients: + all: + - fs/misc/acl.sh + - fs/misc/chmod.sh diff --git a/qa/suites/fs/permission/tasks/cfuse_workunit_suites_pjd.yaml b/qa/suites/fs/permission/tasks/cfuse_workunit_suites_pjd.yaml new file mode 100644 index 00000000..09be2667 --- /dev/null +++ 
b/qa/suites/fs/permission/tasks/cfuse_workunit_suites_pjd.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + client: + fuse set user groups: true + fuse default permissions: false + client acl type: posix_acl +tasks: +- workunit: + timeout: 6h + clients: + all: + - suites/pjd.sh diff --git a/qa/suites/fs/snaps/% b/qa/suites/fs/snaps/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/snaps/.qa b/qa/suites/fs/snaps/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/snaps/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/snaps/begin.yaml b/qa/suites/fs/snaps/begin.yaml new file mode 120000 index 00000000..311d404f --- /dev/null +++ b/qa/suites/fs/snaps/begin.yaml @@ -0,0 +1 @@ +.qa/cephfs/begin.yaml \ No newline at end of file diff --git a/qa/suites/fs/snaps/clusters/.qa b/qa/suites/fs/snaps/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/snaps/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/snaps/clusters/fixed-2-ucephfs.yaml b/qa/suites/fs/snaps/clusters/fixed-2-ucephfs.yaml new file mode 120000 index 00000000..b0c41a89 --- /dev/null +++ b/qa/suites/fs/snaps/clusters/fixed-2-ucephfs.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/fixed-2-ucephfs.yaml \ No newline at end of file diff --git a/qa/suites/fs/snaps/conf b/qa/suites/fs/snaps/conf new file mode 120000 index 00000000..16e8cc44 --- /dev/null +++ b/qa/suites/fs/snaps/conf @@ -0,0 +1 @@ +.qa/cephfs/conf \ No newline at end of file diff --git a/qa/suites/fs/snaps/mount/.qa b/qa/suites/fs/snaps/mount/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/snaps/mount/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/snaps/mount/fuse.yaml b/qa/suites/fs/snaps/mount/fuse.yaml new file mode 120000 index 00000000..0e55da9f --- /dev/null +++ b/qa/suites/fs/snaps/mount/fuse.yaml @@ -0,0 +1 @@ +.qa/cephfs/mount/fuse.yaml \ No newline at end of file diff --git a/qa/suites/fs/snaps/objectstore-ec b/qa/suites/fs/snaps/objectstore-ec new file mode 120000 index 00000000..affe2949 --- /dev/null +++ b/qa/suites/fs/snaps/objectstore-ec @@ -0,0 +1 @@ +.qa/cephfs/objectstore-ec \ No newline at end of file diff --git a/qa/suites/fs/snaps/overrides/+ b/qa/suites/fs/snaps/overrides/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/snaps/overrides/.qa b/qa/suites/fs/snaps/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/snaps/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/snaps/overrides/frag_enable.yaml b/qa/suites/fs/snaps/overrides/frag_enable.yaml new file mode 120000 index 00000000..34a39a36 --- /dev/null +++ b/qa/suites/fs/snaps/overrides/frag_enable.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/qa/suites/fs/snaps/overrides/whitelist_health.yaml b/qa/suites/fs/snaps/overrides/whitelist_health.yaml new file mode 120000 index 00000000..74f39a49 --- /dev/null +++ b/qa/suites/fs/snaps/overrides/whitelist_health.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_health.yaml \ No newline at end of file diff --git a/qa/suites/fs/snaps/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/snaps/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000..b4528c0f --- /dev/null +++ b/qa/suites/fs/snaps/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 
+1 @@ +.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/fs/snaps/supported-random-distros$ b/qa/suites/fs/snaps/supported-random-distros$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/fs/snaps/supported-random-distros$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/fs/snaps/tasks/.qa b/qa/suites/fs/snaps/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/snaps/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/snaps/tasks/snaptests.yaml b/qa/suites/fs/snaps/tasks/snaptests.yaml new file mode 100644 index 00000000..790c93c2 --- /dev/null +++ b/qa/suites/fs/snaps/tasks/snaptests.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - fs/snaps diff --git a/qa/suites/fs/thrash/% b/qa/suites/fs/thrash/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/thrash/.qa b/qa/suites/fs/thrash/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/thrash/begin.yaml b/qa/suites/fs/thrash/begin.yaml new file mode 120000 index 00000000..311d404f --- /dev/null +++ b/qa/suites/fs/thrash/begin.yaml @@ -0,0 +1 @@ +.qa/cephfs/begin.yaml \ No newline at end of file diff --git a/qa/suites/fs/thrash/ceph-thrash/.qa b/qa/suites/fs/thrash/ceph-thrash/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/thrash/ceph-thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/thrash/ceph-thrash/default.yaml b/qa/suites/fs/thrash/ceph-thrash/default.yaml new file mode 100644 index 00000000..154615c5 --- /dev/null +++ b/qa/suites/fs/thrash/ceph-thrash/default.yaml @@ -0,0 +1,7 @@ +tasks: +- mds_thrash: + +overrides: + ceph: + log-whitelist: + - not responding, replacing diff --git a/qa/suites/fs/thrash/clusters/.qa b/qa/suites/fs/thrash/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/thrash/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/thrash/clusters/1-mds-1-client-coloc.yaml b/qa/suites/fs/thrash/clusters/1-mds-1-client-coloc.yaml new file mode 120000 index 00000000..d15ecfda --- /dev/null +++ b/qa/suites/fs/thrash/clusters/1-mds-1-client-coloc.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/1-mds-1-client-coloc.yaml \ No newline at end of file diff --git a/qa/suites/fs/thrash/conf b/qa/suites/fs/thrash/conf new file mode 120000 index 00000000..16e8cc44 --- /dev/null +++ b/qa/suites/fs/thrash/conf @@ -0,0 +1 @@ +.qa/cephfs/conf \ No newline at end of file diff --git a/qa/suites/fs/thrash/mount/.qa b/qa/suites/fs/thrash/mount/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/thrash/mount/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/thrash/mount/fuse.yaml b/qa/suites/fs/thrash/mount/fuse.yaml new file mode 120000 index 00000000..0e55da9f --- /dev/null +++ b/qa/suites/fs/thrash/mount/fuse.yaml @@ -0,0 +1 @@ +.qa/cephfs/mount/fuse.yaml \ No newline at end of file diff --git a/qa/suites/fs/thrash/msgr-failures/.qa b/qa/suites/fs/thrash/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/thrash/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/thrash/msgr-failures/none.yaml 
b/qa/suites/fs/thrash/msgr-failures/none.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml b/qa/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml new file mode 100644 index 00000000..68802961 --- /dev/null +++ b/qa/suites/fs/thrash/msgr-failures/osd-mds-delay.yaml @@ -0,0 +1,10 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 2500 + ms inject delay type: osd mds + ms inject delay probability: .005 + ms inject delay max: 1 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/fs/thrash/objectstore-ec b/qa/suites/fs/thrash/objectstore-ec new file mode 120000 index 00000000..affe2949 --- /dev/null +++ b/qa/suites/fs/thrash/objectstore-ec @@ -0,0 +1 @@ +.qa/cephfs/objectstore-ec \ No newline at end of file diff --git a/qa/suites/fs/thrash/overrides/+ b/qa/suites/fs/thrash/overrides/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/thrash/overrides/.qa b/qa/suites/fs/thrash/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/thrash/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/thrash/overrides/frag_enable.yaml b/qa/suites/fs/thrash/overrides/frag_enable.yaml new file mode 120000 index 00000000..34a39a36 --- /dev/null +++ b/qa/suites/fs/thrash/overrides/frag_enable.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/qa/suites/fs/thrash/overrides/session_timeout.yaml b/qa/suites/fs/thrash/overrides/session_timeout.yaml new file mode 120000 index 00000000..fce0318c --- /dev/null +++ b/qa/suites/fs/thrash/overrides/session_timeout.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/session_timeout.yaml \ No newline at end of file diff --git a/qa/suites/fs/thrash/overrides/whitelist_health.yaml b/qa/suites/fs/thrash/overrides/whitelist_health.yaml new file mode 120000 index 00000000..74f39a49 --- /dev/null +++ b/qa/suites/fs/thrash/overrides/whitelist_health.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_health.yaml \ No newline at end of file diff --git a/qa/suites/fs/thrash/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/thrash/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000..b4528c0f --- /dev/null +++ b/qa/suites/fs/thrash/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/fs/thrash/supported-random-distros$ b/qa/suites/fs/thrash/supported-random-distros$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/fs/thrash/supported-random-distros$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/fs/thrash/tasks/.qa b/qa/suites/fs/thrash/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/thrash/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/thrash/tasks/cfuse_workunit_snaptests.yaml b/qa/suites/fs/thrash/tasks/cfuse_workunit_snaptests.yaml new file mode 100644 index 00000000..790c93c2 --- /dev/null +++ b/qa/suites/fs/thrash/tasks/cfuse_workunit_snaptests.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - fs/snaps diff --git a/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_fsstress.yaml new file mode 120000 index 00000000..c2e859ff --- /dev/null +++ 
b/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_fsstress.yaml @@ -0,0 +1 @@ +.qa/cephfs/tasks/cfuse_workunit_suites_fsstress.yaml \ No newline at end of file diff --git a/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml b/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml new file mode 100644 index 00000000..37e315f7 --- /dev/null +++ b/qa/suites/fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml @@ -0,0 +1,12 @@ +overrides: + ceph: + conf: + client: + fuse set user groups: true + fuse default permissions: false +tasks: +- workunit: + timeout: 6h + clients: + all: + - suites/pjd.sh diff --git a/qa/suites/fs/thrash/tasks/cfuse_workunit_trivial_sync.yaml b/qa/suites/fs/thrash/tasks/cfuse_workunit_trivial_sync.yaml new file mode 120000 index 00000000..a1df0327 --- /dev/null +++ b/qa/suites/fs/thrash/tasks/cfuse_workunit_trivial_sync.yaml @@ -0,0 +1 @@ +.qa/cephfs/tasks/cfuse_workunit_trivial_sync.yaml \ No newline at end of file diff --git a/qa/suites/fs/traceless/% b/qa/suites/fs/traceless/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/traceless/.qa b/qa/suites/fs/traceless/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/traceless/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/traceless/begin.yaml b/qa/suites/fs/traceless/begin.yaml new file mode 120000 index 00000000..311d404f --- /dev/null +++ b/qa/suites/fs/traceless/begin.yaml @@ -0,0 +1 @@ +.qa/cephfs/begin.yaml \ No newline at end of file diff --git a/qa/suites/fs/traceless/clusters/.qa b/qa/suites/fs/traceless/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/traceless/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/traceless/clusters/fixed-2-ucephfs.yaml b/qa/suites/fs/traceless/clusters/fixed-2-ucephfs.yaml new file mode 120000 index 00000000..b0c41a89 --- /dev/null +++ b/qa/suites/fs/traceless/clusters/fixed-2-ucephfs.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/fixed-2-ucephfs.yaml \ No newline at end of file diff --git a/qa/suites/fs/traceless/conf b/qa/suites/fs/traceless/conf new file mode 120000 index 00000000..16e8cc44 --- /dev/null +++ b/qa/suites/fs/traceless/conf @@ -0,0 +1 @@ +.qa/cephfs/conf \ No newline at end of file diff --git a/qa/suites/fs/traceless/mount/.qa b/qa/suites/fs/traceless/mount/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/traceless/mount/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/traceless/mount/fuse.yaml b/qa/suites/fs/traceless/mount/fuse.yaml new file mode 120000 index 00000000..0e55da9f --- /dev/null +++ b/qa/suites/fs/traceless/mount/fuse.yaml @@ -0,0 +1 @@ +.qa/cephfs/mount/fuse.yaml \ No newline at end of file diff --git a/qa/suites/fs/traceless/objectstore-ec b/qa/suites/fs/traceless/objectstore-ec new file mode 120000 index 00000000..affe2949 --- /dev/null +++ b/qa/suites/fs/traceless/objectstore-ec @@ -0,0 +1 @@ +.qa/cephfs/objectstore-ec \ No newline at end of file diff --git a/qa/suites/fs/traceless/overrides/+ b/qa/suites/fs/traceless/overrides/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/traceless/overrides/.qa b/qa/suites/fs/traceless/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/traceless/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/traceless/overrides/frag_enable.yaml 
b/qa/suites/fs/traceless/overrides/frag_enable.yaml new file mode 120000 index 00000000..34a39a36 --- /dev/null +++ b/qa/suites/fs/traceless/overrides/frag_enable.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/qa/suites/fs/traceless/overrides/whitelist_health.yaml b/qa/suites/fs/traceless/overrides/whitelist_health.yaml new file mode 120000 index 00000000..74f39a49 --- /dev/null +++ b/qa/suites/fs/traceless/overrides/whitelist_health.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_health.yaml \ No newline at end of file diff --git a/qa/suites/fs/traceless/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/traceless/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000..b4528c0f --- /dev/null +++ b/qa/suites/fs/traceless/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/fs/traceless/supported-random-distros$ b/qa/suites/fs/traceless/supported-random-distros$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/fs/traceless/supported-random-distros$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/fs/traceless/tasks/.qa b/qa/suites/fs/traceless/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/traceless/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_blogbench.yaml b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_blogbench.yaml new file mode 120000 index 00000000..8702f4f3 --- /dev/null +++ b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_blogbench.yaml @@ -0,0 +1 @@ +.qa/cephfs/tasks/cfuse_workunit_suites_blogbench.yaml \ No newline at end of file diff --git a/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_dbench.yaml b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_dbench.yaml new file mode 120000 index 00000000..b0f876c3 --- /dev/null +++ b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_dbench.yaml @@ -0,0 +1 @@ +.qa/cephfs/tasks/cfuse_workunit_suites_dbench.yaml \ No newline at end of file diff --git a/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_ffsb.yaml b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_ffsb.yaml new file mode 120000 index 00000000..01e889b2 --- /dev/null +++ b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_ffsb.yaml @@ -0,0 +1 @@ +.qa/cephfs/tasks/cfuse_workunit_suites_ffsb.yaml \ No newline at end of file diff --git a/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_fsstress.yaml new file mode 120000 index 00000000..c2e859ff --- /dev/null +++ b/qa/suites/fs/traceless/tasks/cfuse_workunit_suites_fsstress.yaml @@ -0,0 +1 @@ +.qa/cephfs/tasks/cfuse_workunit_suites_fsstress.yaml \ No newline at end of file diff --git a/qa/suites/fs/traceless/traceless/.qa b/qa/suites/fs/traceless/traceless/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/traceless/traceless/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/traceless/traceless/50pc.yaml b/qa/suites/fs/traceless/traceless/50pc.yaml new file mode 100644 index 00000000..e0418bcb --- /dev/null +++ b/qa/suites/fs/traceless/traceless/50pc.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + mds: + mds inject traceless reply probability: .5 diff --git 
a/qa/suites/fs/upgrade/.qa b/qa/suites/fs/upgrade/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/upgrade/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/.qa b/qa/suites/fs/upgrade/featureful_client/.qa new file mode 120000 index 00000000..11a54ed3 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/.qa @@ -0,0 +1 @@ +../../../../ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/old_client/% b/qa/suites/fs/upgrade/featureful_client/old_client/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/upgrade/featureful_client/old_client/.qa b/qa/suites/fs/upgrade/featureful_client/old_client/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/old_client/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/old_client/bluestore-bitmap.yaml b/qa/suites/fs/upgrade/featureful_client/old_client/bluestore-bitmap.yaml new file mode 120000 index 00000000..17ad98e7 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/old_client/bluestore-bitmap.yaml @@ -0,0 +1 @@ +../../../../../cephfs/objectstore-ec/bluestore-bitmap.yaml \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/old_client/clusters/.qa b/qa/suites/fs/upgrade/featureful_client/old_client/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/old_client/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/old_client/clusters/1-mds-2-client-micro.yaml b/qa/suites/fs/upgrade/featureful_client/old_client/clusters/1-mds-2-client-micro.yaml new file mode 120000 index 00000000..feb68f34 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/old_client/clusters/1-mds-2-client-micro.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/1-mds-2-client-micro.yaml \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/old_client/conf b/qa/suites/fs/upgrade/featureful_client/old_client/conf new file mode 120000 index 00000000..6d471298 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/old_client/conf @@ -0,0 +1 @@ +.qa/cephfs/conf/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/old_client/overrides/% b/qa/suites/fs/upgrade/featureful_client/old_client/overrides/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/upgrade/featureful_client/old_client/overrides/.qa b/qa/suites/fs/upgrade/featureful_client/old_client/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/old_client/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/old_client/overrides/frag_enable.yaml b/qa/suites/fs/upgrade/featureful_client/old_client/overrides/frag_enable.yaml new file mode 120000 index 00000000..34a39a36 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/old_client/overrides/frag_enable.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/old_client/overrides/multimds/no.yaml b/qa/suites/fs/upgrade/featureful_client/old_client/overrides/multimds/no.yaml new file mode 100644 index 00000000..f9e95daa --- /dev/null +++ 
b/qa/suites/fs/upgrade/featureful_client/old_client/overrides/multimds/no.yaml @@ -0,0 +1,4 @@ +overrides: + ceph: + cephfs: + max_mds: 1 diff --git a/qa/suites/fs/upgrade/featureful_client/old_client/overrides/multimds/yes.yaml b/qa/suites/fs/upgrade/featureful_client/old_client/overrides/multimds/yes.yaml new file mode 100644 index 00000000..b3a9b5d6 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/old_client/overrides/multimds/yes.yaml @@ -0,0 +1,4 @@ +overrides: + ceph: + cephfs: + max_mds: 2 diff --git a/qa/suites/fs/upgrade/featureful_client/old_client/overrides/whitelist_health.yaml b/qa/suites/fs/upgrade/featureful_client/old_client/overrides/whitelist_health.yaml new file mode 120000 index 00000000..74f39a49 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/old_client/overrides/whitelist_health.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_health.yaml \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/old_client/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/upgrade/featureful_client/old_client/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000..b4528c0f --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/old_client/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/old_client/tasks/% b/qa/suites/fs/upgrade/featureful_client/old_client/tasks/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/upgrade/featureful_client/old_client/tasks/.qa b/qa/suites/fs/upgrade/featureful_client/old_client/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/old_client/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/old_client/tasks/0-luminous.yaml b/qa/suites/fs/upgrade/featureful_client/old_client/tasks/0-luminous.yaml new file mode 100644 index 00000000..7835cbf9 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/old_client/tasks/0-luminous.yaml @@ -0,0 +1,41 @@ +meta: +- desc: | + install ceph/luminous latest +tasks: +- install: + branch: luminous + exclude_packages: + - librados3 + - ceph-mgr-dashboard + - ceph-mgr-diskprediction-local + - ceph-mgr-diskprediction-cloud + - ceph-mgr-rook + - ceph-mgr-ssh + extra_packages: ['librados2'] +- print: "**** done installing luminous" +- ceph: + mon_bind_addrvec: false + mon_bind_msgr2: false + log-whitelist: + - overall HEALTH_ + - \(FS_ + - \(MDS_ + - \(OSD_ + - \(MON_DOWN\) + - \(CACHE_POOL_ + - \(POOL_ + - \(MGR_DOWN\) + - \(PG_ + - \(SMALLER_PGP_NUM\) + - Monitor daemon marked osd + - Behind on trimming + - Manager daemon + conf: + global: + mon warn on pool no app: false + ms bind msgr2: false +- exec: + osd.0: + - ceph osd require-osd-release luminous + - ceph osd set-require-min-compat-client luminous +- print: "**** done ceph" diff --git a/qa/suites/fs/upgrade/featureful_client/old_client/tasks/1-client.yaml b/qa/suites/fs/upgrade/featureful_client/old_client/tasks/1-client.yaml new file mode 100644 index 00000000..88d686fa --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/old_client/tasks/1-client.yaml @@ -0,0 +1,8 @@ +tasks: +- ceph-fuse: +- print: "**** done luminous client" +- workunit: + clients: + all: + - suites/fsstress.sh +- print: "**** done fsstress" diff --git 
a/qa/suites/fs/upgrade/featureful_client/old_client/tasks/2-upgrade.yaml b/qa/suites/fs/upgrade/featureful_client/old_client/tasks/2-upgrade.yaml new file mode 100644 index 00000000..cd670cf6 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/old_client/tasks/2-upgrade.yaml @@ -0,0 +1,56 @@ +overrides: + ceph: + mon_bind_msgr2: false + mon_bind_addrvec: false + log-whitelist: + - scrub mismatch + - ScrubResult + - wrongly marked + - \(POOL_APP_NOT_ENABLED\) + - \(SLOW_OPS\) + - overall HEALTH_ + - \(MON_MSGR2_NOT_ENABLED\) + - slow request + conf: + global: + bluestore warn on legacy statfs: false + mon pg warn min per osd: 0 + mon: + mon warn on osd down out interval zero: false + +tasks: +- mds_pre_upgrade: +- print: "**** done mds pre-upgrade sequence" +- install.upgrade: + mon.a: + mon.b: +- print: "**** done install.upgrade both hosts" +- ceph.restart: + daemons: [mon.*, mgr.*] + mon-health-to-clog: false + wait-for-healthy: false +- exec: + mon.a: + - ceph config set global mon_warn_on_msgr2_not_enabled false +- ceph.healthy: +- ceph.restart: + daemons: [osd.*] + wait-for-healthy: false + wait-for-osds-up: true +- ceph.stop: [mds.*] +- ceph.restart: + daemons: [mds.*] + wait-for-healthy: false + wait-for-osds-up: true +- exec: + mon.a: + - ceph mon enable-msgr2 + - ceph config rm global mon_warn_on_msgr2_not_enabled +- exec: + mon.a: + - ceph osd dump -f json-pretty + - ceph versions + - ceph osd require-osd-release nautilus + #- ceph osd set-require-min-compat-client nautilus +- ceph.healthy: +- print: "**** done ceph.restart" diff --git a/qa/suites/fs/upgrade/featureful_client/old_client/tasks/3-compat_client/mimic.yaml b/qa/suites/fs/upgrade/featureful_client/old_client/tasks/3-compat_client/mimic.yaml new file mode 100644 index 00000000..36720676 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/old_client/tasks/3-compat_client/mimic.yaml @@ -0,0 +1,10 @@ +overrides: + ceph: + log-whitelist: + - missing required features +tasks: +- exec: + mon.a: + - ceph fs dump --format=json-pretty + - ceph fs set cephfs min_compat_client mimic +- fs.clients_evicted: diff --git a/qa/suites/fs/upgrade/featureful_client/old_client/tasks/3-compat_client/no.yaml b/qa/suites/fs/upgrade/featureful_client/old_client/tasks/3-compat_client/no.yaml new file mode 100644 index 00000000..b495eb41 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/old_client/tasks/3-compat_client/no.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + clients: + all: + - suites/fsstress.sh +- print: "**** done fsstress" diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/% b/qa/suites/fs/upgrade/featureful_client/upgraded_client/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/.qa b/qa/suites/fs/upgrade/featureful_client/upgraded_client/.qa new file mode 120000 index 00000000..fea2489f --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/upgraded_client/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/bluestore-bitmap.yaml b/qa/suites/fs/upgrade/featureful_client/upgraded_client/bluestore-bitmap.yaml new file mode 120000 index 00000000..17ad98e7 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/upgraded_client/bluestore-bitmap.yaml @@ -0,0 +1 @@ +../../../../../cephfs/objectstore-ec/bluestore-bitmap.yaml \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/clusters/.qa 
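The 2-upgrade fragment encodes the restart order for the Luminous-to-Nautilus upgrade: monitors and managers first, then OSDs, then the MDS daemons are stopped and restarted, and only afterwards is msgr2 enabled and the temporary MON_MSGR2_NOT_ENABLED suppression removed; 3-compat_client/mimic.yaml then raises min_compat_client so that still-Luminous mounts are evicted. A sketch of the post-restart step, restating commands from the fragment above (grouping them into one exec block is an editorial simplification):

- exec:
    mon.a:
    # only once every daemon is running the new release
    - ceph mon enable-msgr2
    - ceph config rm global mon_warn_on_msgr2_not_enabled
    - ceph osd dump -f json-pretty          # record the post-upgrade OSD map
    - ceph versions                         # confirm all daemons report nautilus
    - ceph osd require-osd-release nautilus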
b/qa/suites/fs/upgrade/featureful_client/upgraded_client/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/upgraded_client/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/clusters/1-mds-2-client-micro.yaml b/qa/suites/fs/upgrade/featureful_client/upgraded_client/clusters/1-mds-2-client-micro.yaml new file mode 120000 index 00000000..feb68f34 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/upgraded_client/clusters/1-mds-2-client-micro.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/1-mds-2-client-micro.yaml \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/conf b/qa/suites/fs/upgrade/featureful_client/upgraded_client/conf new file mode 120000 index 00000000..6d471298 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/upgraded_client/conf @@ -0,0 +1 @@ +.qa/cephfs/conf/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/% b/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/.qa b/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/frag_enable.yaml b/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/frag_enable.yaml new file mode 120000 index 00000000..34a39a36 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/frag_enable.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/multimds/no.yaml b/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/multimds/no.yaml new file mode 100644 index 00000000..f9e95daa --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/multimds/no.yaml @@ -0,0 +1,4 @@ +overrides: + ceph: + cephfs: + max_mds: 1 diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/multimds/yes.yaml b/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/multimds/yes.yaml new file mode 100644 index 00000000..b3a9b5d6 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/multimds/yes.yaml @@ -0,0 +1,4 @@ +overrides: + ceph: + cephfs: + max_mds: 2 diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/whitelist_health.yaml b/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/whitelist_health.yaml new file mode 120000 index 00000000..74f39a49 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/whitelist_health.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_health.yaml \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000..b4528c0f --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/upgraded_client/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 
@@ +.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/% b/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/.qa b/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/0-luminous.yaml b/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/0-luminous.yaml new file mode 100644 index 00000000..7835cbf9 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/0-luminous.yaml @@ -0,0 +1,41 @@ +meta: +- desc: | + install ceph/luminous latest +tasks: +- install: + branch: luminous + exclude_packages: + - librados3 + - ceph-mgr-dashboard + - ceph-mgr-diskprediction-local + - ceph-mgr-diskprediction-cloud + - ceph-mgr-rook + - ceph-mgr-ssh + extra_packages: ['librados2'] +- print: "**** done installing luminous" +- ceph: + mon_bind_addrvec: false + mon_bind_msgr2: false + log-whitelist: + - overall HEALTH_ + - \(FS_ + - \(MDS_ + - \(OSD_ + - \(MON_DOWN\) + - \(CACHE_POOL_ + - \(POOL_ + - \(MGR_DOWN\) + - \(PG_ + - \(SMALLER_PGP_NUM\) + - Monitor daemon marked osd + - Behind on trimming + - Manager daemon + conf: + global: + mon warn on pool no app: false + ms bind msgr2: false +- exec: + osd.0: + - ceph osd require-osd-release luminous + - ceph osd set-require-min-compat-client luminous +- print: "**** done ceph" diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/1-client.yaml b/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/1-client.yaml new file mode 100644 index 00000000..dc1c0d8d --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/1-client.yaml @@ -0,0 +1,11 @@ +nuke-on-error: false +overrides: + nuke-on-error: false +tasks: +- ceph-fuse: +- print: "**** done luminous client" +#- workunit: +# clients: +# all: +# - suites/fsstress.sh +- print: "**** done fsstress" diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/2-upgrade.yaml b/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/2-upgrade.yaml new file mode 100644 index 00000000..cd670cf6 --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/2-upgrade.yaml @@ -0,0 +1,56 @@ +overrides: + ceph: + mon_bind_msgr2: false + mon_bind_addrvec: false + log-whitelist: + - scrub mismatch + - ScrubResult + - wrongly marked + - \(POOL_APP_NOT_ENABLED\) + - \(SLOW_OPS\) + - overall HEALTH_ + - \(MON_MSGR2_NOT_ENABLED\) + - slow request + conf: + global: + bluestore warn on legacy statfs: false + mon pg warn min per osd: 0 + mon: + mon warn on osd down out interval zero: false + +tasks: +- mds_pre_upgrade: +- print: "**** done mds pre-upgrade sequence" +- install.upgrade: + mon.a: + mon.b: +- print: "**** done install.upgrade both hosts" +- ceph.restart: + daemons: [mon.*, mgr.*] + mon-health-to-clog: false + wait-for-healthy: false +- exec: + mon.a: + - ceph config set global mon_warn_on_msgr2_not_enabled false +- ceph.healthy: +- ceph.restart: + daemons: [osd.*] + wait-for-healthy: false + wait-for-osds-up: true +- ceph.stop: [mds.*] +- ceph.restart: + daemons: [mds.*] + wait-for-healthy: false + 
wait-for-osds-up: true +- exec: + mon.a: + - ceph mon enable-msgr2 + - ceph config rm global mon_warn_on_msgr2_not_enabled +- exec: + mon.a: + - ceph osd dump -f json-pretty + - ceph versions + - ceph osd require-osd-release nautilus + #- ceph osd set-require-min-compat-client nautilus +- ceph.healthy: +- print: "**** done ceph.restart" diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/3-client-upgrade.yaml b/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/3-client-upgrade.yaml new file mode 100644 index 00000000..58fdfdbd --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/3-client-upgrade.yaml @@ -0,0 +1,14 @@ +tasks: +- install.upgrade: + client.0: +- print: "**** done install.upgrade on client.0" +- ceph-fuse: + client.0: + mounted: false + client.1: + skip: true +- ceph-fuse: + client.0: + client.1: + skip: true +- print: "**** done remount client" diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/4-compat_client.yaml b/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/4-compat_client.yaml new file mode 100644 index 00000000..bdf484da --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/4-compat_client.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + log-whitelist: + - missing required features +tasks: +- exec: + mon.a: + - ceph fs dump --format=json-pretty + - ceph fs set cephfs min_compat_client mimic +- fs.clients_evicted: + clients: + client.0: False + client.1: True diff --git a/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/5-client-sanity.yaml b/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/5-client-sanity.yaml new file mode 100644 index 00000000..e206457e --- /dev/null +++ b/qa/suites/fs/upgrade/featureful_client/upgraded_client/tasks/5-client-sanity.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + clients: + client.0: + - suites/fsstress.sh +- print: "**** done fsstress" diff --git a/qa/suites/fs/upgrade/snaps/% b/qa/suites/fs/upgrade/snaps/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/upgrade/snaps/.qa b/qa/suites/fs/upgrade/snaps/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/snaps/clusters/.qa b/qa/suites/fs/upgrade/snaps/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/snaps/clusters/3-mds.yaml b/qa/suites/fs/upgrade/snaps/clusters/3-mds.yaml new file mode 120000 index 00000000..d7ec418e --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/clusters/3-mds.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/3-mds.yaml \ No newline at end of file diff --git a/qa/suites/fs/upgrade/snaps/conf b/qa/suites/fs/upgrade/snaps/conf new file mode 120000 index 00000000..16e8cc44 --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/conf @@ -0,0 +1 @@ +.qa/cephfs/conf \ No newline at end of file diff --git a/qa/suites/fs/upgrade/snaps/objectstore-ec b/qa/suites/fs/upgrade/snaps/objectstore-ec new file mode 120000 index 00000000..affe2949 --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/objectstore-ec @@ -0,0 +1 @@ +.qa/cephfs/objectstore-ec \ No newline at end of file diff --git a/qa/suites/fs/upgrade/snaps/overrides/% b/qa/suites/fs/upgrade/snaps/overrides/% new file mode 100644 index 00000000..e69de29b diff --git 
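In the upgraded_client variant only client.0 is upgraded and remounted (the first ceph-fuse entry with mounted: false unmounts it, the second remounts it, and skip: true leaves client.1 untouched on the old packages), so when 4-compat_client raises min_compat_client to mimic, fs.clients_evicted expects client.0 to keep its session and client.1 to be evicted. The expectation, restated from the fragment above with comments:

- exec:
    mon.a:
    - ceph fs set cephfs min_compat_client mimic   # reject pre-mimic clients
- fs.clients_evicted:
    clients:
      client.0: False     # upgraded client keeps its session
      client.1: True      # still-luminous client must be evicted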
a/qa/suites/fs/upgrade/snaps/overrides/.qa b/qa/suites/fs/upgrade/snaps/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/snaps/overrides/frag_enable.yaml b/qa/suites/fs/upgrade/snaps/overrides/frag_enable.yaml new file mode 120000 index 00000000..34a39a36 --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/overrides/frag_enable.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/qa/suites/fs/upgrade/snaps/overrides/multimds/.qa b/qa/suites/fs/upgrade/snaps/overrides/multimds/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/overrides/multimds/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/snaps/overrides/multimds/no.yaml b/qa/suites/fs/upgrade/snaps/overrides/multimds/no.yaml new file mode 100644 index 00000000..c740a450 --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/overrides/multimds/no.yaml @@ -0,0 +1,3 @@ +overrides: + ceph: + max_mds: 1 diff --git a/qa/suites/fs/upgrade/snaps/overrides/multimds/yes.yaml b/qa/suites/fs/upgrade/snaps/overrides/multimds/yes.yaml new file mode 100644 index 00000000..ecf118d9 --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/overrides/multimds/yes.yaml @@ -0,0 +1,3 @@ +overrides: + ceph: + max_mds: 2 diff --git a/qa/suites/fs/upgrade/snaps/overrides/whitelist_health.yaml b/qa/suites/fs/upgrade/snaps/overrides/whitelist_health.yaml new file mode 120000 index 00000000..74f39a49 --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/overrides/whitelist_health.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_health.yaml \ No newline at end of file diff --git a/qa/suites/fs/upgrade/snaps/overrides/whitelist_rstat.yaml b/qa/suites/fs/upgrade/snaps/overrides/whitelist_rstat.yaml new file mode 100644 index 00000000..434b5ddc --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/overrides/whitelist_rstat.yaml @@ -0,0 +1,8 @@ +overrides: + ceph: + log-whitelist: + - inconsistent rstat on inode + conf: + mds: + mds debug scatterstat: 0 + mds verify scatter: 0 diff --git a/qa/suites/fs/upgrade/snaps/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/upgrade/snaps/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000..b4528c0f --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/fs/upgrade/snaps/tasks/% b/qa/suites/fs/upgrade/snaps/tasks/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/upgrade/snaps/tasks/.qa b/qa/suites/fs/upgrade/snaps/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/snaps/tasks/0-luminous.yaml b/qa/suites/fs/upgrade/snaps/tasks/0-luminous.yaml new file mode 100644 index 00000000..7835cbf9 --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/tasks/0-luminous.yaml @@ -0,0 +1,41 @@ +meta: +- desc: | + install ceph/luminous latest +tasks: +- install: + branch: luminous + exclude_packages: + - librados3 + - ceph-mgr-dashboard + - ceph-mgr-diskprediction-local + - ceph-mgr-diskprediction-cloud + - ceph-mgr-rook + - ceph-mgr-ssh + extra_packages: ['librados2'] +- print: "**** done installing luminous" +- ceph: + mon_bind_addrvec: 
false + mon_bind_msgr2: false + log-whitelist: + - overall HEALTH_ + - \(FS_ + - \(MDS_ + - \(OSD_ + - \(MON_DOWN\) + - \(CACHE_POOL_ + - \(POOL_ + - \(MGR_DOWN\) + - \(PG_ + - \(SMALLER_PGP_NUM\) + - Monitor daemon marked osd + - Behind on trimming + - Manager daemon + conf: + global: + mon warn on pool no app: false + ms bind msgr2: false +- exec: + osd.0: + - ceph osd require-osd-release luminous + - ceph osd set-require-min-compat-client luminous +- print: "**** done ceph" diff --git a/qa/suites/fs/upgrade/snaps/tasks/1-client.yaml b/qa/suites/fs/upgrade/snaps/tasks/1-client.yaml new file mode 100644 index 00000000..0aa6dcf7 --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/tasks/1-client.yaml @@ -0,0 +1,13 @@ +tasks: +- ceph-fuse: +- print: "**** done luminous client" +- exec: + mon.a: + - ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it +- workunit: + timeout: 5m + cleanup: false + clients: + client.0: + - fs/snap-hierarchy.sh +- print: "**** done snap hierarchy" diff --git a/qa/suites/fs/upgrade/snaps/tasks/2-upgrade.yaml b/qa/suites/fs/upgrade/snaps/tasks/2-upgrade.yaml new file mode 100644 index 00000000..7252bb35 --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/tasks/2-upgrade.yaml @@ -0,0 +1,19 @@ +overrides: + ceph: + conf: + global: + mon pg warn min per osd: 0 + bluestore warn on legacy statfs: false + +tasks: +- mds_pre_upgrade: +- print: "**** done mds pre-upgrade sequence" +- install.upgrade: + mon.a: + mon.b: +- print: "**** done install.upgrade both hosts" +- ceph.stop: [mds.*] +- ceph.restart: + daemons: [mon.*, mgr.*, osd.*, mds.*] + mon-health-to-clog: false +- print: "**** done ceph.restart" diff --git a/qa/suites/fs/upgrade/snaps/tasks/3-sanity.yaml b/qa/suites/fs/upgrade/snaps/tasks/3-sanity.yaml new file mode 100644 index 00000000..d93dc3ba --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/tasks/3-sanity.yaml @@ -0,0 +1,10 @@ +tasks: +- exec: + mon.a: + - ceph status + - ceph fs dump --format=json-pretty + - ceph fs set cephfs max_mds 2 && exit 1 || true +- print: "**** confirmed cannot set max_mds=2" +- exec: + mon.a: + - ceph fs set cephfs allow_new_snaps true diff --git a/qa/suites/fs/upgrade/snaps/tasks/4-client-upgrade/.qa b/qa/suites/fs/upgrade/snaps/tasks/4-client-upgrade/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/tasks/4-client-upgrade/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/snaps/tasks/4-client-upgrade/no.yaml b/qa/suites/fs/upgrade/snaps/tasks/4-client-upgrade/no.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/upgrade/snaps/tasks/4-client-upgrade/yes.yaml b/qa/suites/fs/upgrade/snaps/tasks/4-client-upgrade/yes.yaml new file mode 100644 index 00000000..13b590e2 --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/tasks/4-client-upgrade/yes.yaml @@ -0,0 +1,10 @@ +tasks: +- install.upgrade: + client.0: +- print: "**** done install.upgrade on client.0" +- ceph-fuse: + client.0: + mounted: false +- ceph-fuse: + client.0: +- print: "**** done remount client" diff --git a/qa/suites/fs/upgrade/snaps/tasks/5-client-sanity.yaml b/qa/suites/fs/upgrade/snaps/tasks/5-client-sanity.yaml new file mode 100644 index 00000000..680e4407 --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/tasks/5-client-sanity.yaml @@ -0,0 +1,10 @@ +tasks: +- workunit: + timeout: 5m + cleanup: false + env: + VERIFY: verify + clients: + client.0: + - fs/snap-hierarchy.sh +- print: "**** done verify snap hierarchy" diff --git 
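The snaps variant enables snapshots on the Luminous filesystem, builds a snapshot hierarchy with fs/snap-hierarchy.sh (cleanup: false so it survives the upgrade), and later re-runs the same workunit with VERIFY=verify to confirm the hierarchy is intact; 3-sanity additionally asserts that max_mds cannot be raised while the old snapshot format is still present, using the 'cmd && exit 1 || true' idiom to require the command to fail. The negative assertion, restated from the fragment with comments:

- exec:
    mon.a:
    - ceph status
    - ceph fs dump --format=json-pretty
    # must fail until cephfs_upgrade_snap has converted legacy snapshots;
    # '&& exit 1 || true' turns an unexpected success into a job failure
    - ceph fs set cephfs max_mds 2 && exit 1 || true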
a/qa/suites/fs/upgrade/snaps/tasks/6-snap-upgrade.yaml b/qa/suites/fs/upgrade/snaps/tasks/6-snap-upgrade.yaml new file mode 100644 index 00000000..fe0b17e1 --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/tasks/6-snap-upgrade.yaml @@ -0,0 +1,16 @@ +overrides: + ceph: + log-whitelist: + - bad backtrace on inode +tasks: +- cephfs_upgrade_snap: +- print: "**** upgraded snapshot metadata" +- exec: + mon.a: + - ceph fs set cephfs max_mds 2 +- print: "**** increased max_mds=2" +- sleep: + duration: 10 +- exec: + mon.a: + - ceph fs dump | grep '^max_mds.*2' diff --git a/qa/suites/fs/upgrade/snaps/tasks/7-client-sanity.yaml b/qa/suites/fs/upgrade/snaps/tasks/7-client-sanity.yaml new file mode 120000 index 00000000..4ad65e45 --- /dev/null +++ b/qa/suites/fs/upgrade/snaps/tasks/7-client-sanity.yaml @@ -0,0 +1 @@ +5-client-sanity.yaml \ No newline at end of file diff --git a/qa/suites/fs/upgrade/volumes/.qa b/qa/suites/fs/upgrade/volumes/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/upgrade/volumes/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/volumes/import-legacy/% b/qa/suites/fs/upgrade/volumes/import-legacy/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/upgrade/volumes/import-legacy/.qa b/qa/suites/fs/upgrade/volumes/import-legacy/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/upgrade/volumes/import-legacy/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/volumes/import-legacy/bluestore-bitmap.yaml b/qa/suites/fs/upgrade/volumes/import-legacy/bluestore-bitmap.yaml new file mode 120000 index 00000000..17ad98e7 --- /dev/null +++ b/qa/suites/fs/upgrade/volumes/import-legacy/bluestore-bitmap.yaml @@ -0,0 +1 @@ +../../../../../cephfs/objectstore-ec/bluestore-bitmap.yaml \ No newline at end of file diff --git a/qa/suites/fs/upgrade/volumes/import-legacy/clusters/.qa b/qa/suites/fs/upgrade/volumes/import-legacy/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/upgrade/volumes/import-legacy/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/volumes/import-legacy/clusters/1-mds-2-client-micro.yaml b/qa/suites/fs/upgrade/volumes/import-legacy/clusters/1-mds-2-client-micro.yaml new file mode 100644 index 00000000..9b443f7d --- /dev/null +++ b/qa/suites/fs/upgrade/volumes/import-legacy/clusters/1-mds-2-client-micro.yaml @@ -0,0 +1,7 @@ +roles: +- [mon.a, mon.b, mon.c, mgr.x, mgr.y, mds.a, mds.b, mds.c, osd.0, osd.1, osd.2, osd.3] +- [client.0, client.1] +openstack: +- volumes: # attached to each instance + count: 4 + size: 10 # GB diff --git a/qa/suites/fs/upgrade/volumes/import-legacy/conf b/qa/suites/fs/upgrade/volumes/import-legacy/conf new file mode 120000 index 00000000..6d471298 --- /dev/null +++ b/qa/suites/fs/upgrade/volumes/import-legacy/conf @@ -0,0 +1 @@ +.qa/cephfs/conf/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/volumes/import-legacy/overrides/+ b/qa/suites/fs/upgrade/volumes/import-legacy/overrides/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/upgrade/volumes/import-legacy/overrides/.qa b/qa/suites/fs/upgrade/volumes/import-legacy/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/upgrade/volumes/import-legacy/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git 
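6-snap-upgrade runs the cephfs_upgrade_snap task to convert the legacy snapshot metadata, after which raising max_mds to 2 is expected to succeed and is checked by grepping the fs dump; the 7-client-sanity symlink then repeats the VERIFY pass under two active MDS ranks. The post-conversion check, restated with comments:

- cephfs_upgrade_snap:                     # convert pre-mimic snapshot metadata
- exec:
    mon.a:
    - ceph fs set cephfs max_mds 2         # allowed once snapshots are upgraded
- sleep:
    duration: 10                           # give the second rank time to activate
- exec:
    mon.a:
    - ceph fs dump | grep '^max_mds.*2'    # fails the job if max_mds is not 2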
a/qa/suites/fs/upgrade/volumes/import-legacy/overrides/frag_enable.yaml b/qa/suites/fs/upgrade/volumes/import-legacy/overrides/frag_enable.yaml new file mode 120000 index 00000000..34a39a36 --- /dev/null +++ b/qa/suites/fs/upgrade/volumes/import-legacy/overrides/frag_enable.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/qa/suites/fs/upgrade/volumes/import-legacy/overrides/pg-warn.yaml b/qa/suites/fs/upgrade/volumes/import-legacy/overrides/pg-warn.yaml new file mode 100644 index 00000000..4ae54a40 --- /dev/null +++ b/qa/suites/fs/upgrade/volumes/import-legacy/overrides/pg-warn.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + mon pg warn min per osd: 0 diff --git a/qa/suites/fs/upgrade/volumes/import-legacy/overrides/whitelist_health.yaml b/qa/suites/fs/upgrade/volumes/import-legacy/overrides/whitelist_health.yaml new file mode 120000 index 00000000..74f39a49 --- /dev/null +++ b/qa/suites/fs/upgrade/volumes/import-legacy/overrides/whitelist_health.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_health.yaml \ No newline at end of file diff --git a/qa/suites/fs/upgrade/volumes/import-legacy/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/upgrade/volumes/import-legacy/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000..b4528c0f --- /dev/null +++ b/qa/suites/fs/upgrade/volumes/import-legacy/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/fs/upgrade/volumes/import-legacy/tasks/% b/qa/suites/fs/upgrade/volumes/import-legacy/tasks/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/upgrade/volumes/import-legacy/tasks/.qa b/qa/suites/fs/upgrade/volumes/import-legacy/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/upgrade/volumes/import-legacy/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/upgrade/volumes/import-legacy/tasks/0-mimic.yaml b/qa/suites/fs/upgrade/volumes/import-legacy/tasks/0-mimic.yaml new file mode 100644 index 00000000..1ca8973b --- /dev/null +++ b/qa/suites/fs/upgrade/volumes/import-legacy/tasks/0-mimic.yaml @@ -0,0 +1,42 @@ +meta: +- desc: | + install ceph/mimic latest +tasks: +- install: + branch: mimic #tag: v13.2.8 + exclude_packages: + - librados3 + - ceph-mgr-dashboard + - ceph-mgr-diskprediction-local + - ceph-mgr-diskprediction-cloud + - ceph-mgr-rook + - ceph-mgr-cephadm + - cephadm + extra_packages: ['librados2'] +- print: "**** done installing mimic" +- ceph: + mon_bind_addrvec: false + mon_bind_msgr2: false + log-whitelist: + - overall HEALTH_ + - \(FS_ + - \(MDS_ + - \(OSD_ + - \(MON_DOWN\) + - \(CACHE_POOL_ + - \(POOL_ + - \(MGR_DOWN\) + - \(PG_ + - \(SMALLER_PGP_NUM\) + - Monitor daemon marked osd + - Behind on trimming + - Manager daemon + conf: + global: + mon warn on pool no app: false + ms bind msgr2: false +- exec: + osd.0: + - ceph osd require-osd-release mimic + - ceph osd set-require-min-compat-client mimic +- print: "**** done ceph" diff --git a/qa/suites/fs/upgrade/volumes/import-legacy/tasks/1-client.yaml b/qa/suites/fs/upgrade/volumes/import-legacy/tasks/1-client.yaml new file mode 100644 index 00000000..82731071 --- /dev/null +++ b/qa/suites/fs/upgrade/volumes/import-legacy/tasks/1-client.yaml @@ -0,0 +1,33 @@ +tasks: +- workunit: + clients: + client.0: + - fs/upgrade/volume_client + env: + ACTION: create +- print: "**** 
fs/volume_client create" +- ceph-fuse: + client.0: + mount_path: /volumes/_nogroup/vol_isolated + mountpoint: mnt.0 + auth_id: vol_data_isolated + client.1: + mount_path: /volumes/_nogroup/vol_default + mountpoint: mnt.1 + auth_id: vol_default +- print: "**** ceph-fuse vol_isolated" +- workunit: + clients: + client.0: + - fs/upgrade/volume_client + env: + ACTION: populate + cleanup: false +- workunit: + clients: + client.1: + - fs/upgrade/volume_client + env: + ACTION: populate + cleanup: false +- print: "**** fs/volume_client populate" diff --git a/qa/suites/fs/upgrade/volumes/import-legacy/tasks/2-upgrade.yaml b/qa/suites/fs/upgrade/volumes/import-legacy/tasks/2-upgrade.yaml new file mode 100644 index 00000000..fd23132b --- /dev/null +++ b/qa/suites/fs/upgrade/volumes/import-legacy/tasks/2-upgrade.yaml @@ -0,0 +1,54 @@ +overrides: + ceph: + mon_bind_msgr2: false + mon_bind_addrvec: false + log-whitelist: + - scrub mismatch + - ScrubResult + - wrongly marked + - \(POOL_APP_NOT_ENABLED\) + - \(SLOW_OPS\) + - overall HEALTH_ + - \(MON_MSGR2_NOT_ENABLED\) + - slow request + conf: + global: + bluestore warn on legacy statfs: false + bluestore warn on no per pool omap: false + mon: + mon warn on osd down out interval zero: false + +tasks: +- mds_pre_upgrade: +- print: "**** done mds pre-upgrade sequence" +- install.upgrade: + mon.a: +- print: "**** done install.upgrade both hosts" +- ceph.restart: + daemons: [mon.*, mgr.*] + mon-health-to-clog: false + wait-for-healthy: false +- exec: + mon.a: + - ceph config set global mon_warn_on_msgr2_not_enabled false +- ceph.healthy: +- ceph.restart: + daemons: [osd.*] + wait-for-healthy: false + wait-for-osds-up: true +- ceph.stop: [mds.*] +- ceph.restart: + daemons: [mds.*] + wait-for-healthy: false + wait-for-osds-up: true +- exec: + mon.a: + - ceph mon enable-msgr2 + - ceph versions + - ceph osd dump -f json-pretty + - ceph config rm global mon_warn_on_msgr2_not_enabled + - ceph osd require-osd-release nautilus + - for f in `ceph osd pool ls` ; do ceph osd pool set $f pg_autoscale_mode off ; done + #- ceph osd set-require-min-compat-client nautilus +- ceph.healthy: +- print: "**** done ceph.restart" diff --git a/qa/suites/fs/upgrade/volumes/import-legacy/tasks/3-verify.yaml b/qa/suites/fs/upgrade/volumes/import-legacy/tasks/3-verify.yaml new file mode 100644 index 00000000..003409ca --- /dev/null +++ b/qa/suites/fs/upgrade/volumes/import-legacy/tasks/3-verify.yaml @@ -0,0 +1,25 @@ +overrides: + ceph: + log-whitelist: + - missing required features +tasks: +- exec: + mon.a: + - ceph fs dump --format=json-pretty + - ceph fs volume ls + - ceph fs subvolume ls cephfs +- workunit: + clients: + client.0: + - fs/upgrade/volume_client + env: + ACTION: verify + cleanup: false +- workunit: + clients: + client.1: + - fs/upgrade/volume_client + env: + ACTION: verify + cleanup: false +- print: "**** fs/volume_client verify" diff --git a/qa/suites/fs/upgrade/volumes/import-legacy/ubuntu_18.04.yaml b/qa/suites/fs/upgrade/volumes/import-legacy/ubuntu_18.04.yaml new file mode 120000 index 00000000..cfb85f10 --- /dev/null +++ b/qa/suites/fs/upgrade/volumes/import-legacy/ubuntu_18.04.yaml @@ -0,0 +1 @@ +.qa/distros/all/ubuntu_18.04.yaml \ No newline at end of file diff --git a/qa/suites/fs/verify/% b/qa/suites/fs/verify/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/verify/.qa b/qa/suites/fs/verify/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/verify/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file 
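The import-legacy test drives the pre-Nautilus ceph_volume_client path: the workunit creates two legacy volumes, they are mounted at /volumes/_nogroup/... with their own auth IDs and populated, and after the upgrade 3-verify checks that the data is visible through the new volume/subvolume interface. The verification commands, restated from the fragment above as they are issued from mon.a:

- exec:
    mon.a:
    - ceph fs dump --format=json-pretty
    - ceph fs volume ls              # the legacy filesystem shows up as a volume
    - ceph fs subvolume ls cephfs    # imported legacy volumes show up as subvolumes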
diff --git a/qa/suites/fs/verify/begin.yaml b/qa/suites/fs/verify/begin.yaml new file mode 120000 index 00000000..311d404f --- /dev/null +++ b/qa/suites/fs/verify/begin.yaml @@ -0,0 +1 @@ +.qa/cephfs/begin.yaml \ No newline at end of file diff --git a/qa/suites/fs/verify/centos_latest.yaml b/qa/suites/fs/verify/centos_latest.yaml new file mode 120000 index 00000000..bd9854e7 --- /dev/null +++ b/qa/suites/fs/verify/centos_latest.yaml @@ -0,0 +1 @@ +.qa/distros/supported/centos_latest.yaml \ No newline at end of file diff --git a/qa/suites/fs/verify/clusters/.qa b/qa/suites/fs/verify/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/verify/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/verify/clusters/fixed-2-ucephfs.yaml b/qa/suites/fs/verify/clusters/fixed-2-ucephfs.yaml new file mode 120000 index 00000000..b0c41a89 --- /dev/null +++ b/qa/suites/fs/verify/clusters/fixed-2-ucephfs.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/fixed-2-ucephfs.yaml \ No newline at end of file diff --git a/qa/suites/fs/verify/conf b/qa/suites/fs/verify/conf new file mode 120000 index 00000000..16e8cc44 --- /dev/null +++ b/qa/suites/fs/verify/conf @@ -0,0 +1 @@ +.qa/cephfs/conf \ No newline at end of file diff --git a/qa/suites/fs/verify/mount/.qa b/qa/suites/fs/verify/mount/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/verify/mount/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/verify/mount/fuse.yaml b/qa/suites/fs/verify/mount/fuse.yaml new file mode 120000 index 00000000..0e55da9f --- /dev/null +++ b/qa/suites/fs/verify/mount/fuse.yaml @@ -0,0 +1 @@ +.qa/cephfs/mount/fuse.yaml \ No newline at end of file diff --git a/qa/suites/fs/verify/objectstore-ec b/qa/suites/fs/verify/objectstore-ec new file mode 120000 index 00000000..affe2949 --- /dev/null +++ b/qa/suites/fs/verify/objectstore-ec @@ -0,0 +1 @@ +.qa/cephfs/objectstore-ec \ No newline at end of file diff --git a/qa/suites/fs/verify/overrides/+ b/qa/suites/fs/verify/overrides/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/fs/verify/overrides/.qa b/qa/suites/fs/verify/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/verify/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/verify/overrides/frag_enable.yaml b/qa/suites/fs/verify/overrides/frag_enable.yaml new file mode 120000 index 00000000..34a39a36 --- /dev/null +++ b/qa/suites/fs/verify/overrides/frag_enable.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/qa/suites/fs/verify/overrides/mon-debug.yaml b/qa/suites/fs/verify/overrides/mon-debug.yaml new file mode 100644 index 00000000..6ed3e6d5 --- /dev/null +++ b/qa/suites/fs/verify/overrides/mon-debug.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + conf: + mon: + debug ms: 1 + debug mon: 20 diff --git a/qa/suites/fs/verify/overrides/session_timeout.yaml b/qa/suites/fs/verify/overrides/session_timeout.yaml new file mode 120000 index 00000000..fce0318c --- /dev/null +++ b/qa/suites/fs/verify/overrides/session_timeout.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/session_timeout.yaml \ No newline at end of file diff --git a/qa/suites/fs/verify/overrides/whitelist_health.yaml b/qa/suites/fs/verify/overrides/whitelist_health.yaml new file mode 120000 index 00000000..74f39a49 --- /dev/null +++ b/qa/suites/fs/verify/overrides/whitelist_health.yaml @@ -0,0 +1 @@ 
+.qa/cephfs/overrides/whitelist_health.yaml \ No newline at end of file diff --git a/qa/suites/fs/verify/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/fs/verify/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000..b4528c0f --- /dev/null +++ b/qa/suites/fs/verify/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/fs/verify/tasks/.qa b/qa/suites/fs/verify/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/verify/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/verify/tasks/cfuse_workunit_suites_dbench.yaml b/qa/suites/fs/verify/tasks/cfuse_workunit_suites_dbench.yaml new file mode 120000 index 00000000..b0f876c3 --- /dev/null +++ b/qa/suites/fs/verify/tasks/cfuse_workunit_suites_dbench.yaml @@ -0,0 +1 @@ +.qa/cephfs/tasks/cfuse_workunit_suites_dbench.yaml \ No newline at end of file diff --git a/qa/suites/fs/verify/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/fs/verify/tasks/cfuse_workunit_suites_fsstress.yaml new file mode 120000 index 00000000..c2e859ff --- /dev/null +++ b/qa/suites/fs/verify/tasks/cfuse_workunit_suites_fsstress.yaml @@ -0,0 +1 @@ +.qa/cephfs/tasks/cfuse_workunit_suites_fsstress.yaml \ No newline at end of file diff --git a/qa/suites/fs/verify/validater/.qa b/qa/suites/fs/verify/validater/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/fs/verify/validater/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/fs/verify/validater/lockdep.yaml b/qa/suites/fs/verify/validater/lockdep.yaml new file mode 100644 index 00000000..25f84355 --- /dev/null +++ b/qa/suites/fs/verify/validater/lockdep.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + lockdep: true diff --git a/qa/suites/fs/verify/validater/valgrind.yaml b/qa/suites/fs/verify/validater/valgrind.yaml new file mode 100644 index 00000000..3da39d69 --- /dev/null +++ b/qa/suites/fs/verify/validater/valgrind.yaml @@ -0,0 +1,29 @@ +# Only works on os_type: centos +# See http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126 + +overrides: + install: + ceph: + debuginfo: true + ceph: + # Valgrind makes everything slow, so ignore slow requests and extend heartbeat grace + log-whitelist: + - slow requests are blocked + conf: + global: + osd heartbeat grace: 40 + mds: + mds heartbeat grace: 60 + mon: + mon osd crush smoke test: false + osd: + osd fast shutdown: false + valgrind: + mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes] + osd: [--tool=memcheck] + mds: [--tool=memcheck] +# see https://tracker.ceph.com/issues/38621 +# mgr: [--tool=memcheck] + ceph-fuse: + client.0: + valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes] diff --git a/qa/suites/hadoop/.qa b/qa/suites/hadoop/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/hadoop/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/hadoop/basic/% b/qa/suites/hadoop/basic/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/hadoop/basic/.qa b/qa/suites/hadoop/basic/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/hadoop/basic/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/hadoop/basic/clusters/.qa b/qa/suites/hadoop/basic/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null 
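The verify suite runs the dbench/fsstress workloads under a validater: lockdep.yaml only sets lockdep: true in the global conf, while valgrind.yaml installs debuginfo packages, extends heartbeat grace periods (memcheck slows every daemon down), disables the mon crush smoke test and fast OSD shutdown, and supplies per-daemon valgrind argument lists. A condensed sketch of the valgrind overrides as they would land in a merged job, copied from the fragment with comments (the merge itself is assumed to follow standard teuthology behaviour):

overrides:
  install:
    ceph:
      debuginfo: true               # symbols for readable memcheck reports
  ceph:
    conf:
      global:
        osd heartbeat grace: 40     # tolerate the valgrind slowdown
      mds:
        mds heartbeat grace: 60
    valgrind:
      mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes]
      osd: [--tool=memcheck]
      mds: [--tool=memcheck]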
+++ b/qa/suites/hadoop/basic/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/hadoop/basic/clusters/fixed-3.yaml b/qa/suites/hadoop/basic/clusters/fixed-3.yaml new file mode 100644 index 00000000..56b0be4c --- /dev/null +++ b/qa/suites/hadoop/basic/clusters/fixed-3.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + client: + client permissions: false +roles: +- [mon.0, mds.a, osd.0, hadoop.master.0] +- [mon.1, mgr.x, osd.1, hadoop.slave.0] +- [mon.2, mgr.y, hadoop.slave.1, client.0] +openstack: +- volumes: # attached to each instance + count: 1 + size: 10 # GB diff --git a/qa/suites/hadoop/basic/distros/.qa b/qa/suites/hadoop/basic/distros/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/hadoop/basic/distros/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/hadoop/basic/distros/ubuntu_latest.yaml b/qa/suites/hadoop/basic/distros/ubuntu_latest.yaml new file mode 120000 index 00000000..3a09f9ab --- /dev/null +++ b/qa/suites/hadoop/basic/distros/ubuntu_latest.yaml @@ -0,0 +1 @@ +.qa/distros/supported/ubuntu_latest.yaml \ No newline at end of file diff --git a/qa/suites/hadoop/basic/filestore-xfs.yaml b/qa/suites/hadoop/basic/filestore-xfs.yaml new file mode 120000 index 00000000..41f2a9d1 --- /dev/null +++ b/qa/suites/hadoop/basic/filestore-xfs.yaml @@ -0,0 +1 @@ +.qa/objectstore/filestore-xfs.yaml \ No newline at end of file diff --git a/qa/suites/hadoop/basic/tasks/.qa b/qa/suites/hadoop/basic/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/hadoop/basic/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/hadoop/basic/tasks/repl.yaml b/qa/suites/hadoop/basic/tasks/repl.yaml new file mode 100644 index 00000000..60cdcca3 --- /dev/null +++ b/qa/suites/hadoop/basic/tasks/repl.yaml @@ -0,0 +1,8 @@ +tasks: +- ssh_keys: +- install: +- ceph: +- hadoop: +- workunit: + clients: + client.0: [hadoop/repl.sh] diff --git a/qa/suites/hadoop/basic/tasks/terasort.yaml b/qa/suites/hadoop/basic/tasks/terasort.yaml new file mode 100644 index 00000000..4377894f --- /dev/null +++ b/qa/suites/hadoop/basic/tasks/terasort.yaml @@ -0,0 +1,10 @@ +tasks: +- ssh_keys: +- install: +- ceph: +- hadoop: +- workunit: + clients: + client.0: [hadoop/terasort.sh] + env: + NUM_RECORDS: "10000000" diff --git a/qa/suites/hadoop/basic/tasks/wordcount.yaml b/qa/suites/hadoop/basic/tasks/wordcount.yaml new file mode 100644 index 00000000..b84941b8 --- /dev/null +++ b/qa/suites/hadoop/basic/tasks/wordcount.yaml @@ -0,0 +1,8 @@ +tasks: +- ssh_keys: +- install: +- ceph: +- hadoop: +- workunit: + clients: + client.0: [hadoop/wordcount.sh] diff --git a/qa/suites/kcephfs/.qa b/qa/suites/kcephfs/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/kcephfs/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/kcephfs/cephfs/% b/qa/suites/kcephfs/cephfs/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/kcephfs/cephfs/.qa b/qa/suites/kcephfs/cephfs/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/kcephfs/cephfs/begin.yaml b/qa/suites/kcephfs/cephfs/begin.yaml new file mode 120000 index 00000000..311d404f --- /dev/null +++ b/qa/suites/kcephfs/cephfs/begin.yaml @@ -0,0 +1 @@ +.qa/cephfs/begin.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/cephfs/clusters/.qa 
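Each hadoop task follows the same pipeline: distribute SSH keys, install Ceph, bring the cluster up, deploy Hadoop on the hadoop.master/hadoop.slave roles from the cluster fragment, then run a workunit on client.0 (terasort also passes NUM_RECORDS through the environment). A sketch of a task written to the same pattern; hadoop/grep.sh is a hypothetical placeholder used only to show the shape, not a script added by this patch:

tasks:
- ssh_keys:          # passwordless SSH between the hadoop roles
- install:
- ceph:
- hadoop:            # provision Hadoop over CephFS on the hadoop.* roles
- workunit:
    clients:
      client.0: [hadoop/grep.sh]    # hypothetical workunit name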
b/qa/suites/kcephfs/cephfs/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/kcephfs/cephfs/clusters/1-mds-1-client.yaml b/qa/suites/kcephfs/cephfs/clusters/1-mds-1-client.yaml new file mode 120000 index 00000000..64bdb79f --- /dev/null +++ b/qa/suites/kcephfs/cephfs/clusters/1-mds-1-client.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/1-mds-1-client.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/cephfs/conf b/qa/suites/kcephfs/cephfs/conf new file mode 120000 index 00000000..16e8cc44 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/conf @@ -0,0 +1 @@ +.qa/cephfs/conf \ No newline at end of file diff --git a/qa/suites/kcephfs/cephfs/inline/.qa b/qa/suites/kcephfs/cephfs/inline/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/inline/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/kcephfs/cephfs/inline/no.yaml b/qa/suites/kcephfs/cephfs/inline/no.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/kcephfs/cephfs/inline/yes.yaml b/qa/suites/kcephfs/cephfs/inline/yes.yaml new file mode 100644 index 00000000..da8677a5 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/inline/yes.yaml @@ -0,0 +1,4 @@ +tasks: +- exec: + client.0: + - sudo ceph fs set cephfs inline_data true --yes-i-really-mean-it diff --git a/qa/suites/kcephfs/cephfs/kclient b/qa/suites/kcephfs/cephfs/kclient new file mode 120000 index 00000000..893d2d36 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/kclient @@ -0,0 +1 @@ +.qa/cephfs/mount/kclient \ No newline at end of file diff --git a/qa/suites/kcephfs/cephfs/objectstore-ec b/qa/suites/kcephfs/cephfs/objectstore-ec new file mode 120000 index 00000000..affe2949 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/objectstore-ec @@ -0,0 +1 @@ +.qa/cephfs/objectstore-ec \ No newline at end of file diff --git a/qa/suites/kcephfs/cephfs/overrides/+ b/qa/suites/kcephfs/cephfs/overrides/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/kcephfs/cephfs/overrides/.qa b/qa/suites/kcephfs/cephfs/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/kcephfs/cephfs/overrides/frag_enable.yaml b/qa/suites/kcephfs/cephfs/overrides/frag_enable.yaml new file mode 120000 index 00000000..34a39a36 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/overrides/frag_enable.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/cephfs/overrides/log-config.yaml b/qa/suites/kcephfs/cephfs/overrides/log-config.yaml new file mode 120000 index 00000000..d955aa5b --- /dev/null +++ b/qa/suites/kcephfs/cephfs/overrides/log-config.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/log-config.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/cephfs/overrides/osd-asserts.yaml b/qa/suites/kcephfs/cephfs/overrides/osd-asserts.yaml new file mode 120000 index 00000000..f290c749 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/overrides/osd-asserts.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/osd-asserts.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/cephfs/overrides/whitelist_health.yaml b/qa/suites/kcephfs/cephfs/overrides/whitelist_health.yaml new file mode 120000 index 00000000..74f39a49 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/overrides/whitelist_health.yaml 
@@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_health.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/cephfs/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/kcephfs/cephfs/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000..b4528c0f --- /dev/null +++ b/qa/suites/kcephfs/cephfs/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/cephfs/tasks/.qa b/qa/suites/kcephfs/cephfs/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_direct_io.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_direct_io.yaml new file mode 100644 index 00000000..d0128bce --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_direct_io.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + clients: + all: + - direct_io + diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_kernel_untar_build.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_kernel_untar_build.yaml new file mode 100644 index 00000000..5c9acc10 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_kernel_untar_build.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - kernel_untar_build.sh diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_misc.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_misc.yaml new file mode 100644 index 00000000..aa62b9e8 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_misc.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - fs/misc diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_o_trunc.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_o_trunc.yaml new file mode 100644 index 00000000..91c953b9 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_o_trunc.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + clients: + all: + - fs/test_o_trunc.sh + diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_snaps.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_snaps.yaml new file mode 100644 index 00000000..790c93c2 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_snaps.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - fs/snaps diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_dbench.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_dbench.yaml new file mode 100644 index 00000000..41b2bc8e --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_dbench.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/dbench.sh diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml new file mode 100644 index 00000000..3eedd281 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_ffsb.yaml @@ -0,0 +1,10 @@ +overrides: + ceph: + log-whitelist: + - SLOW_OPS + - slow request +tasks: +- workunit: + clients: + all: + - suites/ffsb.sh diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsstress.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsstress.yaml new file mode 100644 index 00000000..ddb18fb7 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsstress.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - 
suites/fsstress.sh diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsx.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsx.yaml new file mode 100644 index 00000000..8b2b1ab5 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsx.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/fsx.sh diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsync.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsync.yaml new file mode 100644 index 00000000..7efa1adb --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_fsync.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/fsync-tester.sh diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_iozone.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_iozone.yaml new file mode 100644 index 00000000..9270f3c5 --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_iozone.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/iozone.sh diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_pjd.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_pjd.yaml new file mode 100644 index 00000000..1de182ce --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_suites_pjd.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + timeout: 6h + clients: + all: + - suites/pjd.sh diff --git a/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_trivial_sync.yaml b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_trivial_sync.yaml new file mode 100644 index 00000000..36e7411b --- /dev/null +++ b/qa/suites/kcephfs/cephfs/tasks/kclient_workunit_trivial_sync.yaml @@ -0,0 +1,4 @@ +tasks: +- workunit: + clients: + all: [fs/misc/trivial_sync.sh] diff --git a/qa/suites/kcephfs/mixed-clients/% b/qa/suites/kcephfs/mixed-clients/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/kcephfs/mixed-clients/.qa b/qa/suites/kcephfs/mixed-clients/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/kcephfs/mixed-clients/begin.yaml b/qa/suites/kcephfs/mixed-clients/begin.yaml new file mode 120000 index 00000000..311d404f --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/begin.yaml @@ -0,0 +1 @@ +.qa/cephfs/begin.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/mixed-clients/clusters/.qa b/qa/suites/kcephfs/mixed-clients/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/kcephfs/mixed-clients/clusters/1-mds-2-client.yaml b/qa/suites/kcephfs/mixed-clients/clusters/1-mds-2-client.yaml new file mode 120000 index 00000000..9f4f161a --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/clusters/1-mds-2-client.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/1-mds-2-client.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/mixed-clients/conf b/qa/suites/kcephfs/mixed-clients/conf new file mode 120000 index 00000000..16e8cc44 --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/conf @@ -0,0 +1 @@ +.qa/cephfs/conf \ No newline at end of file diff --git a/qa/suites/kcephfs/mixed-clients/kclient-overrides b/qa/suites/kcephfs/mixed-clients/kclient-overrides new file mode 120000 index 00000000..58b04fb2 --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/kclient-overrides @@ -0,0 +1 
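The kcephfs/cephfs facet crosses every workunit with an inline-data dimension: inline/yes.yaml enables the inline_data feature before the workunit runs, while inline/no.yaml is an empty fragment so the default path is exercised too. The enabling step as it appears in a merged job, restated from yes.yaml; the fsstress workunit shown after it stands in for whichever task fragment is selected:

tasks:
- exec:
    client.0:
    - sudo ceph fs set cephfs inline_data true --yes-i-really-mean-it
- workunit:
    clients:
      all: [suites/fsstress.sh]     # any of the task fragments above slots in here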
@@ +.qa/cephfs/mount/kclient/overrides/ \ No newline at end of file diff --git a/qa/suites/kcephfs/mixed-clients/objectstore-ec b/qa/suites/kcephfs/mixed-clients/objectstore-ec new file mode 120000 index 00000000..affe2949 --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/objectstore-ec @@ -0,0 +1 @@ +.qa/cephfs/objectstore-ec \ No newline at end of file diff --git a/qa/suites/kcephfs/mixed-clients/overrides/+ b/qa/suites/kcephfs/mixed-clients/overrides/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/kcephfs/mixed-clients/overrides/.qa b/qa/suites/kcephfs/mixed-clients/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/kcephfs/mixed-clients/overrides/frag_enable.yaml b/qa/suites/kcephfs/mixed-clients/overrides/frag_enable.yaml new file mode 120000 index 00000000..34a39a36 --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/overrides/frag_enable.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/mixed-clients/overrides/log-config.yaml b/qa/suites/kcephfs/mixed-clients/overrides/log-config.yaml new file mode 120000 index 00000000..d955aa5b --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/overrides/log-config.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/log-config.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/mixed-clients/overrides/osd-asserts.yaml b/qa/suites/kcephfs/mixed-clients/overrides/osd-asserts.yaml new file mode 120000 index 00000000..f290c749 --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/overrides/osd-asserts.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/osd-asserts.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/mixed-clients/overrides/whitelist_health.yaml b/qa/suites/kcephfs/mixed-clients/overrides/whitelist_health.yaml new file mode 120000 index 00000000..74f39a49 --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/overrides/whitelist_health.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_health.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/mixed-clients/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/kcephfs/mixed-clients/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000..b4528c0f --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/mixed-clients/tasks/.qa b/qa/suites/kcephfs/mixed-clients/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_dbench_iozone.yaml b/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_dbench_iozone.yaml new file mode 100644 index 00000000..78b2d761 --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_dbench_iozone.yaml @@ -0,0 +1,18 @@ +tasks: +- parallel: + - user-workload + - kclient-workload +user-workload: + sequential: + - ceph-fuse: [client.0] + - workunit: + clients: + client.0: + - suites/iozone.sh +kclient-workload: + sequential: + - kclient: [client.1] + - workunit: + clients: + client.1: + - suites/dbench.sh diff --git a/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml 
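The mixed-clients tasks run a FUSE client and a kernel client against the same filesystem at once: a top-level parallel task launches two named sequential blocks, one mounting ceph-fuse on client.0 and one mounting the kernel client on client.1, each with its own workunit. A sketch of a third pairing in the same shape; the ffsb/fsx combination is illustrative, not a file added by this patch:

tasks:
- parallel:
  - user-workload
  - kclient-workload
user-workload:
  sequential:
  - ceph-fuse: [client.0]
  - workunit:
      clients:
        client.0: [suites/ffsb.sh]      # illustrative FUSE-side load
kclient-workload:
  sequential:
  - kclient: [client.1]
  - workunit:
      clients:
        client.1: [suites/fsx.sh]       # illustrative kernel-client load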
b/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml new file mode 100644 index 00000000..d637ff98 --- /dev/null +++ b/qa/suites/kcephfs/mixed-clients/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml @@ -0,0 +1,18 @@ +tasks: +- parallel: + - user-workload + - kclient-workload +user-workload: + sequential: + - ceph-fuse: [client.0] + - workunit: + clients: + client.0: + - suites/blogbench.sh +kclient-workload: + sequential: + - kclient: [client.1] + - workunit: + clients: + client.1: + - kernel_untar_build.sh diff --git a/qa/suites/kcephfs/recovery/% b/qa/suites/kcephfs/recovery/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/kcephfs/recovery/.qa b/qa/suites/kcephfs/recovery/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/kcephfs/recovery/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/kcephfs/recovery/begin.yaml b/qa/suites/kcephfs/recovery/begin.yaml new file mode 120000 index 00000000..311d404f --- /dev/null +++ b/qa/suites/kcephfs/recovery/begin.yaml @@ -0,0 +1 @@ +.qa/cephfs/begin.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/recovery/clusters/.qa b/qa/suites/kcephfs/recovery/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/kcephfs/recovery/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/kcephfs/recovery/clusters/1-mds-4-client.yaml b/qa/suites/kcephfs/recovery/clusters/1-mds-4-client.yaml new file mode 120000 index 00000000..65b01b41 --- /dev/null +++ b/qa/suites/kcephfs/recovery/clusters/1-mds-4-client.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/1-mds-4-client.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/recovery/conf b/qa/suites/kcephfs/recovery/conf new file mode 120000 index 00000000..16e8cc44 --- /dev/null +++ b/qa/suites/kcephfs/recovery/conf @@ -0,0 +1 @@ +.qa/cephfs/conf \ No newline at end of file diff --git a/qa/suites/kcephfs/recovery/kclient b/qa/suites/kcephfs/recovery/kclient new file mode 120000 index 00000000..22f94e15 --- /dev/null +++ b/qa/suites/kcephfs/recovery/kclient @@ -0,0 +1 @@ +.qa/cephfs/mount/kclient/ \ No newline at end of file diff --git a/qa/suites/kcephfs/recovery/objectstore-ec b/qa/suites/kcephfs/recovery/objectstore-ec new file mode 120000 index 00000000..affe2949 --- /dev/null +++ b/qa/suites/kcephfs/recovery/objectstore-ec @@ -0,0 +1 @@ +.qa/cephfs/objectstore-ec \ No newline at end of file diff --git a/qa/suites/kcephfs/recovery/overrides/+ b/qa/suites/kcephfs/recovery/overrides/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/kcephfs/recovery/overrides/.qa b/qa/suites/kcephfs/recovery/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/kcephfs/recovery/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/kcephfs/recovery/overrides/frag_enable.yaml b/qa/suites/kcephfs/recovery/overrides/frag_enable.yaml new file mode 120000 index 00000000..34a39a36 --- /dev/null +++ b/qa/suites/kcephfs/recovery/overrides/frag_enable.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/recovery/overrides/log-config.yaml b/qa/suites/kcephfs/recovery/overrides/log-config.yaml new file mode 120000 index 00000000..d955aa5b --- /dev/null +++ b/qa/suites/kcephfs/recovery/overrides/log-config.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/log-config.yaml \ No newline at end of file diff 
--git a/qa/suites/kcephfs/recovery/overrides/osd-asserts.yaml b/qa/suites/kcephfs/recovery/overrides/osd-asserts.yaml new file mode 120000 index 00000000..f290c749 --- /dev/null +++ b/qa/suites/kcephfs/recovery/overrides/osd-asserts.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/osd-asserts.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/recovery/overrides/whitelist_health.yaml b/qa/suites/kcephfs/recovery/overrides/whitelist_health.yaml new file mode 120000 index 00000000..74f39a49 --- /dev/null +++ b/qa/suites/kcephfs/recovery/overrides/whitelist_health.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_health.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/recovery/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/kcephfs/recovery/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000..b4528c0f --- /dev/null +++ b/qa/suites/kcephfs/recovery/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/recovery/tasks/.qa b/qa/suites/kcephfs/recovery/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/kcephfs/recovery/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/kcephfs/recovery/tasks/auto-repair.yaml b/qa/suites/kcephfs/recovery/tasks/auto-repair.yaml new file mode 100644 index 00000000..90d0e7bc --- /dev/null +++ b/qa/suites/kcephfs/recovery/tasks/auto-repair.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + log-whitelist: + - force file system read-only + - bad backtrace + - MDS in read-only mode + - \(MDS_READ_ONLY\) + + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_auto_repair diff --git a/qa/suites/kcephfs/recovery/tasks/backtrace.yaml b/qa/suites/kcephfs/recovery/tasks/backtrace.yaml new file mode 100644 index 00000000..d740a5f6 --- /dev/null +++ b/qa/suites/kcephfs/recovery/tasks/backtrace.yaml @@ -0,0 +1,5 @@ + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_backtrace diff --git a/qa/suites/kcephfs/recovery/tasks/client-limits.yaml b/qa/suites/kcephfs/recovery/tasks/client-limits.yaml new file mode 100644 index 00000000..f816cee9 --- /dev/null +++ b/qa/suites/kcephfs/recovery/tasks/client-limits.yaml @@ -0,0 +1,20 @@ + +overrides: + ceph: + log-whitelist: + - responding to mclientcaps\(revoke\) + - not advance its oldest_client_tid + - failing to advance its oldest client/flush tid + - Too many inodes in cache + - failing to respond to cache pressure + - slow requests are blocked + - failing to respond to capability release + - MDS cache is too large + - \(MDS_CLIENT_OLDEST_TID\) + - \(MDS_CACHE_OVERSIZED\) + +tasks: + - cephfs_test_runner: + fail_on_skip: false + modules: + - tasks.cephfs.test_client_limits diff --git a/qa/suites/kcephfs/recovery/tasks/client-recovery.yaml b/qa/suites/kcephfs/recovery/tasks/client-recovery.yaml new file mode 100644 index 00000000..725a259d --- /dev/null +++ b/qa/suites/kcephfs/recovery/tasks/client-recovery.yaml @@ -0,0 +1,15 @@ + +# The task interferes with the network, so we need +# to permit OSDs to complain about that. 
+overrides: + ceph: + log-whitelist: + - but it is still running + - slow request + - evicting unresponsive client + +tasks: + - cephfs_test_runner: + fail_on_skip: false + modules: + - tasks.cephfs.test_client_recovery diff --git a/qa/suites/kcephfs/recovery/tasks/damage.yaml b/qa/suites/kcephfs/recovery/tasks/damage.yaml new file mode 100644 index 00000000..9ae738f0 --- /dev/null +++ b/qa/suites/kcephfs/recovery/tasks/damage.yaml @@ -0,0 +1,27 @@ + +overrides: + ceph: + log-whitelist: + - bad backtrace + - object missing on disk + - error reading table object + - error reading sessionmap + - Error loading MDS rank + - missing journal object + - Error recovering journal + - error decoding table object + - failed to read JournalPointer + - Corrupt directory entry + - Corrupt fnode header + - corrupt sessionmap header + - Corrupt dentry + - Scrub error on inode + - Metadata damage detected + - MDS_READ_ONLY + - force file system read-only + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_damage + diff --git a/qa/suites/kcephfs/recovery/tasks/data-scan.yaml b/qa/suites/kcephfs/recovery/tasks/data-scan.yaml new file mode 100644 index 00000000..8a05e22a --- /dev/null +++ b/qa/suites/kcephfs/recovery/tasks/data-scan.yaml @@ -0,0 +1,19 @@ + +overrides: + ceph: + log-whitelist: + - bad backtrace + - object missing on disk + - error reading table object + - error reading sessionmap + - unmatched fragstat + - was unreadable, recreating it now + - Scrub error on inode + - Metadata damage detected + - inconsistent rstat on inode + - Error recovering journal + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_data_scan diff --git a/qa/suites/kcephfs/recovery/tasks/failover.yaml b/qa/suites/kcephfs/recovery/tasks/failover.yaml new file mode 100644 index 00000000..ab7b4d37 --- /dev/null +++ b/qa/suites/kcephfs/recovery/tasks/failover.yaml @@ -0,0 +1,12 @@ +overrides: + ceph: + log-whitelist: + - not responding, replacing + - \(MDS_INSUFFICIENT_STANDBY\) + - \(MDS_ALL_DOWN\) + - \(MDS_UP_LESS_THAN_MAX\) +tasks: + - cephfs_test_runner: + fail_on_skip: false + modules: + - tasks.cephfs.test_failover diff --git a/qa/suites/kcephfs/recovery/tasks/forward-scrub.yaml b/qa/suites/kcephfs/recovery/tasks/forward-scrub.yaml new file mode 100644 index 00000000..b92cf105 --- /dev/null +++ b/qa/suites/kcephfs/recovery/tasks/forward-scrub.yaml @@ -0,0 +1,14 @@ + +overrides: + ceph: + log-whitelist: + - inode wrongly marked free + - bad backtrace on inode + - inode table repaired for inode + - Scrub error on inode + - Metadata damage detected + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_forward_scrub diff --git a/qa/suites/kcephfs/recovery/tasks/journal-repair.yaml b/qa/suites/kcephfs/recovery/tasks/journal-repair.yaml new file mode 100644 index 00000000..66f819d0 --- /dev/null +++ b/qa/suites/kcephfs/recovery/tasks/journal-repair.yaml @@ -0,0 +1,14 @@ + +overrides: + ceph: + log-whitelist: + - bad backtrace on directory inode + - error reading table object + - Metadata damage detected + - slow requests are blocked + - Behind on trimming + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_journal_repair diff --git a/qa/suites/kcephfs/recovery/tasks/mds-flush.yaml b/qa/suites/kcephfs/recovery/tasks/mds-flush.yaml new file mode 100644 index 00000000..d59a8ad5 --- /dev/null +++ b/qa/suites/kcephfs/recovery/tasks/mds-flush.yaml @@ -0,0 +1,5 @@ + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_flush diff --git 
a/qa/suites/kcephfs/recovery/tasks/mds-full.yaml b/qa/suites/kcephfs/recovery/tasks/mds-full.yaml new file mode 100644 index 00000000..e9744e71 --- /dev/null +++ b/qa/suites/kcephfs/recovery/tasks/mds-full.yaml @@ -0,0 +1,29 @@ + +overrides: + ceph: + cephfs_ec_profile: + - disabled + log-whitelist: + - OSD full dropping all updates + - OSD near full + - pausewr flag + - failsafe engaged, dropping updates + - failsafe disengaged, no longer dropping + - is full \(reached quota + - POOL_FULL + - POOL_BACKFILLFULL + conf: + mon: + mon osd nearfull ratio: 0.6 + mon osd backfillfull ratio: 0.6 + mon osd full ratio: 0.7 + osd: + osd mon report interval: 5 + osd objectstore: memstore + osd failsafe full ratio: 1.0 + memstore device bytes: 200000000 + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_full diff --git a/qa/suites/kcephfs/recovery/tasks/pool-perm.yaml b/qa/suites/kcephfs/recovery/tasks/pool-perm.yaml new file mode 100644 index 00000000..f220626d --- /dev/null +++ b/qa/suites/kcephfs/recovery/tasks/pool-perm.yaml @@ -0,0 +1,5 @@ + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_pool_perm diff --git a/qa/suites/kcephfs/recovery/tasks/sessionmap.yaml b/qa/suites/kcephfs/recovery/tasks/sessionmap.yaml new file mode 100644 index 00000000..88ae6019 --- /dev/null +++ b/qa/suites/kcephfs/recovery/tasks/sessionmap.yaml @@ -0,0 +1,10 @@ +overrides: + ceph: + log-whitelist: + - client session with non-allowable root + +tasks: + - cephfs_test_runner: + fail_on_skip: false + modules: + - tasks.cephfs.test_sessionmap diff --git a/qa/suites/kcephfs/recovery/tasks/strays.yaml b/qa/suites/kcephfs/recovery/tasks/strays.yaml new file mode 100644 index 00000000..2809fc14 --- /dev/null +++ b/qa/suites/kcephfs/recovery/tasks/strays.yaml @@ -0,0 +1,5 @@ + +tasks: + - cephfs_test_runner: + modules: + - tasks.cephfs.test_strays diff --git a/qa/suites/kcephfs/recovery/tasks/volume-client.yaml b/qa/suites/kcephfs/recovery/tasks/volume-client.yaml new file mode 100644 index 00000000..9ecaaf4f --- /dev/null +++ b/qa/suites/kcephfs/recovery/tasks/volume-client.yaml @@ -0,0 +1,9 @@ +overrides: + ceph: + log-whitelist: + - MON_DOWN +tasks: + - cephfs_test_runner: + fail_on_skip: false + modules: + - tasks.cephfs.test_volume_client diff --git a/qa/suites/kcephfs/thrash/% b/qa/suites/kcephfs/thrash/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/kcephfs/thrash/.qa b/qa/suites/kcephfs/thrash/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/kcephfs/thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/kcephfs/thrash/begin.yaml b/qa/suites/kcephfs/thrash/begin.yaml new file mode 120000 index 00000000..311d404f --- /dev/null +++ b/qa/suites/kcephfs/thrash/begin.yaml @@ -0,0 +1 @@ +.qa/cephfs/begin.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/thrash/clusters/.qa b/qa/suites/kcephfs/thrash/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/kcephfs/thrash/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/kcephfs/thrash/clusters/1-mds-1-client.yaml b/qa/suites/kcephfs/thrash/clusters/1-mds-1-client.yaml new file mode 120000 index 00000000..64bdb79f --- /dev/null +++ b/qa/suites/kcephfs/thrash/clusters/1-mds-1-client.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/1-mds-1-client.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/thrash/conf b/qa/suites/kcephfs/thrash/conf new file mode 120000 index 
00000000..16e8cc44 --- /dev/null +++ b/qa/suites/kcephfs/thrash/conf @@ -0,0 +1 @@ +.qa/cephfs/conf \ No newline at end of file diff --git a/qa/suites/kcephfs/thrash/kclient b/qa/suites/kcephfs/thrash/kclient new file mode 120000 index 00000000..22f94e15 --- /dev/null +++ b/qa/suites/kcephfs/thrash/kclient @@ -0,0 +1 @@ +.qa/cephfs/mount/kclient/ \ No newline at end of file diff --git a/qa/suites/kcephfs/thrash/objectstore-ec b/qa/suites/kcephfs/thrash/objectstore-ec new file mode 120000 index 00000000..affe2949 --- /dev/null +++ b/qa/suites/kcephfs/thrash/objectstore-ec @@ -0,0 +1 @@ +.qa/cephfs/objectstore-ec \ No newline at end of file diff --git a/qa/suites/kcephfs/thrash/overrides/+ b/qa/suites/kcephfs/thrash/overrides/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/kcephfs/thrash/overrides/.qa b/qa/suites/kcephfs/thrash/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/kcephfs/thrash/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/kcephfs/thrash/overrides/frag_enable.yaml b/qa/suites/kcephfs/thrash/overrides/frag_enable.yaml new file mode 120000 index 00000000..34a39a36 --- /dev/null +++ b/qa/suites/kcephfs/thrash/overrides/frag_enable.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/frag_enable.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/thrash/overrides/log-config.yaml b/qa/suites/kcephfs/thrash/overrides/log-config.yaml new file mode 120000 index 00000000..d955aa5b --- /dev/null +++ b/qa/suites/kcephfs/thrash/overrides/log-config.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/log-config.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/thrash/overrides/osd-asserts.yaml b/qa/suites/kcephfs/thrash/overrides/osd-asserts.yaml new file mode 120000 index 00000000..f290c749 --- /dev/null +++ b/qa/suites/kcephfs/thrash/overrides/osd-asserts.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/osd-asserts.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/thrash/overrides/thrash-health-whitelist.yaml b/qa/suites/kcephfs/thrash/overrides/thrash-health-whitelist.yaml new file mode 120000 index 00000000..9124eb1a --- /dev/null +++ b/qa/suites/kcephfs/thrash/overrides/thrash-health-whitelist.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/thrash/overrides/whitelist_health.yaml b/qa/suites/kcephfs/thrash/overrides/whitelist_health.yaml new file mode 120000 index 00000000..74f39a49 --- /dev/null +++ b/qa/suites/kcephfs/thrash/overrides/whitelist_health.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_health.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/thrash/overrides/whitelist_wrongly_marked_down.yaml b/qa/suites/kcephfs/thrash/overrides/whitelist_wrongly_marked_down.yaml new file mode 120000 index 00000000..b4528c0f --- /dev/null +++ b/qa/suites/kcephfs/thrash/overrides/whitelist_wrongly_marked_down.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/whitelist_wrongly_marked_down.yaml \ No newline at end of file diff --git a/qa/suites/kcephfs/thrash/thrashers/.qa b/qa/suites/kcephfs/thrash/thrashers/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/kcephfs/thrash/thrashers/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/kcephfs/thrash/thrashers/default.yaml b/qa/suites/kcephfs/thrash/thrashers/default.yaml new file mode 100644 index 00000000..1829619b --- /dev/null +++ b/qa/suites/kcephfs/thrash/thrashers/default.yaml @@ -0,0 +1,7 @@ 
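+# Kill and revive OSDs with the default thrasher settings while the selected
+# workload runs; the whitelist below covers log noise expected while OSDs bounce.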
+overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost +tasks: +- thrashosds: diff --git a/qa/suites/kcephfs/thrash/thrashers/mds.yaml b/qa/suites/kcephfs/thrash/thrashers/mds.yaml new file mode 100644 index 00000000..ce87575c --- /dev/null +++ b/qa/suites/kcephfs/thrash/thrashers/mds.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + log-whitelist: + - not responding, replacing + +tasks: +- mds_thrash: diff --git a/qa/suites/kcephfs/thrash/thrashers/mon.yaml b/qa/suites/kcephfs/thrash/thrashers/mon.yaml new file mode 100644 index 00000000..d72a99cb --- /dev/null +++ b/qa/suites/kcephfs/thrash/thrashers/mon.yaml @@ -0,0 +1,9 @@ +overrides: + ceph: + log-whitelist: + - \(MON_DOWN\) + +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 diff --git a/qa/suites/kcephfs/thrash/workloads/.qa b/qa/suites/kcephfs/thrash/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/kcephfs/thrash/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml b/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml new file mode 100644 index 00000000..53e74bea --- /dev/null +++ b/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_ffsb.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + log-whitelist: + - SLOW_OPS + - slow request + conf: + osd: + filestore flush min: 0 +tasks: +- workunit: + clients: + all: + - suites/ffsb.sh diff --git a/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_iozone.yaml b/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_iozone.yaml new file mode 100644 index 00000000..9270f3c5 --- /dev/null +++ b/qa/suites/kcephfs/thrash/workloads/kclient_workunit_suites_iozone.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/iozone.sh diff --git a/qa/suites/krbd/.qa b/qa/suites/krbd/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/basic/% b/qa/suites/krbd/basic/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/krbd/basic/.qa b/qa/suites/krbd/basic/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/basic/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/basic/bluestore-bitmap.yaml b/qa/suites/krbd/basic/bluestore-bitmap.yaml new file mode 120000 index 00000000..a59cf517 --- /dev/null +++ b/qa/suites/krbd/basic/bluestore-bitmap.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-bitmap.yaml \ No newline at end of file diff --git a/qa/suites/krbd/basic/ceph/.qa b/qa/suites/krbd/basic/ceph/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/basic/ceph/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/basic/ceph/ceph.yaml b/qa/suites/krbd/basic/ceph/ceph.yaml new file mode 100644 index 00000000..2030acb9 --- /dev/null +++ b/qa/suites/krbd/basic/ceph/ceph.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/krbd/basic/clusters/.qa b/qa/suites/krbd/basic/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/basic/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/basic/clusters/fixed-1.yaml b/qa/suites/krbd/basic/clusters/fixed-1.yaml new file mode 120000 index 00000000..02df5dd0 --- /dev/null +++ 
b/qa/suites/krbd/basic/clusters/fixed-1.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-1.yaml \ No newline at end of file diff --git a/qa/suites/krbd/basic/conf.yaml b/qa/suites/krbd/basic/conf.yaml new file mode 100644 index 00000000..5e7ed992 --- /dev/null +++ b/qa/suites/krbd/basic/conf.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false + client: + rbd default features: 37 diff --git a/qa/suites/krbd/basic/ms_mode/.qa b/qa/suites/krbd/basic/ms_mode/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/basic/ms_mode/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/basic/ms_mode/crc.yaml b/qa/suites/krbd/basic/ms_mode/crc.yaml new file mode 100644 index 00000000..3b072578 --- /dev/null +++ b/qa/suites/krbd/basic/ms_mode/crc.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=crc diff --git a/qa/suites/krbd/basic/ms_mode/legacy.yaml b/qa/suites/krbd/basic/ms_mode/legacy.yaml new file mode 100644 index 00000000..0048dcb0 --- /dev/null +++ b/qa/suites/krbd/basic/ms_mode/legacy.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=legacy diff --git a/qa/suites/krbd/basic/ms_mode/secure.yaml b/qa/suites/krbd/basic/ms_mode/secure.yaml new file mode 100644 index 00000000..a735db18 --- /dev/null +++ b/qa/suites/krbd/basic/ms_mode/secure.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=secure diff --git a/qa/suites/krbd/basic/tasks/.qa b/qa/suites/krbd/basic/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/basic/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/basic/tasks/krbd_deep_flatten.yaml b/qa/suites/krbd/basic/tasks/krbd_deep_flatten.yaml new file mode 100644 index 00000000..a821b73a --- /dev/null +++ b/qa/suites/krbd/basic/tasks/krbd_deep_flatten.yaml @@ -0,0 +1,5 @@ +tasks: +- cram: + clients: + client.0: + - qa/rbd/krbd_deep_flatten.t diff --git a/qa/suites/krbd/basic/tasks/krbd_discard.yaml b/qa/suites/krbd/basic/tasks/krbd_discard.yaml new file mode 100644 index 00000000..59ec5b94 --- /dev/null +++ b/qa/suites/krbd/basic/tasks/krbd_discard.yaml @@ -0,0 +1,9 @@ +tasks: +- cram: + clients: + client.0: + - qa/rbd/krbd_discard.t + - qa/rbd/krbd_discard_512b.t + - qa/rbd/krbd_discard_4M.t + - qa/rbd/krbd_zeroout.t + - qa/rbd/krbd_discard_granularity.t diff --git a/qa/suites/krbd/basic/tasks/krbd_huge_image.yaml b/qa/suites/krbd/basic/tasks/krbd_huge_image.yaml new file mode 100644 index 00000000..15ff033c --- /dev/null +++ b/qa/suites/krbd/basic/tasks/krbd_huge_image.yaml @@ -0,0 +1,5 @@ +tasks: +- cram: + clients: + client.0: + - qa/rbd/krbd_huge_image.t diff --git a/qa/suites/krbd/basic/tasks/krbd_msgr_segments.yaml b/qa/suites/krbd/basic/tasks/krbd_msgr_segments.yaml new file mode 100644 index 00000000..cfa524e7 --- /dev/null +++ b/qa/suites/krbd/basic/tasks/krbd_msgr_segments.yaml @@ -0,0 +1,5 @@ +tasks: +- cram: + clients: + client.0: + - qa/rbd/krbd_msgr_segments.t diff --git a/qa/suites/krbd/basic/tasks/krbd_parent_overlap.yaml b/qa/suites/krbd/basic/tasks/krbd_parent_overlap.yaml new file mode 100644 index 00000000..9bcf1fa3 --- /dev/null +++ b/qa/suites/krbd/basic/tasks/krbd_parent_overlap.yaml @@ -0,0 +1,5 @@ +tasks: +- cram: + clients: + client.0: + - qa/rbd/krbd_parent_overlap.t diff --git a/qa/suites/krbd/basic/tasks/krbd_read_only.yaml 
b/qa/suites/krbd/basic/tasks/krbd_read_only.yaml new file mode 100644 index 00000000..8194b89c --- /dev/null +++ b/qa/suites/krbd/basic/tasks/krbd_read_only.yaml @@ -0,0 +1,6 @@ +tasks: +- cram: + clients: + client.0: + - qa/rbd/krbd_blkroset.t + - qa/rbd/krbd_get_features.t diff --git a/qa/suites/krbd/basic/tasks/krbd_whole_object_zeroout.yaml b/qa/suites/krbd/basic/tasks/krbd_whole_object_zeroout.yaml new file mode 100644 index 00000000..3b0ff8d1 --- /dev/null +++ b/qa/suites/krbd/basic/tasks/krbd_whole_object_zeroout.yaml @@ -0,0 +1,5 @@ +tasks: +- cram: + clients: + client.0: + - qa/rbd/krbd_whole_object_zeroout.t diff --git a/qa/suites/krbd/fsx/% b/qa/suites/krbd/fsx/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/krbd/fsx/.qa b/qa/suites/krbd/fsx/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/fsx/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/fsx/ceph/.qa b/qa/suites/krbd/fsx/ceph/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/fsx/ceph/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/fsx/ceph/ceph.yaml b/qa/suites/krbd/fsx/ceph/ceph.yaml new file mode 100644 index 00000000..2030acb9 --- /dev/null +++ b/qa/suites/krbd/fsx/ceph/ceph.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/krbd/fsx/clusters/.qa b/qa/suites/krbd/fsx/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/fsx/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/fsx/clusters/3-node.yaml b/qa/suites/krbd/fsx/clusters/3-node.yaml new file mode 100644 index 00000000..0433ec9b --- /dev/null +++ b/qa/suites/krbd/fsx/clusters/3-node.yaml @@ -0,0 +1,14 @@ +# fixed-3.yaml, but with two additional clients on the same target +roles: +- [mon.a, mon.c, mgr.x, osd.0, osd.1, osd.2, osd.3] +- [mon.b, mgr.y, osd.4, osd.5, osd.6, osd.7] +- [client.0, client.1, client.2] +openstack: +- volumes: # attached to each instance + count: 4 + size: 10 # GB +overrides: + ceph: + conf: + osd: + osd shutdown pgref assert: true diff --git a/qa/suites/krbd/fsx/conf.yaml b/qa/suites/krbd/fsx/conf.yaml new file mode 100644 index 00000000..30da870b --- /dev/null +++ b/qa/suites/krbd/fsx/conf.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false diff --git a/qa/suites/krbd/fsx/ms_mode$/.qa b/qa/suites/krbd/fsx/ms_mode$/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/fsx/ms_mode$/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/fsx/ms_mode$/crc.yaml b/qa/suites/krbd/fsx/ms_mode$/crc.yaml new file mode 100644 index 00000000..3b072578 --- /dev/null +++ b/qa/suites/krbd/fsx/ms_mode$/crc.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=crc diff --git a/qa/suites/krbd/fsx/ms_mode$/legacy.yaml b/qa/suites/krbd/fsx/ms_mode$/legacy.yaml new file mode 100644 index 00000000..0048dcb0 --- /dev/null +++ b/qa/suites/krbd/fsx/ms_mode$/legacy.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=legacy diff --git a/qa/suites/krbd/fsx/ms_mode$/prefer-crc.yaml b/qa/suites/krbd/fsx/ms_mode$/prefer-crc.yaml new file mode 100644 index 00000000..1054473a --- /dev/null +++ b/qa/suites/krbd/fsx/ms_mode$/prefer-crc.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: 
ms_mode=prefer-crc diff --git a/qa/suites/krbd/fsx/ms_mode$/secure.yaml b/qa/suites/krbd/fsx/ms_mode$/secure.yaml new file mode 100644 index 00000000..a735db18 --- /dev/null +++ b/qa/suites/krbd/fsx/ms_mode$/secure.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=secure diff --git a/qa/suites/krbd/fsx/objectstore/.qa b/qa/suites/krbd/fsx/objectstore/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/fsx/objectstore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/fsx/objectstore/bluestore-bitmap.yaml b/qa/suites/krbd/fsx/objectstore/bluestore-bitmap.yaml new file mode 120000 index 00000000..a59cf517 --- /dev/null +++ b/qa/suites/krbd/fsx/objectstore/bluestore-bitmap.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-bitmap.yaml \ No newline at end of file diff --git a/qa/suites/krbd/fsx/objectstore/filestore-xfs.yaml b/qa/suites/krbd/fsx/objectstore/filestore-xfs.yaml new file mode 120000 index 00000000..41f2a9d1 --- /dev/null +++ b/qa/suites/krbd/fsx/objectstore/filestore-xfs.yaml @@ -0,0 +1 @@ +.qa/objectstore/filestore-xfs.yaml \ No newline at end of file diff --git a/qa/suites/krbd/fsx/striping/.qa b/qa/suites/krbd/fsx/striping/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/fsx/striping/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/fsx/striping/default/% b/qa/suites/krbd/fsx/striping/default/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/krbd/fsx/striping/default/.qa b/qa/suites/krbd/fsx/striping/default/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/fsx/striping/default/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/fsx/striping/default/msgr-failures/.qa b/qa/suites/krbd/fsx/striping/default/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/fsx/striping/default/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/fsx/striping/default/msgr-failures/few.yaml b/qa/suites/krbd/fsx/striping/default/msgr-failures/few.yaml new file mode 100644 index 00000000..4326fe23 --- /dev/null +++ b/qa/suites/krbd/fsx/striping/default/msgr-failures/few.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/krbd/fsx/striping/default/msgr-failures/many.yaml b/qa/suites/krbd/fsx/striping/default/msgr-failures/many.yaml new file mode 100644 index 00000000..4caedaeb --- /dev/null +++ b/qa/suites/krbd/fsx/striping/default/msgr-failures/many.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 500 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/krbd/fsx/striping/default/randomized-striping-off.yaml b/qa/suites/krbd/fsx/striping/default/randomized-striping-off.yaml new file mode 100644 index 00000000..0bf96a8d --- /dev/null +++ b/qa/suites/krbd/fsx/striping/default/randomized-striping-off.yaml @@ -0,0 +1,3 @@ +overrides: + rbd_fsx: + randomized_striping: false diff --git a/qa/suites/krbd/fsx/striping/fancy/% b/qa/suites/krbd/fsx/striping/fancy/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/krbd/fsx/striping/fancy/.qa b/qa/suites/krbd/fsx/striping/fancy/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/fsx/striping/fancy/.qa @@ -0,0 +1 
@@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/fsx/striping/fancy/msgr-failures/.qa b/qa/suites/krbd/fsx/striping/fancy/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/fsx/striping/fancy/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/fsx/striping/fancy/msgr-failures/few.yaml b/qa/suites/krbd/fsx/striping/fancy/msgr-failures/few.yaml new file mode 100644 index 00000000..4326fe23 --- /dev/null +++ b/qa/suites/krbd/fsx/striping/fancy/msgr-failures/few.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/krbd/fsx/striping/fancy/randomized-striping-on.yaml b/qa/suites/krbd/fsx/striping/fancy/randomized-striping-on.yaml new file mode 100644 index 00000000..c2823e4e --- /dev/null +++ b/qa/suites/krbd/fsx/striping/fancy/randomized-striping-on.yaml @@ -0,0 +1,3 @@ +overrides: + rbd_fsx: + randomized_striping: true diff --git a/qa/suites/krbd/fsx/tasks/.qa b/qa/suites/krbd/fsx/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/fsx/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/fsx/tasks/fsx-1-client.yaml b/qa/suites/krbd/fsx/tasks/fsx-1-client.yaml new file mode 100644 index 00000000..b0af9829 --- /dev/null +++ b/qa/suites/krbd/fsx/tasks/fsx-1-client.yaml @@ -0,0 +1,10 @@ +tasks: +- rbd_fsx: + clients: [client.0] + ops: 20000 + krbd: true + readbdy: 512 + writebdy: 512 + truncbdy: 512 + holebdy: 512 + punch_holes: true diff --git a/qa/suites/krbd/fsx/tasks/fsx-3-client.yaml b/qa/suites/krbd/fsx/tasks/fsx-3-client.yaml new file mode 100644 index 00000000..5b8e3701 --- /dev/null +++ b/qa/suites/krbd/fsx/tasks/fsx-3-client.yaml @@ -0,0 +1,10 @@ +tasks: +- rbd_fsx: + clients: [client.0, client.1, client.2] + ops: 10000 + krbd: true + readbdy: 512 + writebdy: 512 + truncbdy: 512 + holebdy: 512 + punch_holes: true diff --git a/qa/suites/krbd/rbd-nomount/% b/qa/suites/krbd/rbd-nomount/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/krbd/rbd-nomount/.qa b/qa/suites/krbd/rbd-nomount/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/rbd-nomount/bluestore-bitmap.yaml b/qa/suites/krbd/rbd-nomount/bluestore-bitmap.yaml new file mode 120000 index 00000000..a59cf517 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/bluestore-bitmap.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-bitmap.yaml \ No newline at end of file diff --git a/qa/suites/krbd/rbd-nomount/clusters/.qa b/qa/suites/krbd/rbd-nomount/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/rbd-nomount/clusters/fixed-3.yaml b/qa/suites/krbd/rbd-nomount/clusters/fixed-3.yaml new file mode 120000 index 00000000..f75a848b --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/clusters/fixed-3.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-3.yaml \ No newline at end of file diff --git a/qa/suites/krbd/rbd-nomount/conf.yaml b/qa/suites/krbd/rbd-nomount/conf.yaml new file mode 100644 index 00000000..5e7ed992 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/conf.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false + client: + rbd default features: 
37 diff --git a/qa/suites/krbd/rbd-nomount/install/.qa b/qa/suites/krbd/rbd-nomount/install/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/rbd-nomount/install/ceph.yaml b/qa/suites/krbd/rbd-nomount/install/ceph.yaml new file mode 100644 index 00000000..2030acb9 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/install/ceph.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/krbd/rbd-nomount/ms_mode/.qa b/qa/suites/krbd/rbd-nomount/ms_mode/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/ms_mode/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/rbd-nomount/ms_mode/crc.yaml b/qa/suites/krbd/rbd-nomount/ms_mode/crc.yaml new file mode 100644 index 00000000..3b072578 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/ms_mode/crc.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=crc diff --git a/qa/suites/krbd/rbd-nomount/ms_mode/legacy.yaml b/qa/suites/krbd/rbd-nomount/ms_mode/legacy.yaml new file mode 100644 index 00000000..0048dcb0 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/ms_mode/legacy.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=legacy diff --git a/qa/suites/krbd/rbd-nomount/ms_mode/secure.yaml b/qa/suites/krbd/rbd-nomount/ms_mode/secure.yaml new file mode 100644 index 00000000..a735db18 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/ms_mode/secure.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=secure diff --git a/qa/suites/krbd/rbd-nomount/msgr-failures/.qa b/qa/suites/krbd/rbd-nomount/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/rbd-nomount/msgr-failures/few.yaml b/qa/suites/krbd/rbd-nomount/msgr-failures/few.yaml new file mode 100644 index 00000000..4326fe23 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/msgr-failures/few.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/krbd/rbd-nomount/msgr-failures/many.yaml b/qa/suites/krbd/rbd-nomount/msgr-failures/many.yaml new file mode 100644 index 00000000..4caedaeb --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/msgr-failures/many.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 500 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/krbd/rbd-nomount/tasks/.qa b/qa/suites/krbd/rbd-nomount/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/rbd-nomount/tasks/krbd_data_pool.yaml b/qa/suites/krbd/rbd-nomount/tasks/krbd_data_pool.yaml new file mode 100644 index 00000000..35b9d67e --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/krbd_data_pool.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - rbd/krbd_data_pool.sh diff --git a/qa/suites/krbd/rbd-nomount/tasks/krbd_exclusive_option.yaml b/qa/suites/krbd/rbd-nomount/tasks/krbd_exclusive_option.yaml new file mode 100644 index 00000000..567deebf --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/krbd_exclusive_option.yaml @@ -0,0 +1,5 @@ +tasks: +- 
workunit: + clients: + all: + - rbd/krbd_exclusive_option.sh diff --git a/qa/suites/krbd/rbd-nomount/tasks/krbd_fallocate.yaml b/qa/suites/krbd/rbd-nomount/tasks/krbd_fallocate.yaml new file mode 100644 index 00000000..a7286982 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/krbd_fallocate.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - rbd/krbd_fallocate.sh diff --git a/qa/suites/krbd/rbd-nomount/tasks/krbd_latest_osdmap_on_map.yaml b/qa/suites/krbd/rbd-nomount/tasks/krbd_latest_osdmap_on_map.yaml new file mode 100644 index 00000000..522be6a4 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/krbd_latest_osdmap_on_map.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - rbd/krbd_latest_osdmap_on_map.sh diff --git a/qa/suites/krbd/rbd-nomount/tasks/krbd_namespaces.yaml b/qa/suites/krbd/rbd-nomount/tasks/krbd_namespaces.yaml new file mode 100644 index 00000000..4d6519a2 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/krbd_namespaces.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - rbd/krbd_namespaces.sh diff --git a/qa/suites/krbd/rbd-nomount/tasks/krbd_udev_enumerate.yaml b/qa/suites/krbd/rbd-nomount/tasks/krbd_udev_enumerate.yaml new file mode 100644 index 00000000..c326507a --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/krbd_udev_enumerate.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - rbd/krbd_udev_enumerate.sh diff --git a/qa/suites/krbd/rbd-nomount/tasks/krbd_udev_netlink_enobufs.yaml b/qa/suites/krbd/rbd-nomount/tasks/krbd_udev_netlink_enobufs.yaml new file mode 100644 index 00000000..b0530d52 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/krbd_udev_netlink_enobufs.yaml @@ -0,0 +1,10 @@ +overrides: + ceph: + log-whitelist: + - pauserd,pausewr flag\(s\) set + +tasks: +- workunit: + clients: + all: + - rbd/krbd_udev_netlink_enobufs.sh diff --git a/qa/suites/krbd/rbd-nomount/tasks/krbd_udev_netns.yaml b/qa/suites/krbd/rbd-nomount/tasks/krbd_udev_netns.yaml new file mode 100644 index 00000000..21e06e38 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/krbd_udev_netns.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - rbd/krbd_udev_netns.sh diff --git a/qa/suites/krbd/rbd-nomount/tasks/krbd_udev_symlinks.yaml b/qa/suites/krbd/rbd-nomount/tasks/krbd_udev_symlinks.yaml new file mode 100644 index 00000000..ee79932f --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/krbd_udev_symlinks.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - rbd/krbd_udev_symlinks.sh diff --git a/qa/suites/krbd/rbd-nomount/tasks/rbd_concurrent.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_concurrent.yaml new file mode 100644 index 00000000..675b98e7 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_concurrent.yaml @@ -0,0 +1,10 @@ +tasks: +- workunit: + clients: + all: + - rbd/concurrent.sh +# Options for rbd/concurrent.sh (default values shown) +# env: +# RBD_CONCURRENT_ITER: 100 +# RBD_CONCURRENT_COUNT: 5 +# RBD_CONCURRENT_DELAY: 5 diff --git a/qa/suites/krbd/rbd-nomount/tasks/rbd_huge_tickets.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_huge_tickets.yaml new file mode 100644 index 00000000..ea421eec --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_huge_tickets.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - rbd/huge-tickets.sh diff --git a/qa/suites/krbd/rbd-nomount/tasks/rbd_image_read.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_image_read.yaml new file mode 100644 index 00000000..e5017e11 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_image_read.yaml @@ -0,0 
+1,15 @@ +tasks: +- workunit: + clients: + all: + - rbd/image_read.sh +# Options for rbd/image_read.sh (default values shown) +# env: +# IMAGE_READ_LOCAL_FILES: 'false' +# IMAGE_READ_FORMAT: '2' +# IMAGE_READ_VERBOSE: 'true' +# IMAGE_READ_PAGE_SIZE: '4096' +# IMAGE_READ_OBJECT_ORDER: '22' +# IMAGE_READ_TEST_CLONES: 'true' +# IMAGE_READ_DOUBLE_ORDER: 'true' +# IMAGE_READ_HALF_ORDER: 'false' diff --git a/qa/suites/krbd/rbd-nomount/tasks/rbd_kernel.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_kernel.yaml new file mode 100644 index 00000000..aa155827 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_kernel.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - rbd/kernel.sh diff --git a/qa/suites/krbd/rbd-nomount/tasks/rbd_map_snapshot_io.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_map_snapshot_io.yaml new file mode 100644 index 00000000..c1529398 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_map_snapshot_io.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - rbd/map-snapshot-io.sh diff --git a/qa/suites/krbd/rbd-nomount/tasks/rbd_map_unmap.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_map_unmap.yaml new file mode 100644 index 00000000..c2160997 --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_map_unmap.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - rbd/map-unmap.sh diff --git a/qa/suites/krbd/rbd-nomount/tasks/rbd_simple_big.yaml b/qa/suites/krbd/rbd-nomount/tasks/rbd_simple_big.yaml new file mode 100644 index 00000000..c493cfaf --- /dev/null +++ b/qa/suites/krbd/rbd-nomount/tasks/rbd_simple_big.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + clients: + all: + - rbd/simple_big.sh + diff --git a/qa/suites/krbd/rbd/% b/qa/suites/krbd/rbd/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/krbd/rbd/.qa b/qa/suites/krbd/rbd/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/rbd/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/rbd/bluestore-bitmap.yaml b/qa/suites/krbd/rbd/bluestore-bitmap.yaml new file mode 120000 index 00000000..a59cf517 --- /dev/null +++ b/qa/suites/krbd/rbd/bluestore-bitmap.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-bitmap.yaml \ No newline at end of file diff --git a/qa/suites/krbd/rbd/clusters/.qa b/qa/suites/krbd/rbd/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/rbd/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/rbd/clusters/fixed-3.yaml b/qa/suites/krbd/rbd/clusters/fixed-3.yaml new file mode 120000 index 00000000..f75a848b --- /dev/null +++ b/qa/suites/krbd/rbd/clusters/fixed-3.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-3.yaml \ No newline at end of file diff --git a/qa/suites/krbd/rbd/conf.yaml b/qa/suites/krbd/rbd/conf.yaml new file mode 100644 index 00000000..5e7ed992 --- /dev/null +++ b/qa/suites/krbd/rbd/conf.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false + client: + rbd default features: 37 diff --git a/qa/suites/krbd/rbd/ms_mode/.qa b/qa/suites/krbd/rbd/ms_mode/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/rbd/ms_mode/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/rbd/ms_mode/crc.yaml b/qa/suites/krbd/rbd/ms_mode/crc.yaml new file mode 100644 index 00000000..3b072578 --- /dev/null +++ b/qa/suites/krbd/rbd/ms_mode/crc.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=crc 
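A note on the ms_mode facets collected here: each one injects a single client option, and teuthology composes a job by deep-merging every facet it picks from the suite tree into one configuration. Assuming that usual merge, a minimal sketch of the client overrides produced by combining the shared krbd/rbd/conf.yaml above with ms_mode/crc.yaml might look like the following (illustrative only, not generated output):

  overrides:
    ceph:
      conf:
        global:
          ms die on skipped message: false      # from conf.yaml
        client:
          rbd default features: 37              # from conf.yaml
          rbd default map options: ms_mode=crc  # from ms_mode/crc.yaml

The map options are handed through to the rbd map step, so each ms_mode facet effectively selects which messenger wire mode the kernel client negotiates.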
diff --git a/qa/suites/krbd/rbd/ms_mode/legacy.yaml b/qa/suites/krbd/rbd/ms_mode/legacy.yaml new file mode 100644 index 00000000..0048dcb0 --- /dev/null +++ b/qa/suites/krbd/rbd/ms_mode/legacy.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=legacy diff --git a/qa/suites/krbd/rbd/ms_mode/secure.yaml b/qa/suites/krbd/rbd/ms_mode/secure.yaml new file mode 100644 index 00000000..a735db18 --- /dev/null +++ b/qa/suites/krbd/rbd/ms_mode/secure.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=secure diff --git a/qa/suites/krbd/rbd/msgr-failures/.qa b/qa/suites/krbd/rbd/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/rbd/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/rbd/msgr-failures/few.yaml b/qa/suites/krbd/rbd/msgr-failures/few.yaml new file mode 100644 index 00000000..4326fe23 --- /dev/null +++ b/qa/suites/krbd/rbd/msgr-failures/few.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/krbd/rbd/msgr-failures/many.yaml b/qa/suites/krbd/rbd/msgr-failures/many.yaml new file mode 100644 index 00000000..4caedaeb --- /dev/null +++ b/qa/suites/krbd/rbd/msgr-failures/many.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 500 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/krbd/rbd/tasks/.qa b/qa/suites/krbd/rbd/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/rbd/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/rbd/tasks/rbd_fio.yaml b/qa/suites/krbd/rbd/tasks/rbd_fio.yaml new file mode 100644 index 00000000..01088fa4 --- /dev/null +++ b/qa/suites/krbd/rbd/tasks/rbd_fio.yaml @@ -0,0 +1,11 @@ +tasks: +- install: +- ceph: null +- rbd_fio: + client.0: + fio-io-size: 90% + formats: [2] + features: [[layering,exclusive-lock]] + io-engine: sync + rw: randrw + runtime: 900 diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_kernel_untar_build.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_kernel_untar_build.yaml new file mode 100644 index 00000000..699cde82 --- /dev/null +++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_kernel_untar_build.yaml @@ -0,0 +1,12 @@ +tasks: +- install: + extra_system_packages: + deb: ['bison', 'flex', 'libelf-dev', 'libssl-dev'] + rpm: ['bison', 'flex', 'elfutils-libelf-devel', 'openssl-devel'] +- ceph: +- rbd: + all: +- workunit: + clients: + all: + - kernel_untar_build.sh diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_dbench.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_dbench.yaml new file mode 100644 index 00000000..d779eea2 --- /dev/null +++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_dbench.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- rbd: + all: +- workunit: + clients: + all: + - suites/dbench.sh diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_ffsb.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_ffsb.yaml new file mode 100644 index 00000000..5204bb87 --- /dev/null +++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_ffsb.yaml @@ -0,0 +1,10 @@ +tasks: +- install: +- ceph: +- rbd: + all: + image_size: 20480 +- workunit: + clients: + all: + - suites/ffsb.sh diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress.yaml new file mode 100644 index 
00000000..f9d62fef --- /dev/null +++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- rbd: + all: +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_ext4.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_ext4.yaml new file mode 100644 index 00000000..f765b74a --- /dev/null +++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsstress_ext4.yaml @@ -0,0 +1,10 @@ +tasks: +- install: +- ceph: +- rbd: + all: + fs_type: ext4 +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsx.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsx.yaml new file mode 100644 index 00000000..98c0849c --- /dev/null +++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_fsx.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- rbd: + all: +- workunit: + clients: + all: + - suites/fsx.sh diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_iozone.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_iozone.yaml new file mode 100644 index 00000000..eb8f18d6 --- /dev/null +++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_suites_iozone.yaml @@ -0,0 +1,10 @@ +tasks: +- install: +- ceph: +- rbd: + all: + image_size: 20480 +- workunit: + clients: + all: + - suites/iozone.sh diff --git a/qa/suites/krbd/rbd/tasks/rbd_workunit_trivial_sync.yaml b/qa/suites/krbd/rbd/tasks/rbd_workunit_trivial_sync.yaml new file mode 100644 index 00000000..7c2796b2 --- /dev/null +++ b/qa/suites/krbd/rbd/tasks/rbd_workunit_trivial_sync.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- rbd: + all: +- workunit: + clients: + all: [fs/misc/trivial_sync.sh] diff --git a/qa/suites/krbd/singleton/% b/qa/suites/krbd/singleton/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/krbd/singleton/.qa b/qa/suites/krbd/singleton/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/singleton/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/singleton/bluestore-bitmap.yaml b/qa/suites/krbd/singleton/bluestore-bitmap.yaml new file mode 120000 index 00000000..a59cf517 --- /dev/null +++ b/qa/suites/krbd/singleton/bluestore-bitmap.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-bitmap.yaml \ No newline at end of file diff --git a/qa/suites/krbd/singleton/conf.yaml b/qa/suites/krbd/singleton/conf.yaml new file mode 100644 index 00000000..5e7ed992 --- /dev/null +++ b/qa/suites/krbd/singleton/conf.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false + client: + rbd default features: 37 diff --git a/qa/suites/krbd/singleton/ms_mode$/.qa b/qa/suites/krbd/singleton/ms_mode$/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/singleton/ms_mode$/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/singleton/ms_mode$/crc.yaml b/qa/suites/krbd/singleton/ms_mode$/crc.yaml new file mode 100644 index 00000000..3b072578 --- /dev/null +++ b/qa/suites/krbd/singleton/ms_mode$/crc.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=crc diff --git a/qa/suites/krbd/singleton/ms_mode$/legacy.yaml b/qa/suites/krbd/singleton/ms_mode$/legacy.yaml new file mode 100644 index 00000000..0048dcb0 --- /dev/null +++ b/qa/suites/krbd/singleton/ms_mode$/legacy.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=legacy diff --git 
a/qa/suites/krbd/singleton/ms_mode$/prefer-crc.yaml b/qa/suites/krbd/singleton/ms_mode$/prefer-crc.yaml new file mode 100644 index 00000000..1054473a --- /dev/null +++ b/qa/suites/krbd/singleton/ms_mode$/prefer-crc.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=prefer-crc diff --git a/qa/suites/krbd/singleton/ms_mode$/secure.yaml b/qa/suites/krbd/singleton/ms_mode$/secure.yaml new file mode 100644 index 00000000..a735db18 --- /dev/null +++ b/qa/suites/krbd/singleton/ms_mode$/secure.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=secure diff --git a/qa/suites/krbd/singleton/msgr-failures/.qa b/qa/suites/krbd/singleton/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/singleton/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/singleton/msgr-failures/few.yaml b/qa/suites/krbd/singleton/msgr-failures/few.yaml new file mode 100644 index 00000000..4326fe23 --- /dev/null +++ b/qa/suites/krbd/singleton/msgr-failures/few.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/krbd/singleton/msgr-failures/many.yaml b/qa/suites/krbd/singleton/msgr-failures/many.yaml new file mode 100644 index 00000000..4caedaeb --- /dev/null +++ b/qa/suites/krbd/singleton/msgr-failures/many.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 500 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/krbd/singleton/tasks/.qa b/qa/suites/krbd/singleton/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/singleton/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/singleton/tasks/rbd_xfstests.yaml b/qa/suites/krbd/singleton/tasks/rbd_xfstests.yaml new file mode 100644 index 00000000..c94af020 --- /dev/null +++ b/qa/suites/krbd/singleton/tasks/rbd_xfstests.yaml @@ -0,0 +1,38 @@ +roles: +- [mon.a, mon.c, osd.0, osd.1, osd.2] +- [mon.b, mgr.x, mds.a, osd.3, osd.4, osd.5] +- [client.0] +- [client.1] +openstack: +- volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- ceph: +- rbd.xfstests: + client.0: &ref + test_image: 'test_image-0' + test_size: 5120 # MB + scratch_image: 'scratch_image-0' + scratch_size: 15360 # MB + fs_type: ext4 + tests: '-g auto -g blockdev -x clone' + exclude: + - generic/042 + - generic/392 + - generic/044 + - generic/045 + - generic/046 + - generic/223 + - ext4/002 # removed upstream + - ext4/304 + - generic/388 + - generic/405 + - generic/422 + - shared/298 # lockdep false positive + randomize: true + client.1: + <<: *ref + test_image: 'test_image-1' + scratch_image: 'scratch_image-1' diff --git a/qa/suites/krbd/thrash/% b/qa/suites/krbd/thrash/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/krbd/thrash/.qa b/qa/suites/krbd/thrash/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/thrash/bluestore-bitmap.yaml b/qa/suites/krbd/thrash/bluestore-bitmap.yaml new file mode 120000 index 00000000..a59cf517 --- /dev/null +++ b/qa/suites/krbd/thrash/bluestore-bitmap.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-bitmap.yaml \ No newline at end of file diff --git a/qa/suites/krbd/thrash/ceph/.qa 
b/qa/suites/krbd/thrash/ceph/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/thrash/ceph/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/thrash/ceph/ceph.yaml b/qa/suites/krbd/thrash/ceph/ceph.yaml new file mode 100644 index 00000000..2030acb9 --- /dev/null +++ b/qa/suites/krbd/thrash/ceph/ceph.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/krbd/thrash/clusters/.qa b/qa/suites/krbd/thrash/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/thrash/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/thrash/clusters/fixed-3.yaml b/qa/suites/krbd/thrash/clusters/fixed-3.yaml new file mode 120000 index 00000000..f75a848b --- /dev/null +++ b/qa/suites/krbd/thrash/clusters/fixed-3.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-3.yaml \ No newline at end of file diff --git a/qa/suites/krbd/thrash/conf.yaml b/qa/suites/krbd/thrash/conf.yaml new file mode 100644 index 00000000..5e7ed992 --- /dev/null +++ b/qa/suites/krbd/thrash/conf.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false + client: + rbd default features: 37 diff --git a/qa/suites/krbd/thrash/ms_mode$/.qa b/qa/suites/krbd/thrash/ms_mode$/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/thrash/ms_mode$/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/thrash/ms_mode$/crc.yaml b/qa/suites/krbd/thrash/ms_mode$/crc.yaml new file mode 100644 index 00000000..3b072578 --- /dev/null +++ b/qa/suites/krbd/thrash/ms_mode$/crc.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=crc diff --git a/qa/suites/krbd/thrash/ms_mode$/legacy.yaml b/qa/suites/krbd/thrash/ms_mode$/legacy.yaml new file mode 100644 index 00000000..0048dcb0 --- /dev/null +++ b/qa/suites/krbd/thrash/ms_mode$/legacy.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=legacy diff --git a/qa/suites/krbd/thrash/ms_mode$/prefer-crc.yaml b/qa/suites/krbd/thrash/ms_mode$/prefer-crc.yaml new file mode 100644 index 00000000..1054473a --- /dev/null +++ b/qa/suites/krbd/thrash/ms_mode$/prefer-crc.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=prefer-crc diff --git a/qa/suites/krbd/thrash/ms_mode$/secure.yaml b/qa/suites/krbd/thrash/ms_mode$/secure.yaml new file mode 100644 index 00000000..a735db18 --- /dev/null +++ b/qa/suites/krbd/thrash/ms_mode$/secure.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default map options: ms_mode=secure diff --git a/qa/suites/krbd/thrash/thrashers/.qa b/qa/suites/krbd/thrash/thrashers/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/thrash/thrashers/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/thrash/thrashers/backoff.yaml b/qa/suites/krbd/thrash/thrashers/backoff.yaml new file mode 100644 index 00000000..48a7a2a2 --- /dev/null +++ b/qa/suites/krbd/thrash/thrashers/backoff.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + conf: + osd: + osd backoff on peering: true + osd backoff on degraded: true + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgnum_shrink: 1 + chance_pgpnum_fix: 1 diff --git a/qa/suites/krbd/thrash/thrashers/mon-thrasher.yaml 
b/qa/suites/krbd/thrash/thrashers/mon-thrasher.yaml new file mode 100644 index 00000000..415684de --- /dev/null +++ b/qa/suites/krbd/thrash/thrashers/mon-thrasher.yaml @@ -0,0 +1,8 @@ +overrides: + ceph: + log-whitelist: + - \(MON_DOWN\) +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 diff --git a/qa/suites/krbd/thrash/thrashers/pggrow.yaml b/qa/suites/krbd/thrash/thrashers/pggrow.yaml new file mode 100644 index 00000000..14346a26 --- /dev/null +++ b/qa/suites/krbd/thrash/thrashers/pggrow.yaml @@ -0,0 +1,10 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 2 + chance_pgpnum_fix: 1 diff --git a/qa/suites/krbd/thrash/thrashers/upmap.yaml b/qa/suites/krbd/thrash/thrashers/upmap.yaml new file mode 100644 index 00000000..86b51709 --- /dev/null +++ b/qa/suites/krbd/thrash/thrashers/upmap.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + crush_tunables: optimal + conf: + mon: + mon osd initial require min compat client: luminous + log-whitelist: + - wrongly marked me down + - objects unfound and apparently lost +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgnum_shrink: 1 + chance_pgpnum_fix: 1 + chance_thrash_pg_upmap: 3 + chance_thrash_pg_upmap_items: 3 diff --git a/qa/suites/krbd/thrash/thrashosds-health.yaml b/qa/suites/krbd/thrash/thrashosds-health.yaml new file mode 120000 index 00000000..9124eb1a --- /dev/null +++ b/qa/suites/krbd/thrash/thrashosds-health.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/suites/krbd/thrash/workloads/.qa b/qa/suites/krbd/thrash/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/thrash/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/thrash/workloads/rbd_fio.yaml b/qa/suites/krbd/thrash/workloads/rbd_fio.yaml new file mode 100644 index 00000000..157210f5 --- /dev/null +++ b/qa/suites/krbd/thrash/workloads/rbd_fio.yaml @@ -0,0 +1,11 @@ +tasks: +- rbd_fio: + client.0: + fio-io-size: 100% + formats: [2] + features: [[layering,exclusive-lock]] + io-engine: libaio + rw: randrw + bs: 1024 + io-depth: 256 + runtime: 1200 diff --git a/qa/suites/krbd/thrash/workloads/rbd_workunit_suites_ffsb.yaml b/qa/suites/krbd/thrash/workloads/rbd_workunit_suites_ffsb.yaml new file mode 100644 index 00000000..4ae7d690 --- /dev/null +++ b/qa/suites/krbd/thrash/workloads/rbd_workunit_suites_ffsb.yaml @@ -0,0 +1,8 @@ +tasks: +- rbd: + all: + image_size: 20480 +- workunit: + clients: + all: + - suites/ffsb.sh diff --git a/qa/suites/krbd/unmap/% b/qa/suites/krbd/unmap/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/krbd/unmap/.qa b/qa/suites/krbd/unmap/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/unmap/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/unmap/ceph/.qa b/qa/suites/krbd/unmap/ceph/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/unmap/ceph/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/unmap/ceph/ceph.yaml b/qa/suites/krbd/unmap/ceph/ceph.yaml new file mode 100644 index 00000000..aee5779f --- /dev/null +++ b/qa/suites/krbd/unmap/ceph/ceph.yaml @@ -0,0 +1,14 @@ +overrides: + ceph: + crush_tunables: bobtail + mon_bind_addrvec: false + mon_bind_msgr2: false + conf: + global: + ms bind msgr2: false +tasks: +- install: +- ceph: +- 
exec: + client.0: + - "ceph osd getcrushmap -o /dev/stdout | crushtool -d - | sed -e 's/alg straw2/alg straw/g' | crushtool -c /dev/stdin -o /dev/stdout | ceph osd setcrushmap -i /dev/stdin" diff --git a/qa/suites/krbd/unmap/clusters/.qa b/qa/suites/krbd/unmap/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/unmap/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/unmap/clusters/separate-client.yaml b/qa/suites/krbd/unmap/clusters/separate-client.yaml new file mode 100644 index 00000000..be134318 --- /dev/null +++ b/qa/suites/krbd/unmap/clusters/separate-client.yaml @@ -0,0 +1,16 @@ +# fixed-1.yaml, but with client.0 on a separate target +overrides: + ceph-deploy: + conf: + global: + osd pool default size: 2 + osd crush chooseleaf type: 0 + osd pool default pg num: 128 + osd pool default pgp num: 128 +roles: +- [mon.a, mgr.x, osd.0, osd.1, osd.2] +- [client.0] +openstack: +- volumes: # attached to each instance + count: 3 + size: 10 # GB diff --git a/qa/suites/krbd/unmap/conf.yaml b/qa/suites/krbd/unmap/conf.yaml new file mode 100644 index 00000000..8984e8dc --- /dev/null +++ b/qa/suites/krbd/unmap/conf.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default features: 1 # pre-single-major is v3.13, so layering only diff --git a/qa/suites/krbd/unmap/filestore-xfs.yaml b/qa/suites/krbd/unmap/filestore-xfs.yaml new file mode 120000 index 00000000..41f2a9d1 --- /dev/null +++ b/qa/suites/krbd/unmap/filestore-xfs.yaml @@ -0,0 +1 @@ +.qa/objectstore/filestore-xfs.yaml \ No newline at end of file diff --git a/qa/suites/krbd/unmap/kernels/.qa b/qa/suites/krbd/unmap/kernels/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/unmap/kernels/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/unmap/kernels/pre-single-major.yaml b/qa/suites/krbd/unmap/kernels/pre-single-major.yaml new file mode 100644 index 00000000..a5636b45 --- /dev/null +++ b/qa/suites/krbd/unmap/kernels/pre-single-major.yaml @@ -0,0 +1,10 @@ +overrides: + kernel: + client.0: + branch: nightly_pre-single-major # v3.12.z +tasks: +- exec: + client.0: + - "modprobe -r rbd" + - "modprobe --first-time rbd" + - "test ! 
-f /sys/module/rbd/parameters/single_major" diff --git a/qa/suites/krbd/unmap/kernels/single-major-off.yaml b/qa/suites/krbd/unmap/kernels/single-major-off.yaml new file mode 100644 index 00000000..9dc2488e --- /dev/null +++ b/qa/suites/krbd/unmap/kernels/single-major-off.yaml @@ -0,0 +1,6 @@ +tasks: +- exec: + client.0: + - "modprobe -r rbd" + - "modprobe --first-time rbd single_major=0" + - "grep -q N /sys/module/rbd/parameters/single_major" diff --git a/qa/suites/krbd/unmap/kernels/single-major-on.yaml b/qa/suites/krbd/unmap/kernels/single-major-on.yaml new file mode 100644 index 00000000..c3889f34 --- /dev/null +++ b/qa/suites/krbd/unmap/kernels/single-major-on.yaml @@ -0,0 +1,6 @@ +tasks: +- exec: + client.0: + - "modprobe -r rbd" + - "modprobe --first-time rbd single_major=1" + - "grep -q Y /sys/module/rbd/parameters/single_major" diff --git a/qa/suites/krbd/unmap/tasks/.qa b/qa/suites/krbd/unmap/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/unmap/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/unmap/tasks/unmap.yaml b/qa/suites/krbd/unmap/tasks/unmap.yaml new file mode 100644 index 00000000..435061b4 --- /dev/null +++ b/qa/suites/krbd/unmap/tasks/unmap.yaml @@ -0,0 +1,5 @@ +tasks: +- cram: + clients: + client.0: + - src/test/cli-integration/rbd/unmap.t diff --git a/qa/suites/krbd/wac/.qa b/qa/suites/krbd/wac/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/wac/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/wac/sysfs/% b/qa/suites/krbd/wac/sysfs/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/krbd/wac/sysfs/.qa b/qa/suites/krbd/wac/sysfs/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/wac/sysfs/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/wac/sysfs/bluestore-bitmap.yaml b/qa/suites/krbd/wac/sysfs/bluestore-bitmap.yaml new file mode 120000 index 00000000..a59cf517 --- /dev/null +++ b/qa/suites/krbd/wac/sysfs/bluestore-bitmap.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-bitmap.yaml \ No newline at end of file diff --git a/qa/suites/krbd/wac/sysfs/ceph/.qa b/qa/suites/krbd/wac/sysfs/ceph/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/wac/sysfs/ceph/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/wac/sysfs/ceph/ceph.yaml b/qa/suites/krbd/wac/sysfs/ceph/ceph.yaml new file mode 100644 index 00000000..2030acb9 --- /dev/null +++ b/qa/suites/krbd/wac/sysfs/ceph/ceph.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/krbd/wac/sysfs/clusters/.qa b/qa/suites/krbd/wac/sysfs/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/wac/sysfs/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/wac/sysfs/clusters/fixed-1.yaml b/qa/suites/krbd/wac/sysfs/clusters/fixed-1.yaml new file mode 120000 index 00000000..02df5dd0 --- /dev/null +++ b/qa/suites/krbd/wac/sysfs/clusters/fixed-1.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-1.yaml \ No newline at end of file diff --git a/qa/suites/krbd/wac/sysfs/conf.yaml b/qa/suites/krbd/wac/sysfs/conf.yaml new file mode 100644 index 00000000..5e7ed992 --- /dev/null +++ b/qa/suites/krbd/wac/sysfs/conf.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false + client: + rbd default features: 37 diff --git 
a/qa/suites/krbd/wac/sysfs/tasks/.qa b/qa/suites/krbd/wac/sysfs/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/wac/sysfs/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/wac/sysfs/tasks/stable_writes.yaml b/qa/suites/krbd/wac/sysfs/tasks/stable_writes.yaml new file mode 100644 index 00000000..cd1ba930 --- /dev/null +++ b/qa/suites/krbd/wac/sysfs/tasks/stable_writes.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - rbd/krbd_stable_writes.sh diff --git a/qa/suites/krbd/wac/wac/% b/qa/suites/krbd/wac/wac/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/krbd/wac/wac/.qa b/qa/suites/krbd/wac/wac/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/wac/wac/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/wac/wac/bluestore-bitmap.yaml b/qa/suites/krbd/wac/wac/bluestore-bitmap.yaml new file mode 120000 index 00000000..a59cf517 --- /dev/null +++ b/qa/suites/krbd/wac/wac/bluestore-bitmap.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-bitmap.yaml \ No newline at end of file diff --git a/qa/suites/krbd/wac/wac/ceph/.qa b/qa/suites/krbd/wac/wac/ceph/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/wac/wac/ceph/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/wac/wac/ceph/ceph.yaml b/qa/suites/krbd/wac/wac/ceph/ceph.yaml new file mode 100644 index 00000000..2030acb9 --- /dev/null +++ b/qa/suites/krbd/wac/wac/ceph/ceph.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/krbd/wac/wac/clusters/.qa b/qa/suites/krbd/wac/wac/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/wac/wac/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/wac/wac/clusters/fixed-3.yaml b/qa/suites/krbd/wac/wac/clusters/fixed-3.yaml new file mode 120000 index 00000000..f75a848b --- /dev/null +++ b/qa/suites/krbd/wac/wac/clusters/fixed-3.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-3.yaml \ No newline at end of file diff --git a/qa/suites/krbd/wac/wac/conf.yaml b/qa/suites/krbd/wac/wac/conf.yaml new file mode 100644 index 00000000..5e7ed992 --- /dev/null +++ b/qa/suites/krbd/wac/wac/conf.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false + client: + rbd default features: 37 diff --git a/qa/suites/krbd/wac/wac/tasks/.qa b/qa/suites/krbd/wac/wac/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/wac/wac/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/wac/wac/tasks/wac.yaml b/qa/suites/krbd/wac/wac/tasks/wac.yaml new file mode 100644 index 00000000..52dabc38 --- /dev/null +++ b/qa/suites/krbd/wac/wac/tasks/wac.yaml @@ -0,0 +1,11 @@ +tasks: +- exec: + client.0: + - "dmesg -C" +- rbd: + all: + fs_type: ext4 +- workunit: + clients: + all: + - suites/wac.sh diff --git a/qa/suites/krbd/wac/wac/verify/.qa b/qa/suites/krbd/wac/wac/verify/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/krbd/wac/wac/verify/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/krbd/wac/wac/verify/many-resets.yaml b/qa/suites/krbd/wac/wac/verify/many-resets.yaml new file mode 100644 index 00000000..d69f6503 --- /dev/null +++ b/qa/suites/krbd/wac/wac/verify/many-resets.yaml @@ -0,0 +1,12 @@ +overrides: + ceph: + conf: + global: + ms inject 
socket failures: 500 + log-whitelist: + - \(OSD_SLOW_PING_TIME +tasks: +- exec: + client.0: + - "dmesg | grep -q 'libceph: osd.* socket closed'" + - "dmesg | grep -q 'libceph: osd.* socket error on write'" diff --git a/qa/suites/krbd/wac/wac/verify/no-resets.yaml b/qa/suites/krbd/wac/wac/verify/no-resets.yaml new file mode 100644 index 00000000..2728479d --- /dev/null +++ b/qa/suites/krbd/wac/wac/verify/no-resets.yaml @@ -0,0 +1,5 @@ +tasks: +- exec: + client.0: + - "! dmesg | grep -q 'libceph: osd.* socket closed'" + - "! dmesg | grep -q 'libceph: osd.* socket error on write'" diff --git a/qa/suites/marginal/.qa b/qa/suites/marginal/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/marginal/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/marginal/basic/% b/qa/suites/marginal/basic/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/marginal/basic/.qa b/qa/suites/marginal/basic/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/marginal/basic/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/marginal/basic/clusters/.qa b/qa/suites/marginal/basic/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/marginal/basic/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/marginal/basic/clusters/fixed-3.yaml b/qa/suites/marginal/basic/clusters/fixed-3.yaml new file mode 100644 index 00000000..5e23c9e4 --- /dev/null +++ b/qa/suites/marginal/basic/clusters/fixed-3.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.c, osd.0, osd.1, osd.2] +- [mon.b, mgr.x, mds.a, osd.3, osd.4, osd.5] +- [client.0] diff --git a/qa/suites/marginal/basic/tasks/.qa b/qa/suites/marginal/basic/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/marginal/basic/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/marginal/basic/tasks/kclient_workunit_suites_blogbench.yaml b/qa/suites/marginal/basic/tasks/kclient_workunit_suites_blogbench.yaml new file mode 100644 index 00000000..4f25d806 --- /dev/null +++ b/qa/suites/marginal/basic/tasks/kclient_workunit_suites_blogbench.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: + - suites/blogbench.sh diff --git a/qa/suites/marginal/basic/tasks/kclient_workunit_suites_fsx.yaml b/qa/suites/marginal/basic/tasks/kclient_workunit_suites_fsx.yaml new file mode 100644 index 00000000..a0d2e765 --- /dev/null +++ b/qa/suites/marginal/basic/tasks/kclient_workunit_suites_fsx.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: + - suites/fsx.sh diff --git a/qa/suites/marginal/fs-misc/% b/qa/suites/marginal/fs-misc/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/marginal/fs-misc/.qa b/qa/suites/marginal/fs-misc/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/marginal/fs-misc/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/marginal/fs-misc/clusters/.qa b/qa/suites/marginal/fs-misc/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/marginal/fs-misc/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/marginal/fs-misc/clusters/two_clients.yaml b/qa/suites/marginal/fs-misc/clusters/two_clients.yaml new file mode 100644 index 00000000..19d312dc --- /dev/null +++ 
b/qa/suites/marginal/fs-misc/clusters/two_clients.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.b, mon.c, mgr.x, mds.a, osd.0, osd.1, osd.2] +- [client.1] +- [client.0] diff --git a/qa/suites/marginal/fs-misc/tasks/.qa b/qa/suites/marginal/fs-misc/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/marginal/fs-misc/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/marginal/fs-misc/tasks/locktest.yaml b/qa/suites/marginal/fs-misc/tasks/locktest.yaml new file mode 100644 index 00000000..444bb1f1 --- /dev/null +++ b/qa/suites/marginal/fs-misc/tasks/locktest.yaml @@ -0,0 +1,5 @@ +tasks: +- install: +- ceph: +- kclient: +- locktest: [client.0, client.1] diff --git a/qa/suites/marginal/mds_restart/% b/qa/suites/marginal/mds_restart/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/marginal/mds_restart/.qa b/qa/suites/marginal/mds_restart/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/marginal/mds_restart/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/marginal/mds_restart/clusters/.qa b/qa/suites/marginal/mds_restart/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/marginal/mds_restart/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/marginal/mds_restart/clusters/one_mds.yaml b/qa/suites/marginal/mds_restart/clusters/one_mds.yaml new file mode 100644 index 00000000..45c3e80e --- /dev/null +++ b/qa/suites/marginal/mds_restart/clusters/one_mds.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.b, mon.c, mgr.x, osd.0, osd.1, osd.2] +- [mds.a] +- [client.0] diff --git a/qa/suites/marginal/mds_restart/tasks/.qa b/qa/suites/marginal/mds_restart/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/marginal/mds_restart/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/marginal/mds_restart/tasks/restart-workunit-backtraces.yaml b/qa/suites/marginal/mds_restart/tasks/restart-workunit-backtraces.yaml new file mode 100644 index 00000000..d086d4cf --- /dev/null +++ b/qa/suites/marginal/mds_restart/tasks/restart-workunit-backtraces.yaml @@ -0,0 +1,11 @@ +tasks: +- install: +- ceph: + conf: + mds: + mds log segment size: 16384 + mds log max segments: 1 +- restart: + exec: + client.0: + - test-backtraces.py diff --git a/qa/suites/marginal/multimds/% b/qa/suites/marginal/multimds/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/marginal/multimds/.qa b/qa/suites/marginal/multimds/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/marginal/multimds/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/marginal/multimds/clusters/.qa b/qa/suites/marginal/multimds/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/marginal/multimds/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/marginal/multimds/clusters/3-node-3-mds.yaml b/qa/suites/marginal/multimds/clusters/3-node-3-mds.yaml new file mode 100644 index 00000000..2995ea9f --- /dev/null +++ b/qa/suites/marginal/multimds/clusters/3-node-3-mds.yaml @@ -0,0 +1,5 @@ +roles: +- [mon.a, mon.c, mds.a, osd.0, osd.1, osd.2] +- [mon.b, mgr.x, mds.b, mds.c, osd.3, osd.4, osd.5] +- [client.0] +- [client.1] diff --git a/qa/suites/marginal/multimds/clusters/3-node-9-mds.yaml b/qa/suites/marginal/multimds/clusters/3-node-9-mds.yaml new file mode 
100644 index 00000000..083a07c2 --- /dev/null +++ b/qa/suites/marginal/multimds/clusters/3-node-9-mds.yaml @@ -0,0 +1,5 @@ +roles: +- [mon.a, mon.c, mds.a, mds.b, mds.c, mds.d, osd.0, osd.1, osd.2] +- [mon.b, mgr.x, mds.e, mds.f, mds.g, mds.h, mds.i, osd.3, osd.4, osd.5] +- [client.0] +- [client.1] diff --git a/qa/suites/marginal/multimds/mounts/.qa b/qa/suites/marginal/multimds/mounts/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/marginal/multimds/mounts/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/marginal/multimds/mounts/ceph-fuse.yaml b/qa/suites/marginal/multimds/mounts/ceph-fuse.yaml new file mode 100644 index 00000000..55d8beb0 --- /dev/null +++ b/qa/suites/marginal/multimds/mounts/ceph-fuse.yaml @@ -0,0 +1,7 @@ +tasks: +- install: +- ceph: + conf: + client: + fuse_default_permissions: 0 +- ceph-fuse: diff --git a/qa/suites/marginal/multimds/mounts/kclient.yaml b/qa/suites/marginal/multimds/mounts/kclient.yaml new file mode 100644 index 00000000..c18db8f5 --- /dev/null +++ b/qa/suites/marginal/multimds/mounts/kclient.yaml @@ -0,0 +1,4 @@ +tasks: +- install: +- ceph: +- kclient: diff --git a/qa/suites/marginal/multimds/tasks/.qa b/qa/suites/marginal/multimds/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/marginal/multimds/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/marginal/multimds/tasks/workunit_misc.yaml b/qa/suites/marginal/multimds/tasks/workunit_misc.yaml new file mode 100644 index 00000000..aa62b9e8 --- /dev/null +++ b/qa/suites/marginal/multimds/tasks/workunit_misc.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - fs/misc diff --git a/qa/suites/marginal/multimds/tasks/workunit_suites_blogbench.yaml b/qa/suites/marginal/multimds/tasks/workunit_suites_blogbench.yaml new file mode 100644 index 00000000..4c1fcc11 --- /dev/null +++ b/qa/suites/marginal/multimds/tasks/workunit_suites_blogbench.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/blogbench.sh diff --git a/qa/suites/marginal/multimds/tasks/workunit_suites_dbench.yaml b/qa/suites/marginal/multimds/tasks/workunit_suites_dbench.yaml new file mode 100644 index 00000000..41b2bc8e --- /dev/null +++ b/qa/suites/marginal/multimds/tasks/workunit_suites_dbench.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/dbench.sh diff --git a/qa/suites/marginal/multimds/tasks/workunit_suites_fsstress.yaml b/qa/suites/marginal/multimds/tasks/workunit_suites_fsstress.yaml new file mode 100644 index 00000000..ddb18fb7 --- /dev/null +++ b/qa/suites/marginal/multimds/tasks/workunit_suites_fsstress.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/marginal/multimds/tasks/workunit_suites_fsync.yaml b/qa/suites/marginal/multimds/tasks/workunit_suites_fsync.yaml new file mode 100644 index 00000000..7efa1adb --- /dev/null +++ b/qa/suites/marginal/multimds/tasks/workunit_suites_fsync.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/fsync-tester.sh diff --git a/qa/suites/marginal/multimds/tasks/workunit_suites_pjd.yaml b/qa/suites/marginal/multimds/tasks/workunit_suites_pjd.yaml new file mode 100644 index 00000000..a1937ead --- /dev/null +++ b/qa/suites/marginal/multimds/tasks/workunit_suites_pjd.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + conf: + client: + fuse default permissions: false + fuse set user groups: true +tasks: +- workunit: + clients: + all: + - suites/pjd.sh diff 
--git a/qa/suites/marginal/multimds/tasks/workunit_suites_truncate_delay.yaml b/qa/suites/marginal/multimds/tasks/workunit_suites_truncate_delay.yaml new file mode 100644 index 00000000..3aa5f882 --- /dev/null +++ b/qa/suites/marginal/multimds/tasks/workunit_suites_truncate_delay.yaml @@ -0,0 +1,15 @@ +tasks: +- install: +- ceph: + conf: + client: + ms_inject_delay_probability: 1 + ms_inject_delay_type: osd + ms_inject_delay_max: 5 + client_oc_max_dirty_age: 1 +- ceph-fuse: +- exec: + client.0: + - dd if=/dev/zero of=./foo count=100 + - sleep 2 + - truncate --size 0 ./foo diff --git a/qa/suites/marginal/multimds/thrash/.qa b/qa/suites/marginal/multimds/thrash/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/marginal/multimds/thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/marginal/multimds/thrash/exports.yaml b/qa/suites/marginal/multimds/thrash/exports.yaml new file mode 100644 index 00000000..240b46df --- /dev/null +++ b/qa/suites/marginal/multimds/thrash/exports.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + mds: + mds thrash exports: 1 diff --git a/qa/suites/marginal/multimds/thrash/normal.yaml b/qa/suites/marginal/multimds/thrash/normal.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/mixed-clients/.qa b/qa/suites/mixed-clients/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/mixed-clients/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/mixed-clients/basic/.qa b/qa/suites/mixed-clients/basic/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/mixed-clients/basic/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/mixed-clients/basic/clusters/.qa b/qa/suites/mixed-clients/basic/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/mixed-clients/basic/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/mixed-clients/basic/clusters/fixed-3.yaml b/qa/suites/mixed-clients/basic/clusters/fixed-3.yaml new file mode 100644 index 00000000..134bca1b --- /dev/null +++ b/qa/suites/mixed-clients/basic/clusters/fixed-3.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mgr.x, mds.a, osd.0, osd.1] +- [mon.b, mon.c, osd.2, osd.3, client.0] +- [client.1] diff --git a/qa/suites/mixed-clients/basic/objectstore b/qa/suites/mixed-clients/basic/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/mixed-clients/basic/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/mixed-clients/basic/tasks/.qa b/qa/suites/mixed-clients/basic/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/mixed-clients/basic/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_dbench_iozone.yaml b/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_dbench_iozone.yaml new file mode 100644 index 00000000..bb347be7 --- /dev/null +++ b/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_dbench_iozone.yaml @@ -0,0 +1,26 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false +tasks: +- install: + branch: dumpling +- ceph: +- parallel: + - user-workload + - kclient-workload +user-workload: + sequential: + - ceph-fuse: [client.0] + - workunit: + clients: + client.0: + - suites/iozone.sh +kclient-workload: + sequential: + - kclient: [client.1] + - 
workunit: + clients: + client.1: + - suites/dbench.sh diff --git a/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml b/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml new file mode 100644 index 00000000..2c32a61e --- /dev/null +++ b/qa/suites/mixed-clients/basic/tasks/kernel_cfuse_workunits_untarbuild_blogbench.yaml @@ -0,0 +1,26 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false +tasks: +- install: + branch: dumpling +- ceph: +- parallel: + - user-workload + - kclient-workload +user-workload: + sequential: + - ceph-fuse: [client.0] + - workunit: + clients: + client.0: + - suites/blogbench.sh +kclient-workload: + sequential: + - kclient: [client.1] + - workunit: + clients: + client.1: + - kernel_untar_build.sh diff --git a/qa/suites/multimds/.qa b/qa/suites/multimds/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/multimds/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/multimds/basic/% b/qa/suites/multimds/basic/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/multimds/basic/.qa b/qa/suites/multimds/basic/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/multimds/basic/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/multimds/basic/0-supported-random-distro$ b/qa/suites/multimds/basic/0-supported-random-distro$ new file mode 120000 index 00000000..78f2991b --- /dev/null +++ b/qa/suites/multimds/basic/0-supported-random-distro$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$/ \ No newline at end of file diff --git a/qa/suites/multimds/basic/begin.yaml b/qa/suites/multimds/basic/begin.yaml new file mode 120000 index 00000000..d64b08e9 --- /dev/null +++ b/qa/suites/multimds/basic/begin.yaml @@ -0,0 +1 @@ +../../fs/basic_workload/begin.yaml \ No newline at end of file diff --git a/qa/suites/multimds/basic/clusters/.qa b/qa/suites/multimds/basic/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/multimds/basic/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/multimds/basic/clusters/3-mds.yaml b/qa/suites/multimds/basic/clusters/3-mds.yaml new file mode 120000 index 00000000..d7ec418e --- /dev/null +++ b/qa/suites/multimds/basic/clusters/3-mds.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/3-mds.yaml \ No newline at end of file diff --git a/qa/suites/multimds/basic/clusters/9-mds.yaml b/qa/suites/multimds/basic/clusters/9-mds.yaml new file mode 120000 index 00000000..eeb9225a --- /dev/null +++ b/qa/suites/multimds/basic/clusters/9-mds.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/9-mds.yaml \ No newline at end of file diff --git a/qa/suites/multimds/basic/conf b/qa/suites/multimds/basic/conf new file mode 120000 index 00000000..16e8cc44 --- /dev/null +++ b/qa/suites/multimds/basic/conf @@ -0,0 +1 @@ +.qa/cephfs/conf \ No newline at end of file diff --git a/qa/suites/multimds/basic/inline b/qa/suites/multimds/basic/inline new file mode 120000 index 00000000..e7d71330 --- /dev/null +++ b/qa/suites/multimds/basic/inline @@ -0,0 +1 @@ +../../fs/basic_workload/inline \ No newline at end of file diff --git a/qa/suites/multimds/basic/mount b/qa/suites/multimds/basic/mount new file mode 120000 index 00000000..e3600f45 --- /dev/null +++ b/qa/suites/multimds/basic/mount @@ -0,0 +1 @@ +.qa/cephfs/mount/ \ No newline at end of file diff --git a/qa/suites/multimds/basic/objectstore-ec 
b/qa/suites/multimds/basic/objectstore-ec new file mode 120000 index 00000000..affe2949 --- /dev/null +++ b/qa/suites/multimds/basic/objectstore-ec @@ -0,0 +1 @@ +.qa/cephfs/objectstore-ec \ No newline at end of file diff --git a/qa/suites/multimds/basic/overrides/% b/qa/suites/multimds/basic/overrides/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/multimds/basic/overrides/.qa b/qa/suites/multimds/basic/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/multimds/basic/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/multimds/basic/overrides/basic b/qa/suites/multimds/basic/overrides/basic new file mode 120000 index 00000000..75173551 --- /dev/null +++ b/qa/suites/multimds/basic/overrides/basic @@ -0,0 +1 @@ +../../../fs/basic_workload/overrides \ No newline at end of file diff --git a/qa/suites/multimds/basic/overrides/fuse-default-perm-no.yaml b/qa/suites/multimds/basic/overrides/fuse-default-perm-no.yaml new file mode 120000 index 00000000..1da96a10 --- /dev/null +++ b/qa/suites/multimds/basic/overrides/fuse-default-perm-no.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/fuse/default-perm/no.yaml \ No newline at end of file diff --git a/qa/suites/multimds/basic/q_check_counter/.qa b/qa/suites/multimds/basic/q_check_counter/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/multimds/basic/q_check_counter/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/multimds/basic/q_check_counter/check_counter.yaml b/qa/suites/multimds/basic/q_check_counter/check_counter.yaml new file mode 100644 index 00000000..1018b1e4 --- /dev/null +++ b/qa/suites/multimds/basic/q_check_counter/check_counter.yaml @@ -0,0 +1,8 @@ + +tasks: +- check-counter: + counters: + mds: + - "mds.exported" + - "mds.imported" + diff --git a/qa/suites/multimds/basic/tasks/.qa b/qa/suites/multimds/basic/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/multimds/basic/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/multimds/basic/tasks/cephfs_test_exports.yaml b/qa/suites/multimds/basic/tasks/cephfs_test_exports.yaml new file mode 100644 index 00000000..46334fe1 --- /dev/null +++ b/qa/suites/multimds/basic/tasks/cephfs_test_exports.yaml @@ -0,0 +1,5 @@ +tasks: +- cephfs_test_runner: + fail_on_skip: false + modules: + - tasks.cephfs.test_exports diff --git a/qa/suites/multimds/basic/tasks/cephfs_test_snapshots.yaml b/qa/suites/multimds/basic/tasks/cephfs_test_snapshots.yaml new file mode 100644 index 00000000..c86aaac9 --- /dev/null +++ b/qa/suites/multimds/basic/tasks/cephfs_test_snapshots.yaml @@ -0,0 +1,13 @@ +overrides: + check-counter: + dry_run: true + ceph: + log-whitelist: + - evicting unresponsive client + - RECENT_CRASH + +tasks: +- cephfs_test_runner: + fail_on_skip: false + modules: + - tasks.cephfs.test_snapshots diff --git a/qa/suites/multimds/basic/tasks/cfuse_workunit_kernel_untar_build.yaml b/qa/suites/multimds/basic/tasks/cfuse_workunit_kernel_untar_build.yaml new file mode 100644 index 00000000..8dbc24a9 --- /dev/null +++ b/qa/suites/multimds/basic/tasks/cfuse_workunit_kernel_untar_build.yaml @@ -0,0 +1,10 @@ +overrides: + ceph: + conf: + client: + fuse_default_permissions: 0 +tasks: +- workunit: + clients: + all: + - kernel_untar_build.sh diff --git a/qa/suites/multimds/basic/tasks/cfuse_workunit_misc.yaml b/qa/suites/multimds/basic/tasks/cfuse_workunit_misc.yaml new file mode 100644 index 
00000000..58866a27 --- /dev/null +++ b/qa/suites/multimds/basic/tasks/cfuse_workunit_misc.yaml @@ -0,0 +1,6 @@ +tasks: +- workunit: + clients: + all: + - fs/misc + diff --git a/qa/suites/multimds/basic/tasks/cfuse_workunit_norstats.yaml b/qa/suites/multimds/basic/tasks/cfuse_workunit_norstats.yaml new file mode 100644 index 00000000..3776131b --- /dev/null +++ b/qa/suites/multimds/basic/tasks/cfuse_workunit_norstats.yaml @@ -0,0 +1,11 @@ +tasks: +- workunit: + clients: + all: + - fs/norstats + +overrides: + ceph: + conf: + client: + client dirsize rbytes: false diff --git a/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_blogbench.yaml b/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_blogbench.yaml new file mode 120000 index 00000000..8702f4f3 --- /dev/null +++ b/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_blogbench.yaml @@ -0,0 +1 @@ +.qa/cephfs/tasks/cfuse_workunit_suites_blogbench.yaml \ No newline at end of file diff --git a/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_dbench.yaml b/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_dbench.yaml new file mode 120000 index 00000000..b0f876c3 --- /dev/null +++ b/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_dbench.yaml @@ -0,0 +1 @@ +.qa/cephfs/tasks/cfuse_workunit_suites_dbench.yaml \ No newline at end of file diff --git a/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_ffsb.yaml b/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_ffsb.yaml new file mode 120000 index 00000000..01e889b2 --- /dev/null +++ b/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_ffsb.yaml @@ -0,0 +1 @@ +.qa/cephfs/tasks/cfuse_workunit_suites_ffsb.yaml \ No newline at end of file diff --git a/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_fsstress.yaml new file mode 120000 index 00000000..c2e859ff --- /dev/null +++ b/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_fsstress.yaml @@ -0,0 +1 @@ +.qa/cephfs/tasks/cfuse_workunit_suites_fsstress.yaml \ No newline at end of file diff --git a/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_fsx.yaml b/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_fsx.yaml new file mode 100644 index 00000000..8b2b1ab5 --- /dev/null +++ b/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_fsx.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + all: + - suites/fsx.sh diff --git a/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_pjd.yaml b/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_pjd.yaml new file mode 100644 index 00000000..37e315f7 --- /dev/null +++ b/qa/suites/multimds/basic/tasks/cfuse_workunit_suites_pjd.yaml @@ -0,0 +1,12 @@ +overrides: + ceph: + conf: + client: + fuse set user groups: true + fuse default permissions: false +tasks: +- workunit: + timeout: 6h + clients: + all: + - suites/pjd.sh diff --git a/qa/suites/multimds/thrash/% b/qa/suites/multimds/thrash/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/multimds/thrash/.qa b/qa/suites/multimds/thrash/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/multimds/thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/multimds/thrash/0-supported-random-distro$ b/qa/suites/multimds/thrash/0-supported-random-distro$ new file mode 120000 index 00000000..78f2991b --- /dev/null +++ b/qa/suites/multimds/thrash/0-supported-random-distro$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$/ \ No newline at end of file diff --git 
a/qa/suites/multimds/thrash/begin.yaml b/qa/suites/multimds/thrash/begin.yaml new file mode 120000 index 00000000..42459d1b --- /dev/null +++ b/qa/suites/multimds/thrash/begin.yaml @@ -0,0 +1 @@ +../../fs/thrash/begin.yaml \ No newline at end of file diff --git a/qa/suites/multimds/thrash/ceph-thrash b/qa/suites/multimds/thrash/ceph-thrash new file mode 120000 index 00000000..d632af9d --- /dev/null +++ b/qa/suites/multimds/thrash/ceph-thrash @@ -0,0 +1 @@ +../../fs/thrash/ceph-thrash/ \ No newline at end of file diff --git a/qa/suites/multimds/thrash/clusters/.qa b/qa/suites/multimds/thrash/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/multimds/thrash/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/multimds/thrash/clusters/3-mds-2-standby.yaml b/qa/suites/multimds/thrash/clusters/3-mds-2-standby.yaml new file mode 100644 index 00000000..5936fcb5 --- /dev/null +++ b/qa/suites/multimds/thrash/clusters/3-mds-2-standby.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.c, mds.a, mds.c, mds.e, osd.0, osd.1, osd.2] +- [mon.b, mgr.x, mds.b, mds.d, osd.3, osd.4, osd.5] +- [client.0] diff --git a/qa/suites/multimds/thrash/clusters/9-mds-3-standby.yaml b/qa/suites/multimds/thrash/clusters/9-mds-3-standby.yaml new file mode 100644 index 00000000..6775e621 --- /dev/null +++ b/qa/suites/multimds/thrash/clusters/9-mds-3-standby.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.c, mds.a, mds.c, mds.e, mds.g, mds.i, mds.k, osd.0, osd.1, osd.2] +- [mon.b, mgr.x, mds.b, mds.d, mds.f, mds.h, mds.j, mds.l, osd.3, osd.4, osd.5] +- [client.0] diff --git a/qa/suites/multimds/thrash/conf b/qa/suites/multimds/thrash/conf new file mode 120000 index 00000000..16e8cc44 --- /dev/null +++ b/qa/suites/multimds/thrash/conf @@ -0,0 +1 @@ +.qa/cephfs/conf \ No newline at end of file diff --git a/qa/suites/multimds/thrash/mount b/qa/suites/multimds/thrash/mount new file mode 120000 index 00000000..e3600f45 --- /dev/null +++ b/qa/suites/multimds/thrash/mount @@ -0,0 +1 @@ +.qa/cephfs/mount/ \ No newline at end of file diff --git a/qa/suites/multimds/thrash/msgr-failures b/qa/suites/multimds/thrash/msgr-failures new file mode 120000 index 00000000..534e0d84 --- /dev/null +++ b/qa/suites/multimds/thrash/msgr-failures @@ -0,0 +1 @@ +../../fs/thrash/msgr-failures/ \ No newline at end of file diff --git a/qa/suites/multimds/thrash/objectstore-ec b/qa/suites/multimds/thrash/objectstore-ec new file mode 120000 index 00000000..affe2949 --- /dev/null +++ b/qa/suites/multimds/thrash/objectstore-ec @@ -0,0 +1 @@ +.qa/cephfs/objectstore-ec \ No newline at end of file diff --git a/qa/suites/multimds/thrash/overrides/% b/qa/suites/multimds/thrash/overrides/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/multimds/thrash/overrides/.qa b/qa/suites/multimds/thrash/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/multimds/thrash/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/multimds/thrash/overrides/fuse-default-perm-no.yaml b/qa/suites/multimds/thrash/overrides/fuse-default-perm-no.yaml new file mode 120000 index 00000000..1da96a10 --- /dev/null +++ b/qa/suites/multimds/thrash/overrides/fuse-default-perm-no.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/fuse/default-perm/no.yaml \ No newline at end of file diff --git a/qa/suites/multimds/thrash/overrides/thrash b/qa/suites/multimds/thrash/overrides/thrash new file mode 120000 index 00000000..1f0c36d2 --- /dev/null +++ 
b/qa/suites/multimds/thrash/overrides/thrash @@ -0,0 +1 @@ +../../../fs/thrash/overrides/ \ No newline at end of file diff --git a/qa/suites/multimds/thrash/overrides/thrash_debug.yaml b/qa/suites/multimds/thrash/overrides/thrash_debug.yaml new file mode 100644 index 00000000..2243037c --- /dev/null +++ b/qa/suites/multimds/thrash/overrides/thrash_debug.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + mds: + debug ms: 10 + client: + debug ms: 10 diff --git a/qa/suites/multimds/thrash/tasks/.qa b/qa/suites/multimds/thrash/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/multimds/thrash/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/multimds/thrash/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/multimds/thrash/tasks/cfuse_workunit_suites_fsstress.yaml new file mode 120000 index 00000000..324538ec --- /dev/null +++ b/qa/suites/multimds/thrash/tasks/cfuse_workunit_suites_fsstress.yaml @@ -0,0 +1 @@ +../../../fs/thrash/tasks/cfuse_workunit_suites_fsstress.yaml \ No newline at end of file diff --git a/qa/suites/multimds/thrash/tasks/cfuse_workunit_suites_pjd.yaml b/qa/suites/multimds/thrash/tasks/cfuse_workunit_suites_pjd.yaml new file mode 120000 index 00000000..a6852a2a --- /dev/null +++ b/qa/suites/multimds/thrash/tasks/cfuse_workunit_suites_pjd.yaml @@ -0,0 +1 @@ +../../../fs/thrash/tasks/cfuse_workunit_suites_pjd.yaml \ No newline at end of file diff --git a/qa/suites/multimds/verify/% b/qa/suites/multimds/verify/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/multimds/verify/.qa b/qa/suites/multimds/verify/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/multimds/verify/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/multimds/verify/begin.yaml b/qa/suites/multimds/verify/begin.yaml new file mode 120000 index 00000000..a199d468 --- /dev/null +++ b/qa/suites/multimds/verify/begin.yaml @@ -0,0 +1 @@ +../../fs/verify/begin.yaml \ No newline at end of file diff --git a/qa/suites/multimds/verify/centos_latest.yaml b/qa/suites/multimds/verify/centos_latest.yaml new file mode 120000 index 00000000..bd9854e7 --- /dev/null +++ b/qa/suites/multimds/verify/centos_latest.yaml @@ -0,0 +1 @@ +.qa/distros/supported/centos_latest.yaml \ No newline at end of file diff --git a/qa/suites/multimds/verify/clusters/.qa b/qa/suites/multimds/verify/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/multimds/verify/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/multimds/verify/clusters/3-mds.yaml b/qa/suites/multimds/verify/clusters/3-mds.yaml new file mode 120000 index 00000000..d7ec418e --- /dev/null +++ b/qa/suites/multimds/verify/clusters/3-mds.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/3-mds.yaml \ No newline at end of file diff --git a/qa/suites/multimds/verify/clusters/9-mds.yaml b/qa/suites/multimds/verify/clusters/9-mds.yaml new file mode 120000 index 00000000..eeb9225a --- /dev/null +++ b/qa/suites/multimds/verify/clusters/9-mds.yaml @@ -0,0 +1 @@ +.qa/cephfs/clusters/9-mds.yaml \ No newline at end of file diff --git a/qa/suites/multimds/verify/conf b/qa/suites/multimds/verify/conf new file mode 120000 index 00000000..16e8cc44 --- /dev/null +++ b/qa/suites/multimds/verify/conf @@ -0,0 +1 @@ +.qa/cephfs/conf \ No newline at end of file diff --git a/qa/suites/multimds/verify/mount b/qa/suites/multimds/verify/mount new file mode 120000 index 00000000..e3600f45 --- 
/dev/null +++ b/qa/suites/multimds/verify/mount @@ -0,0 +1 @@ +.qa/cephfs/mount/ \ No newline at end of file diff --git a/qa/suites/multimds/verify/objectstore-ec b/qa/suites/multimds/verify/objectstore-ec new file mode 120000 index 00000000..affe2949 --- /dev/null +++ b/qa/suites/multimds/verify/objectstore-ec @@ -0,0 +1 @@ +.qa/cephfs/objectstore-ec \ No newline at end of file diff --git a/qa/suites/multimds/verify/overrides/% b/qa/suites/multimds/verify/overrides/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/multimds/verify/overrides/.qa b/qa/suites/multimds/verify/overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/multimds/verify/overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/multimds/verify/overrides/fuse-default-perm-no.yaml b/qa/suites/multimds/verify/overrides/fuse-default-perm-no.yaml new file mode 120000 index 00000000..1da96a10 --- /dev/null +++ b/qa/suites/multimds/verify/overrides/fuse-default-perm-no.yaml @@ -0,0 +1 @@ +.qa/cephfs/overrides/fuse/default-perm/no.yaml \ No newline at end of file diff --git a/qa/suites/multimds/verify/overrides/verify b/qa/suites/multimds/verify/overrides/verify new file mode 120000 index 00000000..3ded92ed --- /dev/null +++ b/qa/suites/multimds/verify/overrides/verify @@ -0,0 +1 @@ +../../../fs/verify/overrides/ \ No newline at end of file diff --git a/qa/suites/multimds/verify/tasks b/qa/suites/multimds/verify/tasks new file mode 120000 index 00000000..f0edfbd4 --- /dev/null +++ b/qa/suites/multimds/verify/tasks @@ -0,0 +1 @@ +../../fs/verify/tasks/ \ No newline at end of file diff --git a/qa/suites/multimds/verify/validater b/qa/suites/multimds/verify/validater new file mode 120000 index 00000000..0c7f8a50 --- /dev/null +++ b/qa/suites/multimds/verify/validater @@ -0,0 +1 @@ +../../fs/verify/validater/ \ No newline at end of file diff --git a/qa/suites/perf-basic/% b/qa/suites/perf-basic/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/perf-basic/.qa b/qa/suites/perf-basic/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/perf-basic/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/perf-basic/ceph.yaml b/qa/suites/perf-basic/ceph.yaml new file mode 100644 index 00000000..43807c69 --- /dev/null +++ b/qa/suites/perf-basic/ceph.yaml @@ -0,0 +1,24 @@ +meta: +- desc: | + perf-basic is a basic performance suite. + Must be run on bare-metal machines. + On VMs performance results will be inconsistent + and can't be compared across runs. + Run ceph on a single node. + Use xfs beneath the osds. 
+ Setup rgw on client.0 + +roles: +- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] +tasks: +- install: +- ceph: + fs: xfs + wait-for-scrub: false + log-whitelist: + - \(PG_ + - \(OSD_ + - \(OBJECT_ + - overall HEALTH +- rgw: [client.0] +- ssh_keys: diff --git a/qa/suites/perf-basic/objectstore/.qa b/qa/suites/perf-basic/objectstore/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/perf-basic/objectstore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/perf-basic/objectstore/bluestore.yaml b/qa/suites/perf-basic/objectstore/bluestore.yaml new file mode 100644 index 00000000..f5793d76 --- /dev/null +++ b/qa/suites/perf-basic/objectstore/bluestore.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + fs: xfs + conf: + osd: + osd objectstore: bluestore + bluestore block size: 96636764160 + ceph-deploy: + fs: xfs + bluestore: yes + conf: + osd: + osd objectstore: bluestore + bluestore block size: 96636764160 + diff --git a/qa/suites/perf-basic/objectstore/filestore-xfs.yaml b/qa/suites/perf-basic/objectstore/filestore-xfs.yaml new file mode 100644 index 00000000..f7aa0dd7 --- /dev/null +++ b/qa/suites/perf-basic/objectstore/filestore-xfs.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + fs: xfs + conf: + osd: + osd objectstore: filestore + osd sloppy crc: true + ceph-deploy: + fs: xfs + filestore: True + conf: + osd: + osd objectstore: filestore + osd sloppy crc: true + diff --git a/qa/suites/perf-basic/settings/.qa b/qa/suites/perf-basic/settings/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/perf-basic/settings/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/perf-basic/settings/optimized.yaml b/qa/suites/perf-basic/settings/optimized.yaml new file mode 100644 index 00000000..fffb9be5 --- /dev/null +++ b/qa/suites/perf-basic/settings/optimized.yaml @@ -0,0 +1,80 @@ +meta: +- desc: | + Use debug level 0/0 for performance tests. 
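The settings/optimized.yaml fragment above (its overrides continue just below) pins every Ceph debug subsystem to "0/0", i.e. log level 0 and in-memory gather level 0, so that logging overhead does not distort performance results. As a minimal illustrative sketch only -- not part of the qa tree, subsystem list abridged, everything collapsed under "global" for brevity, and assuming PyYAML is available -- such a fragment could also be generated rather than hand-maintained:

# Illustrative only: emit an override stanza in the spirit of
# settings/optimized.yaml, pinning each listed debug subsystem to "0/0"
# (log level 0, in-memory gather level 0).
import yaml  # PyYAML

SUBSYSTEMS = [
    "osd", "ms", "mon", "paxos", "filestore", "journal",
    "bluestore", "bluefs", "rgw", "mgr", "rocksdb", "auth",
]

def optimized_overrides(subsystems):
    quiet = {"debug {}".format(s): "0/0" for s in subsystems}
    return {"overrides": {"ceph": {"conf": {"global": quiet}}}}

if __name__ == "__main__":
    print(yaml.safe_dump(optimized_overrides(SUBSYSTEMS),
                         default_flow_style=False))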
+ +overrides: + ceph: + conf: + mon: + debug mon: "0/0" + debug ms: "0/0" + debug paxos: "0/0" + osd: + debug filestore: "0/0" + debug journal: "0/0" + debug ms: "0/0" + debug osd: "0/0" + global: + auth client required: none + auth cluster required: none + auth service required: none + auth supported: none + + debug lockdep: "0/0" + debug context: "0/0" + debug crush: "0/0" + debug mds: "0/0" + debug mds balancer: "0/0" + debug mds locker: "0/0" + debug mds log: "0/0" + debug mds log expire: "0/0" + debug mds migrator: "0/0" + debug buffer: "0/0" + debug timer: "0/0" + debug filer: "0/0" + debug striper: "0/0" + debug objecter: "0/0" + debug rados: "0/0" + debug rbd: "0/0" + debug rbd mirror: "0/0" + debug rbd replay: "0/0" + debug journaler: "0/0" + debug objectcacher: "0/0" + debug client: "0/0" + debug osd: "0/0" + debug optracker: "0/0" + debug objclass: "0/0" + debug filestore: "0/0" + debug journal: "0/0" + debug ms: "0/0" + debug mon: "0/0" + debug monc: "0/0" + debug paxos: "0/0" + debug tp: "0/0" + debug auth: "0/0" + debug crypto: "0/0" + debug finisher: "0/0" + debug heartbeatmap: "0/0" + debug perfcounter: "0/0" + debug rgw: "0/0" + debug rgw sync: "0/0" + debug civetweb: "0/0" + debug javaclient: "0/0" + debug asok: "0/0" + debug throttle: "0/0" + debug refs: "0/0" + debug xio: "0/0" + debug compressor: "0/0" + debug bluestore: "0/0" + debug bluefs: "0/0" + debug bdev: "0/0" + debug kstore: "0/0" + debug rocksdb: "0/0" + debug leveldb: "0/0" + debug memdb: "0/0" + debug kinetic: "0/0" + debug fuse: "0/0" + debug mgr: "0/0" + debug mgrc: "0/0" + debug dpdk: "0/0" + debug eventtrace: "0/0" diff --git a/qa/suites/perf-basic/supported-all-distro b/qa/suites/perf-basic/supported-all-distro new file mode 120000 index 00000000..ca82dde5 --- /dev/null +++ b/qa/suites/perf-basic/supported-all-distro @@ -0,0 +1 @@ +.qa/distros/supported-all-distro \ No newline at end of file diff --git a/qa/suites/perf-basic/workloads/.qa b/qa/suites/perf-basic/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/perf-basic/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/perf-basic/workloads/cosbench_64K_write.yaml b/qa/suites/perf-basic/workloads/cosbench_64K_write.yaml new file mode 100644 index 00000000..ebdb6d71 --- /dev/null +++ b/qa/suites/perf-basic/workloads/cosbench_64K_write.yaml @@ -0,0 +1,31 @@ +meta: +- desc: | + Run cosbench benchmark using cbt. + 64K write workload. + +overrides: + rgw: + data_pool_pg_size: 64 + index_pool_pg_size: 64 +tasks: +- cbt: + branch: 'nautilus' + benchmarks: + cosbench: + obj_size: [64KB] + osd_ra: [4096] + workers: 1 + containers_max: 1000 + objects_max: 100 + mode: [write] + template: [default] + rampup: 30 + runtime: 300 + rampdown: 30 + containers: ["u(1,100)"] + objects: ["u(1,100)"] + ratio: [100] + cluster: + user: 'ubuntu' + osds_per_node: 1 + iterations: 1 diff --git a/qa/suites/perf-basic/workloads/fio_4K_rand_write.yaml b/qa/suites/perf-basic/workloads/fio_4K_rand_write.yaml new file mode 100644 index 00000000..a660ac5a --- /dev/null +++ b/qa/suites/perf-basic/workloads/fio_4K_rand_write.yaml @@ -0,0 +1,30 @@ +meta: +- desc: | + Run librbdfio benchmark using cbt. + 4K randwrite workload. 
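The fio_4K_rand_write.yaml fragment continues below with the actual cbt librbdfio parameters (op_size 4096, iodepth 32, one fio process per volume, two volumes per client, 4096 MB volumes, 300 s runtime). As a rough back-of-the-envelope reading of those numbers -- illustrative only, and assuming cbt's vol_size is expressed in MB, the default unit of "rbd create --size" -- the per-client load works out as follows:

# Illustrative arithmetic for the librbdfio fragment below.
op_size_bytes = 4096          # op_size: [4096]
iodepth = 32                  # iodepth: [32]
procs_per_volume = 1          # procs_per_volume: [1]
volumes_per_client = 2        # volumes_per_client: [2]
vol_size_mb = 4096            # vol_size: 4096 (assumed MB)

outstanding_ios = iodepth * procs_per_volume * volumes_per_client
working_set_gb = vol_size_mb * volumes_per_client / 1024.0
inflight_kb = outstanding_ios * op_size_bytes / 1024.0

print("outstanding 4K random writes per client:", outstanding_ios)  # 64
print("data in flight per client: %.0f KB" % inflight_kb)           # 256
print("RBD working set per client: %.1f GB" % working_set_gb)       # 8.0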
+ +tasks: +- cbt: + branch: 'nautilus' + benchmarks: + librbdfio: + op_size: [4096] + time: 300 + mode: ['randwrite'] + norandommap: True + vol_size: 4096 + procs_per_volume: [1] + volumes_per_client: [2] + iodepth: [32] + osd_ra: [4096] + pool_profile: 'rbd' + log_avg_msec: 100 + cluster: + user: 'ubuntu' + osds_per_node: 3 + iterations: 1 + pool_profiles: + rbd: + pg_size: 256 + pgp_size: 256 + replication: 3 diff --git a/qa/suites/perf-basic/workloads/radosbench_4K_write.yaml b/qa/suites/perf-basic/workloads/radosbench_4K_write.yaml new file mode 100644 index 00000000..93c01792 --- /dev/null +++ b/qa/suites/perf-basic/workloads/radosbench_4K_write.yaml @@ -0,0 +1,29 @@ +meta: +- desc: | + Run radosbench benchmark using cbt. + 4K write workload. + +tasks: +- cbt: + branch: 'nautilus' + benchmarks: + radosbench: + concurrent_ops: 4 + concurrent_procs: 2 + op_size: [4096] + pool_monitoring_list: + - collectl + pool_profile: 'replicated' + run_monitoring_list: + - collectl + time: 300 + write_only: true + cluster: + user: 'ubuntu' + osds_per_node: 3 + iterations: 1 + pool_profiles: + replicated: + pg_size: 256 + pgp_size: 256 + replication: 'replicated' diff --git a/qa/suites/powercycle/.qa b/qa/suites/powercycle/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/powercycle/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/powercycle/osd/% b/qa/suites/powercycle/osd/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/powercycle/osd/.qa b/qa/suites/powercycle/osd/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/powercycle/osd/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/powercycle/osd/clusters/.qa b/qa/suites/powercycle/osd/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/powercycle/osd/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/powercycle/osd/clusters/3osd-1per-target.yaml b/qa/suites/powercycle/osd/clusters/3osd-1per-target.yaml new file mode 100644 index 00000000..2fbcd018 --- /dev/null +++ b/qa/suites/powercycle/osd/clusters/3osd-1per-target.yaml @@ -0,0 +1,5 @@ +roles: +- [mon.a, mon.b, mon.c, mgr.x, mgr.y, mds.a, client.0] +- [osd.0] +- [osd.1] +- [osd.2] diff --git a/qa/suites/powercycle/osd/objectstore b/qa/suites/powercycle/osd/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/powercycle/osd/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/powercycle/osd/powercycle/.qa b/qa/suites/powercycle/osd/powercycle/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/powercycle/osd/powercycle/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/powercycle/osd/powercycle/default.yaml b/qa/suites/powercycle/osd/powercycle/default.yaml new file mode 100644 index 00000000..a693f4b4 --- /dev/null +++ b/qa/suites/powercycle/osd/powercycle/default.yaml @@ -0,0 +1,10 @@ +tasks: +- install: + extra_system_packages: + deb: ['bison', 'flex', 'libelf-dev', 'libssl-dev'] + rpm: ['bison', 'flex', 'elfutils-libelf-devel', 'openssl-devel'] +- ceph: +- thrashosds: + chance_down: 1.0 + powercycle: true + timeout: 600 diff --git a/qa/suites/powercycle/osd/supported-all-distro b/qa/suites/powercycle/osd/supported-all-distro new file mode 120000 index 00000000..ca82dde5 --- /dev/null +++ b/qa/suites/powercycle/osd/supported-all-distro @@ -0,0 +1 @@ 
+.qa/distros/supported-all-distro \ No newline at end of file diff --git a/qa/suites/powercycle/osd/tasks/.qa b/qa/suites/powercycle/osd/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/powercycle/osd/tasks/admin_socket_objecter_requests.yaml b/qa/suites/powercycle/osd/tasks/admin_socket_objecter_requests.yaml new file mode 100644 index 00000000..3b1a8920 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/admin_socket_objecter_requests.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + client.0: + admin socket: /var/run/ceph/ceph-$name.asok +tasks: +- radosbench: + clients: [client.0] + time: 60 +- admin_socket: + client.0: + objecter_requests: + test: "http://git.ceph.com/?p={repo};a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}" diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_kernel_untar_build.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_kernel_untar_build.yaml new file mode 100644 index 00000000..87f8f57c --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_kernel_untar_build.yaml @@ -0,0 +1,12 @@ +overrides: + ceph: + conf: + client: + fuse_default_permissions: 0 +tasks: +- ceph-fuse: +- workunit: + timeout: 6h + clients: + all: + - kernel_untar_build.sh diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_misc.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_misc.yaml new file mode 100644 index 00000000..683d3f59 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_misc.yaml @@ -0,0 +1,7 @@ +tasks: +- ceph-fuse: +- workunit: + timeout: 6h + clients: + all: + - fs/misc diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_ffsb.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_ffsb.yaml new file mode 100644 index 00000000..9f3fa7b1 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_ffsb.yaml @@ -0,0 +1,14 @@ +overrides: + ceph: + conf: + osd: + filestore flush min: 0 + mds: + debug ms: 1 + debug mds: 20 +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/ffsb.sh diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsstress.yaml new file mode 100644 index 00000000..5908d951 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsstress.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsx.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsx.yaml new file mode 100644 index 00000000..94031518 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsx.yaml @@ -0,0 +1,7 @@ +tasks: +- ceph-fuse: +- workunit: + timeout: 6h + clients: + all: + - suites/fsx.sh diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsync.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsync.yaml new file mode 100644 index 00000000..2cbb03c7 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_fsync.yaml @@ -0,0 +1,12 @@ +overrides: + ceph: + conf: + global: + osd_pg_log_dups_tracked: 10000 + +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/fsync-tester.sh diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_pjd.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_pjd.yaml new file mode 100644 index 00000000..664791c3 --- /dev/null +++ 
b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_pjd.yaml @@ -0,0 +1,12 @@ +overrides: + ceph: + conf: + client: + fuse default permissions: false + fuse set user groups: true +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/pjd.sh diff --git a/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_truncate_delay.yaml b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_truncate_delay.yaml new file mode 100644 index 00000000..f3efafa2 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/cfuse_workunit_suites_truncate_delay.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + conf: + client: + ms_inject_delay_probability: 1 + ms_inject_delay_type: osd + ms_inject_delay_max: 5 + client_oc_max_dirty_age: 1 +tasks: +- ceph-fuse: +- exec: + client.0: + - dd if=/dev/zero of=./foo count=100 + - sleep 2 + - truncate --size 0 ./foo diff --git a/qa/suites/powercycle/osd/tasks/rados_api_tests.yaml b/qa/suites/powercycle/osd/tasks/rados_api_tests.yaml new file mode 100644 index 00000000..4899bf16 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/rados_api_tests.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + log-whitelist: + - reached quota + - \(POOL_APP_NOT_ENABLED\) + - \(PG_AVAILABILITY\) + conf: + mon: + mon warn on pool no app: false +tasks: +- ceph-fuse: +- workunit: + clients: + client.0: + - rados/test.sh diff --git a/qa/suites/powercycle/osd/tasks/radosbench.yaml b/qa/suites/powercycle/osd/tasks/radosbench.yaml new file mode 100644 index 00000000..91573f90 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/radosbench.yaml @@ -0,0 +1,38 @@ +tasks: +- full_sequential: + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 diff --git a/qa/suites/powercycle/osd/tasks/readwrite.yaml b/qa/suites/powercycle/osd/tasks/readwrite.yaml new file mode 100644 index 00000000..c53e52b0 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/readwrite.yaml @@ -0,0 +1,9 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + op_weights: + read: 45 + write: 45 + delete: 10 diff --git a/qa/suites/powercycle/osd/tasks/snaps-few-objects.yaml b/qa/suites/powercycle/osd/tasks/snaps-few-objects.yaml new file mode 100644 index 00000000..aa82d973 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/snaps-few-objects.yaml @@ -0,0 +1,13 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 diff --git a/qa/suites/powercycle/osd/tasks/snaps-many-objects.yaml b/qa/suites/powercycle/osd/tasks/snaps-many-objects.yaml new file mode 100644 index 00000000..1ffe4e14 --- /dev/null +++ b/qa/suites/powercycle/osd/tasks/snaps-many-objects.yaml @@ -0,0 +1,13 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 diff --git a/qa/suites/powercycle/osd/thrashosds-health.yaml b/qa/suites/powercycle/osd/thrashosds-health.yaml new file 
mode 120000 index 00000000..9124eb1a --- /dev/null +++ b/qa/suites/powercycle/osd/thrashosds-health.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/suites/powercycle/osd/whitelist_health.yaml b/qa/suites/powercycle/osd/whitelist_health.yaml new file mode 100644 index 00000000..f724302a --- /dev/null +++ b/qa/suites/powercycle/osd/whitelist_health.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + log-whitelist: + - \(MDS_TRIM\) + - \(MDS_SLOW_REQUEST\) + - MDS_SLOW_METADATA_IO + - Behind on trimming diff --git a/qa/suites/rados/.qa b/qa/suites/rados/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/basic/% b/qa/suites/rados/basic/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/basic/.qa b/qa/suites/rados/basic/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/basic/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/basic/ceph.yaml b/qa/suites/rados/basic/ceph.yaml new file mode 100644 index 00000000..c0857e14 --- /dev/null +++ b/qa/suites/rados/basic/ceph.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + mon: + mon min osdmap epochs: 50 + paxos service trim min: 10 + # prune full osdmaps regularly + mon osdmap full prune min: 15 + mon osdmap full prune interval: 2 + mon osdmap full prune txsize: 2 +tasks: +- install: +- ceph: diff --git a/qa/suites/rados/basic/clusters/+ b/qa/suites/rados/basic/clusters/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/basic/clusters/.qa b/qa/suites/rados/basic/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/basic/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/basic/clusters/fixed-2.yaml b/qa/suites/rados/basic/clusters/fixed-2.yaml new file mode 120000 index 00000000..230ff0fd --- /dev/null +++ b/qa/suites/rados/basic/clusters/fixed-2.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-2.yaml \ No newline at end of file diff --git a/qa/suites/rados/basic/clusters/openstack.yaml b/qa/suites/rados/basic/clusters/openstack.yaml new file mode 100644 index 00000000..e559d912 --- /dev/null +++ b/qa/suites/rados/basic/clusters/openstack.yaml @@ -0,0 +1,4 @@ +openstack: + - volumes: # attached to each instance + count: 4 + size: 10 # GB diff --git a/qa/suites/rados/basic/msgr b/qa/suites/rados/basic/msgr new file mode 120000 index 00000000..57bee80d --- /dev/null +++ b/qa/suites/rados/basic/msgr @@ -0,0 +1 @@ +.qa/msgr \ No newline at end of file diff --git a/qa/suites/rados/basic/msgr-failures/.qa b/qa/suites/rados/basic/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/basic/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/basic/msgr-failures/few.yaml b/qa/suites/rados/basic/msgr-failures/few.yaml new file mode 100644 index 00000000..4326fe23 --- /dev/null +++ b/qa/suites/rados/basic/msgr-failures/few.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rados/basic/msgr-failures/many.yaml b/qa/suites/rados/basic/msgr-failures/many.yaml new file mode 100644 index 00000000..f4bb065b --- /dev/null +++ b/qa/suites/rados/basic/msgr-failures/many.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + 
ms inject socket failures: 1500 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rados/basic/objectstore b/qa/suites/rados/basic/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rados/basic/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rados/basic/rados.yaml b/qa/suites/rados/basic/rados.yaml new file mode 120000 index 00000000..d256979c --- /dev/null +++ b/qa/suites/rados/basic/rados.yaml @@ -0,0 +1 @@ +.qa/config/rados.yaml \ No newline at end of file diff --git a/qa/suites/rados/basic/supported-random-distro$ b/qa/suites/rados/basic/supported-random-distro$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/rados/basic/supported-random-distro$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rados/basic/tasks/.qa b/qa/suites/rados/basic/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/basic/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/basic/tasks/rados_api_tests.yaml b/qa/suites/rados/basic/tasks/rados_api_tests.yaml new file mode 100644 index 00000000..b90d8089 --- /dev/null +++ b/qa/suites/rados/basic/tasks/rados_api_tests.yaml @@ -0,0 +1,24 @@ +overrides: + ceph: + log-whitelist: + - reached quota + - but it is still running + - overall HEALTH_ + - \(POOL_FULL\) + - \(SMALLER_PGP_NUM\) + - \(CACHE_POOL_NO_HIT_SET\) + - \(CACHE_POOL_NEAR_FULL\) + - \(POOL_APP_NOT_ENABLED\) + - \(PG_AVAILABILITY\) + conf: + client: + debug ms: 1 + mon: + mon warn on pool no app: false +tasks: +- workunit: + clients: + client.0: + - rados/test.sh + - rados/test_pool_quota.sh + diff --git a/qa/suites/rados/basic/tasks/rados_cls_all.yaml b/qa/suites/rados/basic/tasks/rados_cls_all.yaml new file mode 100644 index 00000000..bcc58e19 --- /dev/null +++ b/qa/suites/rados/basic/tasks/rados_cls_all.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + osd: + osd_class_load_list: "cephfs hello journal lock log numops rbd refcount + rgw sdk timeindex user version" + osd_class_default_list: "cephfs hello journal lock log numops rbd refcount + rgw sdk timeindex user version" +tasks: +- workunit: + clients: + client.0: + - cls diff --git a/qa/suites/rados/basic/tasks/rados_python.yaml b/qa/suites/rados/basic/tasks/rados_python.yaml new file mode 100644 index 00000000..8c70304d --- /dev/null +++ b/qa/suites/rados/basic/tasks/rados_python.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(PG_ + - \(OSD_ + - \(OBJECT_ + - \(POOL_APP_NOT_ENABLED\) +tasks: +- workunit: + clients: + client.0: + - rados/test_python.sh diff --git a/qa/suites/rados/basic/tasks/rados_stress_watch.yaml b/qa/suites/rados/basic/tasks/rados_stress_watch.yaml new file mode 100644 index 00000000..bee513eb --- /dev/null +++ b/qa/suites/rados/basic/tasks/rados_stress_watch.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NO_HIT_SET\) + - \(TOO_FEW_PGS\) +tasks: +- workunit: + clients: + client.0: + - rados/stress_watch.sh diff --git a/qa/suites/rados/basic/tasks/rados_striper.yaml b/qa/suites/rados/basic/tasks/rados_striper.yaml new file mode 100644 index 00000000..c19cc83a --- /dev/null +++ b/qa/suites/rados/basic/tasks/rados_striper.yaml @@ -0,0 +1,7 @@ +tasks: +- exec: + client.0: + - ceph_test_rados_striper_api_io + - ceph_test_rados_striper_api_aio + - 
ceph_test_rados_striper_api_striping + diff --git a/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml new file mode 100644 index 00000000..2dade6de --- /dev/null +++ b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_big.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - overall HEALTH_ + - \(POOL_APP_NOT_ENABLED\) +tasks: +- workunit: + clients: + all: + - rados/load-gen-big.sh diff --git a/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml new file mode 100644 index 00000000..6b764a87 --- /dev/null +++ b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mix.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - overall HEALTH_ + - \(POOL_APP_NOT_ENABLED\) +tasks: +- workunit: + clients: + all: + - rados/load-gen-mix.sh diff --git a/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml new file mode 100644 index 00000000..c82023c2 --- /dev/null +++ b/qa/suites/rados/basic/tasks/rados_workunit_loadgen_mostlyread.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - overall HEALTH_ + - \(POOL_APP_NOT_ENABLED\) +tasks: +- workunit: + clients: + all: + - rados/load-gen-mostlyread.sh diff --git a/qa/suites/rados/basic/tasks/readwrite.yaml b/qa/suites/rados/basic/tasks/readwrite.yaml new file mode 100644 index 00000000..f135107c --- /dev/null +++ b/qa/suites/rados/basic/tasks/readwrite.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + crush_tunables: optimal + conf: + mon: + mon osd initial require min compat client: luminous + osd: + osd_discard_disconnected_ops: false +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + op_weights: + read: 45 + write: 45 + delete: 10 diff --git a/qa/suites/rados/basic/tasks/repair_test.yaml b/qa/suites/rados/basic/tasks/repair_test.yaml new file mode 100644 index 00000000..f3a7868d --- /dev/null +++ b/qa/suites/rados/basic/tasks/repair_test.yaml @@ -0,0 +1,31 @@ +overrides: + ceph: + wait-for-scrub: false + log-whitelist: + - candidate had a stat error + - candidate had a read error + - deep-scrub 0 missing, 1 inconsistent objects + - deep-scrub 0 missing, 4 inconsistent objects + - deep-scrub [0-9]+ errors + - '!= omap_digest' + - '!= data_digest' + - repair 0 missing, 1 inconsistent objects + - repair 0 missing, 4 inconsistent objects + - repair [0-9]+ errors, [0-9]+ fixed + - scrub 0 missing, 1 inconsistent objects + - scrub [0-9]+ errors + - 'size 1 != size' + - attr name mismatch + - Regular scrub request, deep-scrub details will be lost + - candidate size [0-9]+ info size [0-9]+ mismatch + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + conf: + osd: + filestore debug inject read err: true + bluestore debug inject read err: true +tasks: +- repair_test: + diff --git a/qa/suites/rados/basic/tasks/rgw_snaps.yaml b/qa/suites/rados/basic/tasks/rgw_snaps.yaml new file mode 100644 index 00000000..c94b7b2b --- /dev/null +++ b/qa/suites/rados/basic/tasks/rgw_snaps.yaml @@ -0,0 +1,41 @@ +overrides: + ceph: + conf: + client: + debug rgw: 20 + debug ms: 1 + osd: + osd_max_omap_entries_per_request: 10 +tasks: +- rgw: + client.0: +- ceph_manager.wait_for_pools: + kwargs: + pools: + - default.rgw.buckets.data + - default.rgw.buckets.index + - .rgw.root + - default.rgw.control + - default.rgw.meta + - 
default.rgw.log +- thrash_pool_snaps: + pools: + - default.rgw.buckets.data + - default.rgw.buckets.index + - .rgw.root + - default.rgw.control + - default.rgw.meta + - default.rgw.log +- s3readwrite: + client.0: + force-branch: ceph-nautilus + rgw_server: client.0 + readwrite: + bucket: rwtest + readers: 10 + writers: 3 + duration: 300 + files: + num: 10 + size: 2000 + stddev: 500 diff --git a/qa/suites/rados/basic/tasks/scrub_test.yaml b/qa/suites/rados/basic/tasks/scrub_test.yaml new file mode 100644 index 00000000..00e85f9e --- /dev/null +++ b/qa/suites/rados/basic/tasks/scrub_test.yaml @@ -0,0 +1,30 @@ +overrides: + ceph: + wait-for-scrub: false + log-whitelist: + - '!= data_digest' + - '!= omap_digest' + - '!= size' + - 'deep-scrub 0 missing, 1 inconsistent objects' + - 'deep-scrub [0-9]+ errors' + - 'repair 0 missing, 1 inconsistent objects' + - 'repair [0-9]+ errors, [0-9]+ fixed' + - 'shard [0-9]+ .* : missing' + - 'deep-scrub 1 missing, 1 inconsistent objects' + - 'does not match object info size' + - 'attr name mistmatch' + - 'deep-scrub 1 missing, 0 inconsistent objects' + - 'failed to pick suitable auth object' + - 'candidate size [0-9]+ info size [0-9]+ mismatch' + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(OSD_SCRUB_ERRORS\) + - \(TOO_FEW_PGS\) + conf: + osd: + osd deep scrub update digest min age: 0 + osd skip data digest: false +tasks: +- scrub_test: diff --git a/qa/suites/rados/dashboard/% b/qa/suites/rados/dashboard/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/dashboard/.qa b/qa/suites/rados/dashboard/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/dashboard/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/dashboard/clusters/+ b/qa/suites/rados/dashboard/clusters/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/dashboard/clusters/.qa b/qa/suites/rados/dashboard/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/dashboard/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/dashboard/clusters/2-node-mgr.yaml b/qa/suites/rados/dashboard/clusters/2-node-mgr.yaml new file mode 120000 index 00000000..8a0b9123 --- /dev/null +++ b/qa/suites/rados/dashboard/clusters/2-node-mgr.yaml @@ -0,0 +1 @@ +.qa/clusters/2-node-mgr.yaml \ No newline at end of file diff --git a/qa/suites/rados/dashboard/debug/.qa b/qa/suites/rados/dashboard/debug/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/dashboard/debug/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/dashboard/debug/mgr.yaml b/qa/suites/rados/dashboard/debug/mgr.yaml new file mode 120000 index 00000000..651e5f8a --- /dev/null +++ b/qa/suites/rados/dashboard/debug/mgr.yaml @@ -0,0 +1 @@ +.qa/debug/mgr.yaml \ No newline at end of file diff --git a/qa/suites/rados/dashboard/objectstore b/qa/suites/rados/dashboard/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rados/dashboard/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rados/dashboard/supported-random-distro$ b/qa/suites/rados/dashboard/supported-random-distro$ new file mode 120000 index 00000000..7cef21ee --- /dev/null +++ b/qa/suites/rados/dashboard/supported-random-distro$ @@ -0,0 +1 @@ +../basic/supported-random-distro$ \ No newline at end of file diff --git 
a/qa/suites/rados/dashboard/tasks/.qa b/qa/suites/rados/dashboard/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/dashboard/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/dashboard/tasks/dashboard.yaml b/qa/suites/rados/dashboard/tasks/dashboard.yaml new file mode 100644 index 00000000..ad6adc7c --- /dev/null +++ b/qa/suites/rados/dashboard/tasks/dashboard.yaml @@ -0,0 +1,51 @@ + +tasks: + - install: + - ceph: + # tests may leave mgrs broken, so don't try and call into them + # to invoke e.g. pg dump during teardown. + wait-for-scrub: false + log-whitelist: + - overall HEALTH_ + - \(MGR_DOWN\) + - \(PG_ + - replacing it with standby + - No standby daemons available + - \(FS_DEGRADED\) + - \(MDS_FAILED\) + - \(MDS_DEGRADED\) + - \(FS_WITH_FAILED_MDS\) + - \(MDS_DAMAGE\) + - \(MDS_ALL_DOWN\) + - \(MDS_UP_LESS_THAN_MAX\) + - \(OSD_DOWN\) + - \(OSD_HOST_DOWN\) + - \(POOL_APP_NOT_ENABLED\) + - pauserd,pausewr flag\(s\) set + - Monitor daemon marked osd\.[[:digit:]]+ down, but it is still running + - evicting unresponsive client .+ + - rgw: [client.0] + - cephfs_test_runner: + fail_on_skip: false + modules: + - tasks.mgr.test_dashboard + - tasks.mgr.dashboard.test_auth + - tasks.mgr.dashboard.test_cephfs + - tasks.mgr.dashboard.test_cluster_configuration + - tasks.mgr.dashboard.test_health + - tasks.mgr.dashboard.test_host + - tasks.mgr.dashboard.test_logs + - tasks.mgr.dashboard.test_monitor + - tasks.mgr.dashboard.test_osd + - tasks.mgr.dashboard.test_perf_counters + - tasks.mgr.dashboard.test_summary + - tasks.mgr.dashboard.test_rgw + - tasks.mgr.dashboard.test_rbd + - tasks.mgr.dashboard.test_pool + - tasks.mgr.dashboard.test_requests + - tasks.mgr.dashboard.test_role + - tasks.mgr.dashboard.test_settings + - tasks.mgr.dashboard.test_user + - tasks.mgr.dashboard.test_erasure_code_profile + - tasks.mgr.dashboard.test_mgr_module + - tasks.mgr.dashboard.test_ganesha diff --git a/qa/suites/rados/mgr/% b/qa/suites/rados/mgr/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/mgr/.qa b/qa/suites/rados/mgr/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/mgr/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/mgr/clusters/+ b/qa/suites/rados/mgr/clusters/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/mgr/clusters/.qa b/qa/suites/rados/mgr/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/mgr/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/mgr/clusters/2-node-mgr.yaml b/qa/suites/rados/mgr/clusters/2-node-mgr.yaml new file mode 120000 index 00000000..8a0b9123 --- /dev/null +++ b/qa/suites/rados/mgr/clusters/2-node-mgr.yaml @@ -0,0 +1 @@ +.qa/clusters/2-node-mgr.yaml \ No newline at end of file diff --git a/qa/suites/rados/mgr/debug/.qa b/qa/suites/rados/mgr/debug/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/mgr/debug/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/mgr/debug/mgr.yaml b/qa/suites/rados/mgr/debug/mgr.yaml new file mode 120000 index 00000000..651e5f8a --- /dev/null +++ b/qa/suites/rados/mgr/debug/mgr.yaml @@ -0,0 +1 @@ +.qa/debug/mgr.yaml \ No newline at end of file diff --git a/qa/suites/rados/mgr/objectstore b/qa/suites/rados/mgr/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ 
b/qa/suites/rados/mgr/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rados/mgr/supported-random-distro$ b/qa/suites/rados/mgr/supported-random-distro$ new file mode 120000 index 00000000..7cef21ee --- /dev/null +++ b/qa/suites/rados/mgr/supported-random-distro$ @@ -0,0 +1 @@ +../basic/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rados/mgr/tasks/.qa b/qa/suites/rados/mgr/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/mgr/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/mgr/tasks/crash.yaml b/qa/suites/rados/mgr/tasks/crash.yaml new file mode 100644 index 00000000..7b4c4446 --- /dev/null +++ b/qa/suites/rados/mgr/tasks/crash.yaml @@ -0,0 +1,17 @@ + +tasks: + - install: + - ceph: + # tests may leave mgrs broken, so don't try and call into them + # to invoke e.g. pg dump during teardown. + wait-for-scrub: false + log-whitelist: + - overall HEALTH_ + - \(MGR_DOWN\) + - \(PG_ + - \(RECENT_CRASH\) + - replacing it with standby + - No standby daemons available + - cephfs_test_runner: + modules: + - tasks.mgr.test_crash diff --git a/qa/suites/rados/mgr/tasks/failover.yaml b/qa/suites/rados/mgr/tasks/failover.yaml new file mode 100644 index 00000000..34be4715 --- /dev/null +++ b/qa/suites/rados/mgr/tasks/failover.yaml @@ -0,0 +1,16 @@ + +tasks: + - install: + - ceph: + # tests may leave mgrs broken, so don't try and call into them + # to invoke e.g. pg dump during teardown. + wait-for-scrub: false + log-whitelist: + - overall HEALTH_ + - \(MGR_DOWN\) + - \(PG_ + - replacing it with standby + - No standby daemons available + - cephfs_test_runner: + modules: + - tasks.mgr.test_failover diff --git a/qa/suites/rados/mgr/tasks/insights.yaml b/qa/suites/rados/mgr/tasks/insights.yaml new file mode 100644 index 00000000..52160665 --- /dev/null +++ b/qa/suites/rados/mgr/tasks/insights.yaml @@ -0,0 +1,19 @@ + +tasks: + - install: + - ceph: + # tests may leave mgrs broken, so don't try and call into them + # to invoke e.g. pg dump during teardown. + wait-for-scrub: false + log-whitelist: + - overall HEALTH_ + - \(MGR_DOWN\) + - \(MGR_INSIGHTS_WARNING\) + - \(insights_health_check + - \(PG_ + - \(RECENT_CRASH\) + - replacing it with standby + - No standby daemons available + - cephfs_test_runner: + modules: + - tasks.mgr.test_insights diff --git a/qa/suites/rados/mgr/tasks/module_selftest.yaml b/qa/suites/rados/mgr/tasks/module_selftest.yaml new file mode 100644 index 00000000..11053d6a --- /dev/null +++ b/qa/suites/rados/mgr/tasks/module_selftest.yaml @@ -0,0 +1,25 @@ + +tasks: + - install: + - ceph: + # tests may leave mgrs broken, so don't try and call into them + # to invoke e.g. pg dump during teardown. 
+ wait-for-scrub: false + log-whitelist: + - overall HEALTH_ + - \(MGR_DOWN\) + - \(PG_ + - replacing it with standby + - No standby daemons available + - Reduced data availability + - Degraded data redundancy + - objects misplaced + - Synthetic exception in serve + - influxdb python module not found + - \(MGR_ZABBIX_ + - foo bar + - Failed to open Telegraf + - evicting unresponsive client + - cephfs_test_runner: + modules: + - tasks.mgr.test_module_selftest diff --git a/qa/suites/rados/mgr/tasks/orchestrator_cli.yaml b/qa/suites/rados/mgr/tasks/orchestrator_cli.yaml new file mode 100644 index 00000000..65b1d78b --- /dev/null +++ b/qa/suites/rados/mgr/tasks/orchestrator_cli.yaml @@ -0,0 +1,18 @@ + +tasks: + - install: + - ceph: + # tests may leave mgrs broken, so don't try and call into them + # to invoke e.g. pg dump during teardown. + wait-for-scrub: false + log-whitelist: + - overall HEALTH_ + - \(MGR_DOWN\) + - \(MGR_INSIGHTS_WARNING\) + - \(insights_health_check + - \(PG_ + - replacing it with standby + - No standby daemons available + - cephfs_test_runner: + modules: + - tasks.mgr.test_orchestrator_cli \ No newline at end of file diff --git a/qa/suites/rados/mgr/tasks/progress.yaml b/qa/suites/rados/mgr/tasks/progress.yaml new file mode 100644 index 00000000..78462575 --- /dev/null +++ b/qa/suites/rados/mgr/tasks/progress.yaml @@ -0,0 +1,24 @@ + +tasks: + - install: + - ceph: + config: + global: + osd pool default size : 3 + osd pool default min size : 2 + # tests may leave mgrs broken, so don't try and call into them + # to invoke e.g. pg dump during teardown. + wait-for-scrub: false + log-whitelist: + - overall HEALTH_ + - \(MGR_DOWN\) + - \(MDS_ALL_DOWN\) + - \(MDS_UP_LESS_THAN_MAX\) + - \(FS_WITH_FAILED_MDS\) + - \(FS_DEGRADED\) + - \(PG_ + - replacing it with standby + - No standby daemons available + - cephfs_test_runner: + modules: + - tasks.mgr.test_progress diff --git a/qa/suites/rados/mgr/tasks/prometheus.yaml b/qa/suites/rados/mgr/tasks/prometheus.yaml new file mode 100644 index 00000000..1a777681 --- /dev/null +++ b/qa/suites/rados/mgr/tasks/prometheus.yaml @@ -0,0 +1,16 @@ + +tasks: + - install: + - ceph: + # tests may leave mgrs broken, so don't try and call into them + # to invoke e.g. pg dump during teardown. + wait-for-scrub: false + log-whitelist: + - overall HEALTH_ + - \(MGR_DOWN\) + - \(PG_ + - replacing it with standby + - No standby daemons available + - cephfs_test_runner: + modules: + - tasks.mgr.test_prometheus diff --git a/qa/suites/rados/mgr/tasks/ssh_orchestrator.yaml b/qa/suites/rados/mgr/tasks/ssh_orchestrator.yaml new file mode 100644 index 00000000..cd606f76 --- /dev/null +++ b/qa/suites/rados/mgr/tasks/ssh_orchestrator.yaml @@ -0,0 +1,18 @@ + +tasks: + - install: + - ceph: + # tests may leave mgrs broken, so don't try and call into them + # to invoke e.g. pg dump during teardown. + wait-for-scrub: false + log-whitelist: + - overall HEALTH_ + - \(MGR_DOWN\) + - \(MGR_INSIGHTS_WARNING\) + - \(insights_health_check + - \(PG_ + - replacing it with standby + - No standby daemons available + - cephfs_test_runner: + modules: + - tasks.mgr.test_ssh_orchestrator diff --git a/qa/suites/rados/mgr/tasks/workunits.yaml b/qa/suites/rados/mgr/tasks/workunits.yaml new file mode 100644 index 00000000..d7261f44 --- /dev/null +++ b/qa/suites/rados/mgr/tasks/workunits.yaml @@ -0,0 +1,16 @@ +tasks: + - install: + - ceph: + # tests may leave mgrs broken, so don't try and call into them + # to invoke e.g. pg dump during teardown. 
+ wait-for-scrub: false + log-whitelist: + - overall HEALTH_ + - \(MGR_DOWN\) + - \(PG_ + - replacing it with standby + - No standby daemons available + - workunit: + clients: + client.0: + - mgr \ No newline at end of file diff --git a/qa/suites/rados/monthrash/% b/qa/suites/rados/monthrash/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/monthrash/.qa b/qa/suites/rados/monthrash/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/monthrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/monthrash/ceph.yaml b/qa/suites/rados/monthrash/ceph.yaml new file mode 100644 index 00000000..6c53b315 --- /dev/null +++ b/qa/suites/rados/monthrash/ceph.yaml @@ -0,0 +1,25 @@ +overrides: + ceph: + conf: + mon: + mon min osdmap epochs: 25 + paxos service trim min: 5 + # prune full osdmaps regularly + mon osdmap full prune min: 15 + mon osdmap full prune interval: 2 + mon osdmap full prune txsize: 2 + mon scrub inject crc mismatch: 0.01 + mon scrub inject missing keys: 0.05 +# thrashing monitors may make mgr have trouble w/ its keepalive + log-whitelist: + - ScrubResult + - scrub mismatch + - overall HEALTH_ + - \(MGR_DOWN\) +# slow mons -> slow peering -> PG_AVAILABILITY + - \(PG_AVAILABILITY\) + - \(SLOW_OPS\) + - slow request +tasks: +- install: +- ceph: diff --git a/qa/suites/rados/monthrash/clusters/.qa b/qa/suites/rados/monthrash/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/monthrash/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/monthrash/clusters/3-mons.yaml b/qa/suites/rados/monthrash/clusters/3-mons.yaml new file mode 100644 index 00000000..4b721ef8 --- /dev/null +++ b/qa/suites/rados/monthrash/clusters/3-mons.yaml @@ -0,0 +1,7 @@ +roles: +- [mon.a, mon.c, osd.0, osd.1, osd.2] +- [mon.b, mgr.x, osd.3, osd.4, osd.5, client.0] +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB diff --git a/qa/suites/rados/monthrash/clusters/9-mons.yaml b/qa/suites/rados/monthrash/clusters/9-mons.yaml new file mode 100644 index 00000000..a2874c1d --- /dev/null +++ b/qa/suites/rados/monthrash/clusters/9-mons.yaml @@ -0,0 +1,7 @@ +roles: +- [mon.a, mon.b, mon.c, mon.d, mon.e, osd.0, osd.1, osd.2] +- [mon.f, mon.g, mon.h, mon.i, mgr.x, osd.3, osd.4, osd.5, client.0] +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB diff --git a/qa/suites/rados/monthrash/msgr b/qa/suites/rados/monthrash/msgr new file mode 120000 index 00000000..57bee80d --- /dev/null +++ b/qa/suites/rados/monthrash/msgr @@ -0,0 +1 @@ +.qa/msgr \ No newline at end of file diff --git a/qa/suites/rados/monthrash/msgr-failures/.qa b/qa/suites/rados/monthrash/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/monthrash/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/monthrash/msgr-failures/few.yaml b/qa/suites/rados/monthrash/msgr-failures/few.yaml new file mode 100644 index 00000000..4326fe23 --- /dev/null +++ b/qa/suites/rados/monthrash/msgr-failures/few.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml b/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml new file mode 100644 index 00000000..fcd8ca7c --- /dev/null +++ 
b/qa/suites/rados/monthrash/msgr-failures/mon-delay.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 2500 + ms inject delay type: mon + ms inject delay probability: .005 + ms inject delay max: 1 + ms inject internal delays: .002 + mgr: + debug monc: 10 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rados/monthrash/objectstore b/qa/suites/rados/monthrash/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rados/monthrash/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rados/monthrash/rados.yaml b/qa/suites/rados/monthrash/rados.yaml new file mode 120000 index 00000000..d256979c --- /dev/null +++ b/qa/suites/rados/monthrash/rados.yaml @@ -0,0 +1 @@ +.qa/config/rados.yaml \ No newline at end of file diff --git a/qa/suites/rados/monthrash/supported-random-distro$ b/qa/suites/rados/monthrash/supported-random-distro$ new file mode 120000 index 00000000..7cef21ee --- /dev/null +++ b/qa/suites/rados/monthrash/supported-random-distro$ @@ -0,0 +1 @@ +../basic/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rados/monthrash/thrashers/.qa b/qa/suites/rados/monthrash/thrashers/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/monthrash/thrashers/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml b/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml new file mode 100644 index 00000000..2d1ba882 --- /dev/null +++ b/qa/suites/rados/monthrash/thrashers/force-sync-many.yaml @@ -0,0 +1,12 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(MON_DOWN\) + - \(TOO_FEW_PGS\) +tasks: +- mon_thrash: + revive_delay: 90 + thrash_delay: 1 + thrash_store: true + thrash_many: true diff --git a/qa/suites/rados/monthrash/thrashers/many.yaml b/qa/suites/rados/monthrash/thrashers/many.yaml new file mode 100644 index 00000000..fa829b34 --- /dev/null +++ b/qa/suites/rados/monthrash/thrashers/many.yaml @@ -0,0 +1,16 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(MON_DOWN\) + conf: + osd: + mon client ping interval: 4 + mon client ping timeout: 12 +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 + thrash_many: true + freeze_mon_duration: 20 + freeze_mon_probability: 10 diff --git a/qa/suites/rados/monthrash/thrashers/one.yaml b/qa/suites/rados/monthrash/thrashers/one.yaml new file mode 100644 index 00000000..041cee0b --- /dev/null +++ b/qa/suites/rados/monthrash/thrashers/one.yaml @@ -0,0 +1,9 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(MON_DOWN\) +tasks: +- mon_thrash: + revive_delay: 20 + thrash_delay: 1 diff --git a/qa/suites/rados/monthrash/thrashers/sync-many.yaml b/qa/suites/rados/monthrash/thrashers/sync-many.yaml new file mode 100644 index 00000000..14f41f7f --- /dev/null +++ b/qa/suites/rados/monthrash/thrashers/sync-many.yaml @@ -0,0 +1,14 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(MON_DOWN\) + conf: + mon: + paxos min: 10 + paxos trim min: 10 +tasks: +- mon_thrash: + revive_delay: 90 + thrash_delay: 1 + thrash_many: true diff --git a/qa/suites/rados/monthrash/thrashers/sync.yaml b/qa/suites/rados/monthrash/thrashers/sync.yaml new file mode 100644 index 00000000..08b1522c --- /dev/null +++ b/qa/suites/rados/monthrash/thrashers/sync.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(MON_DOWN\) + conf: + mon: + paxos min: 
10 + paxos trim min: 10 +tasks: +- mon_thrash: + revive_delay: 90 + thrash_delay: 1 diff --git a/qa/suites/rados/monthrash/workloads/.qa b/qa/suites/rados/monthrash/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/monthrash/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml b/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml new file mode 100644 index 00000000..c6b00b48 --- /dev/null +++ b/qa/suites/rados/monthrash/workloads/pool-create-delete.yaml @@ -0,0 +1,58 @@ +overrides: + ceph: + log-whitelist: + - slow request + - overall HEALTH_ + - \(POOL_APP_NOT_ENABLED\) +tasks: +- exec: + client.0: + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel + - ceph_test_rados_delete_pools_parallel diff --git a/qa/suites/rados/monthrash/workloads/rados_5925.yaml b/qa/suites/rados/monthrash/workloads/rados_5925.yaml new file mode 100644 index 00000000..940d3a8e --- /dev/null +++ b/qa/suites/rados/monthrash/workloads/rados_5925.yaml @@ -0,0 +1,9 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(POOL_APP_NOT_ENABLED\) +tasks: +- exec: + client.0: + - ceph_test_rados_delete_pools_parallel --debug_objecter 20 --debug_ms 1 --debug_rados 20 --debug_monc 20 diff --git a/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml b/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml new file mode 100644 index 00000000..f0bd5685 --- /dev/null +++ b/qa/suites/rados/monthrash/workloads/rados_api_tests.yaml @@ -0,0 +1,26 @@ 
+overrides: + ceph: + log-whitelist: + - reached quota + - overall HEALTH_ + - \(CACHE_POOL_NO_HIT_SET\) + - \(CACHE_POOL_NEAR_FULL\) + - \(POOL_FULL\) + - \(SLOW_OPS\) + - \(MON_DOWN\) + - \(PG_ + - \(POOL_APP_NOT_ENABLED\) + - \(SMALLER_PGP_NUM\) + - slow request + conf: + global: + debug objecter: 20 + debug rados: 20 + debug ms: 1 + mon: + mon warn on pool no app: false +tasks: +- workunit: + clients: + client.0: + - rados/test.sh diff --git a/qa/suites/rados/monthrash/workloads/rados_mon_osdmap_prune.yaml b/qa/suites/rados/monthrash/workloads/rados_mon_osdmap_prune.yaml new file mode 100644 index 00000000..cca902af --- /dev/null +++ b/qa/suites/rados/monthrash/workloads/rados_mon_osdmap_prune.yaml @@ -0,0 +1,22 @@ +overrides: + ceph: + conf: + mon: + mon debug extra checks: true + mon min osdmap epochs: 100 + mon osdmap full prune enabled: true + mon osdmap full prune min: 200 + mon osdmap full prune interval: 10 + mon osdmap full prune txsize: 100 + osd: + osd beacon report interval: 10 + log-whitelist: + # setting/unsetting noup will trigger health warns, + # causing tests to fail due to health warns, even if + # the tests themselves are successful. + - \(OSDMAP_FLAGS\) +tasks: +- workunit: + clients: + client.0: + - mon/test_mon_osdmap_prune.sh diff --git a/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml b/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml new file mode 100644 index 00000000..63b88c0d --- /dev/null +++ b/qa/suites/rados/monthrash/workloads/rados_mon_workunits.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - overall HEALTH_ + - \(PG_ + - \(MON_DOWN\) + - \(AUTH_BAD_CAPS\) +tasks: +- workunit: + clients: + client.0: + - mon/pool_ops.sh + - mon/crush_ops.sh + - mon/osd.sh + - mon/caps.sh + diff --git a/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml b/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml new file mode 100644 index 00000000..aa82d973 --- /dev/null +++ b/qa/suites/rados/monthrash/workloads/snaps-few-objects.yaml @@ -0,0 +1,13 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 diff --git a/qa/suites/rados/multimon/% b/qa/suites/rados/multimon/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/multimon/.qa b/qa/suites/rados/multimon/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/multimon/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/multimon/clusters/.qa b/qa/suites/rados/multimon/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/multimon/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/multimon/clusters/21.yaml b/qa/suites/rados/multimon/clusters/21.yaml new file mode 100644 index 00000000..aae96866 --- /dev/null +++ b/qa/suites/rados/multimon/clusters/21.yaml @@ -0,0 +1,8 @@ +roles: +- [mon.a, mon.d, mon.g, mon.j, mon.m, mon.p, mon.s] +- [mon.b, mon.e, mon.h, mon.k, mon.n, mon.q, mon.t, mgr.x] +- [mon.c, mon.f, mon.i, mon.l, mon.o, mon.r, mon.u] +openstack: +- volumes: # attached to each instance + count: 1 + size: 10 # GB diff --git a/qa/suites/rados/multimon/clusters/3.yaml b/qa/suites/rados/multimon/clusters/3.yaml new file mode 100644 index 00000000..11adef16 --- /dev/null +++ b/qa/suites/rados/multimon/clusters/3.yaml @@ -0,0 +1,7 @@ +roles: +- 
[mon.a, mon.c] +- [mon.b, mgr.x] +openstack: +- volumes: # attached to each instance + count: 2 + size: 10 # GB diff --git a/qa/suites/rados/multimon/clusters/6.yaml b/qa/suites/rados/multimon/clusters/6.yaml new file mode 100644 index 00000000..29c74dc7 --- /dev/null +++ b/qa/suites/rados/multimon/clusters/6.yaml @@ -0,0 +1,7 @@ +roles: +- [mon.a, mon.c, mon.e, mgr.x] +- [mon.b, mon.d, mon.f, mgr.y] +openstack: +- volumes: # attached to each instance + count: 1 + size: 10 # GB diff --git a/qa/suites/rados/multimon/clusters/9.yaml b/qa/suites/rados/multimon/clusters/9.yaml new file mode 100644 index 00000000..d5116855 --- /dev/null +++ b/qa/suites/rados/multimon/clusters/9.yaml @@ -0,0 +1,8 @@ +roles: +- [mon.a, mon.d, mon.g] +- [mon.b, mon.e, mon.h, mgr.x] +- [mon.c, mon.f, mon.i] +openstack: +- volumes: # attached to each instance + count: 1 + size: 10 # GB diff --git a/qa/suites/rados/multimon/msgr b/qa/suites/rados/multimon/msgr new file mode 120000 index 00000000..57bee80d --- /dev/null +++ b/qa/suites/rados/multimon/msgr @@ -0,0 +1 @@ +.qa/msgr \ No newline at end of file diff --git a/qa/suites/rados/multimon/msgr-failures/.qa b/qa/suites/rados/multimon/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/multimon/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/multimon/msgr-failures/few.yaml b/qa/suites/rados/multimon/msgr-failures/few.yaml new file mode 100644 index 00000000..4326fe23 --- /dev/null +++ b/qa/suites/rados/multimon/msgr-failures/few.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rados/multimon/msgr-failures/many.yaml b/qa/suites/rados/multimon/msgr-failures/many.yaml new file mode 100644 index 00000000..ffeb5f68 --- /dev/null +++ b/qa/suites/rados/multimon/msgr-failures/many.yaml @@ -0,0 +1,8 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 1000 + mon mgr beacon grace: 90 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rados/multimon/no_pools.yaml b/qa/suites/rados/multimon/no_pools.yaml new file mode 100644 index 00000000..37d50105 --- /dev/null +++ b/qa/suites/rados/multimon/no_pools.yaml @@ -0,0 +1,3 @@ +overrides: + ceph: + create_rbd_pool: false diff --git a/qa/suites/rados/multimon/objectstore b/qa/suites/rados/multimon/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rados/multimon/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rados/multimon/rados.yaml b/qa/suites/rados/multimon/rados.yaml new file mode 120000 index 00000000..d256979c --- /dev/null +++ b/qa/suites/rados/multimon/rados.yaml @@ -0,0 +1 @@ +.qa/config/rados.yaml \ No newline at end of file diff --git a/qa/suites/rados/multimon/supported-random-distro$ b/qa/suites/rados/multimon/supported-random-distro$ new file mode 120000 index 00000000..7cef21ee --- /dev/null +++ b/qa/suites/rados/multimon/supported-random-distro$ @@ -0,0 +1 @@ +../basic/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rados/multimon/tasks/.qa b/qa/suites/rados/multimon/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/multimon/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml b/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml new file mode 100644 
index 00000000..a4cea8f3 --- /dev/null +++ b/qa/suites/rados/multimon/tasks/mon_clock_no_skews.yaml @@ -0,0 +1,11 @@ +tasks: +- install: +- ceph: + log-whitelist: + - slow request + - .*clock.*skew.* + - clocks not synchronized + - overall HEALTH_ + - \(MON_CLOCK_SKEW\) +- mon_clock_skew_check: + expect-skew: false diff --git a/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml b/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml new file mode 100644 index 00000000..abfc3a1c --- /dev/null +++ b/qa/suites/rados/multimon/tasks/mon_clock_with_skews.yaml @@ -0,0 +1,24 @@ +tasks: +- install: +- exec: + mon.b: + - sudo systemctl stop chronyd.service || true + - sudo systemctl stop systemd-timesync.service || true + - sudo systemctl stop ntpd.service || true + - sudo systemctl stop ntp.service || true + - date -u -s @$(expr $(date -u +%s) + 2) +- ceph: + wait-for-healthy: false + log-whitelist: + - .*clock.*skew.* + - clocks not synchronized + - overall HEALTH_ + - \(MON_CLOCK_SKEW\) + - \(MGR_DOWN\) + - \(MON_DOWN\) + - \(PG_ + - \(SLOW_OPS\) + - No standby daemons available + - slow request +- mon_clock_skew_check: + expect-skew: true diff --git a/qa/suites/rados/multimon/tasks/mon_recovery.yaml b/qa/suites/rados/multimon/tasks/mon_recovery.yaml new file mode 100644 index 00000000..14da275e --- /dev/null +++ b/qa/suites/rados/multimon/tasks/mon_recovery.yaml @@ -0,0 +1,10 @@ +tasks: +- install: +- ceph: + log-whitelist: + - overall HEALTH_ + - \(MON_DOWN\) + - \(PG_AVAILABILITY\) + - \(SLOW_OPS\) + - slow request +- mon_recovery: diff --git a/qa/suites/rados/objectstore/% b/qa/suites/rados/objectstore/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/objectstore/.qa b/qa/suites/rados/objectstore/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/objectstore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/objectstore/backends/.qa b/qa/suites/rados/objectstore/backends/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/objectstore/backends/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/objectstore/backends/alloc-hint.yaml b/qa/suites/rados/objectstore/backends/alloc-hint.yaml new file mode 100644 index 00000000..047b02fa --- /dev/null +++ b/qa/suites/rados/objectstore/backends/alloc-hint.yaml @@ -0,0 +1,22 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB + +overrides: + ceph: + fs: xfs + conf: + osd: + filestore xfs extsize: true + osd objectstore: filestore + +tasks: +- install: +- ceph: +- workunit: + clients: + all: + - rados/test_alloc_hint.sh diff --git a/qa/suites/rados/objectstore/backends/ceph_objectstore_tool.yaml b/qa/suites/rados/objectstore/backends/ceph_objectstore_tool.yaml new file mode 100644 index 00000000..042bd065 --- /dev/null +++ b/qa/suites/rados/objectstore/backends/ceph_objectstore_tool.yaml @@ -0,0 +1,25 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, client.0] +openstack: +- volumes: # attached to each instance + count: 6 + size: 10 # GB +tasks: +- install: +- ceph: + fs: xfs + conf: + global: + osd max object name len: 460 + osd max object namespace len: 64 + osd: + osd objectstore: filestore + log-whitelist: + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(TOO_FEW_PGS\) + - \(POOL_APP_NOT_ENABLED\) +- ceph_objectstore_tool: + objects: 20 diff --git 
a/qa/suites/rados/objectstore/backends/filejournal.yaml b/qa/suites/rados/objectstore/backends/filejournal.yaml new file mode 100644 index 00000000..b0af8008 --- /dev/null +++ b/qa/suites/rados/objectstore/backends/filejournal.yaml @@ -0,0 +1,13 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +openstack: +- volumes: # attached to each instance + count: 2 + size: 10 # GB +tasks: +- install: +- ceph: + fs: xfs +- exec: + client.0: + - ceph_test_filejournal diff --git a/qa/suites/rados/objectstore/backends/filestore-idempotent-aio-journal.yaml b/qa/suites/rados/objectstore/backends/filestore-idempotent-aio-journal.yaml new file mode 100644 index 00000000..58b5197d --- /dev/null +++ b/qa/suites/rados/objectstore/backends/filestore-idempotent-aio-journal.yaml @@ -0,0 +1,14 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +openstack: +- volumes: # attached to each instance + count: 2 + size: 10 # GB +tasks: +- install: +- ceph: + fs: xfs + conf: + global: + journal aio: true +- filestore_idempotent: diff --git a/qa/suites/rados/objectstore/backends/filestore-idempotent.yaml b/qa/suites/rados/objectstore/backends/filestore-idempotent.yaml new file mode 100644 index 00000000..2d3f3c69 --- /dev/null +++ b/qa/suites/rados/objectstore/backends/filestore-idempotent.yaml @@ -0,0 +1,11 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +openstack: +- volumes: # attached to each instance + count: 2 + size: 10 # GB +tasks: +- install: +- ceph: + fs: xfs +- filestore_idempotent: diff --git a/qa/suites/rados/objectstore/backends/fusestore.yaml b/qa/suites/rados/objectstore/backends/fusestore.yaml new file mode 100644 index 00000000..1c34fcae --- /dev/null +++ b/qa/suites/rados/objectstore/backends/fusestore.yaml @@ -0,0 +1,9 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +tasks: +- install: +- workunit: + clients: + all: + - objectstore/test_fuse.sh + diff --git a/qa/suites/rados/objectstore/backends/keyvaluedb.yaml b/qa/suites/rados/objectstore/backends/keyvaluedb.yaml new file mode 100644 index 00000000..efff8d37 --- /dev/null +++ b/qa/suites/rados/objectstore/backends/keyvaluedb.yaml @@ -0,0 +1,8 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +tasks: +- install: +- exec: + client.0: + - mkdir $TESTDIR/kvtest && cd $TESTDIR/kvtest && ceph_test_keyvaluedb + - rm -rf $TESTDIR/kvtest diff --git a/qa/suites/rados/objectstore/backends/objectcacher-stress.yaml b/qa/suites/rados/objectstore/backends/objectcacher-stress.yaml new file mode 100644 index 00000000..e407a391 --- /dev/null +++ b/qa/suites/rados/objectstore/backends/objectcacher-stress.yaml @@ -0,0 +1,14 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +openstack: +- volumes: # attached to each instance + count: 2 + size: 10 # GB +tasks: +- install: +- ceph: + fs: xfs +- workunit: + clients: + all: + - osdc/stress_objectcacher.sh diff --git a/qa/suites/rados/objectstore/backends/objectstore.yaml b/qa/suites/rados/objectstore/backends/objectstore.yaml new file mode 100644 index 00000000..d68270a8 --- /dev/null +++ b/qa/suites/rados/objectstore/backends/objectstore.yaml @@ -0,0 +1,12 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +openstack: +- volumes: # attached to each instance + count: 2 + size: 10 # GB +tasks: +- install: +- exec: + client.0: + - mkdir $TESTDIR/archive/ostest && cd $TESTDIR/archive/ostest && ulimit -Sn 16384 && CEPH_ARGS="--no-log-to-stderr --log-file $TESTDIR/archive/ceph_test_objectstore.log --debug-filestore 20 --debug-bluestore 20" ceph_test_objectstore --gtest_filter=-*/3 
--gtest_catch_exceptions=0 + - rm -rf $TESTDIR/archive/ostest diff --git a/qa/suites/rados/objectstore/supported-random-distro$ b/qa/suites/rados/objectstore/supported-random-distro$ new file mode 120000 index 00000000..7cef21ee --- /dev/null +++ b/qa/suites/rados/objectstore/supported-random-distro$ @@ -0,0 +1 @@ +../basic/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rados/perf/% b/qa/suites/rados/perf/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/perf/.qa b/qa/suites/rados/perf/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/perf/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/perf/ceph.yaml b/qa/suites/rados/perf/ceph.yaml new file mode 100644 index 00000000..912dcbdc --- /dev/null +++ b/qa/suites/rados/perf/ceph.yaml @@ -0,0 +1,14 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] +tasks: +- install: +- ceph: + fs: xfs + wait-for-scrub: false + log-whitelist: + - \(PG_ + - \(OSD_ + - \(OBJECT_ + - overall HEALTH +- rgw: [client.0] +- ssh_keys: diff --git a/qa/suites/rados/perf/distros/ubuntu_16.04.yaml b/qa/suites/rados/perf/distros/ubuntu_16.04.yaml new file mode 120000 index 00000000..a92e4060 --- /dev/null +++ b/qa/suites/rados/perf/distros/ubuntu_16.04.yaml @@ -0,0 +1 @@ +../../../../distros/supported-all-distro/ubuntu_16.04.yaml \ No newline at end of file diff --git a/qa/suites/rados/perf/distros/ubuntu_latest.yaml b/qa/suites/rados/perf/distros/ubuntu_latest.yaml new file mode 120000 index 00000000..f4d73c11 --- /dev/null +++ b/qa/suites/rados/perf/distros/ubuntu_latest.yaml @@ -0,0 +1 @@ +../../../../distros/supported-all-distro/ubuntu_latest.yaml \ No newline at end of file diff --git a/qa/suites/rados/perf/objectstore b/qa/suites/rados/perf/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rados/perf/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rados/perf/openstack.yaml b/qa/suites/rados/perf/openstack.yaml new file mode 100644 index 00000000..f4d1349b --- /dev/null +++ b/qa/suites/rados/perf/openstack.yaml @@ -0,0 +1,4 @@ +openstack: + - volumes: # attached to each instance + count: 3 + size: 30 # GB diff --git a/qa/suites/rados/perf/settings/.qa b/qa/suites/rados/perf/settings/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/perf/settings/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/perf/settings/optimized.yaml b/qa/suites/rados/perf/settings/optimized.yaml new file mode 100644 index 00000000..5ebcb3ae --- /dev/null +++ b/qa/suites/rados/perf/settings/optimized.yaml @@ -0,0 +1,76 @@ +overrides: + ceph: + conf: + mon: + debug mon: "0/0" + debug ms: "0/0" + debug paxos: "0/0" + osd: + debug filestore: "0/0" + debug journal: "0/0" + debug ms: "0/0" + debug osd: "0/0" + global: + auth client required: none + auth cluster required: none + auth service required: none + auth supported: none + + debug lockdep: "0/0" + debug context: "0/0" + debug crush: "0/0" + debug mds: "0/0" + debug mds balancer: "0/0" + debug mds locker: "0/0" + debug mds log: "0/0" + debug mds log expire: "0/0" + debug mds migrator: "0/0" + debug buffer: "0/0" + debug timer: "0/0" + debug filer: "0/0" + debug striper: "0/0" + debug objecter: "0/0" + debug rados: "0/0" + debug rbd: "0/0" + debug rbd mirror: "0/0" + debug rbd replay: "0/0" + debug journaler: "0/0" + debug objectcacher: "0/0" + debug client: 
"0/0" + debug osd: "0/0" + debug optracker: "0/0" + debug objclass: "0/0" + debug filestore: "0/0" + debug journal: "0/0" + debug ms: "0/0" + debug mon: "0/0" + debug monc: "0/0" + debug paxos: "0/0" + debug tp: "0/0" + debug auth: "0/0" + debug crypto: "0/0" + debug finisher: "0/0" + debug heartbeatmap: "0/0" + debug perfcounter: "0/0" + debug rgw: "0/0" + debug rgw sync: "0/0" + debug civetweb: "0/0" + debug javaclient: "0/0" + debug asok: "0/0" + debug throttle: "0/0" + debug refs: "0/0" + debug xio: "0/0" + debug compressor: "0/0" + debug bluestore: "0/0" + debug bluefs: "0/0" + debug bdev: "0/0" + debug kstore: "0/0" + debug rocksdb: "0/0" + debug leveldb: "0/0" + debug memdb: "0/0" + debug kinetic: "0/0" + debug fuse: "0/0" + debug mgr: "0/0" + debug mgrc: "0/0" + debug dpdk: "0/0" + debug eventtrace: "0/0" diff --git a/qa/suites/rados/perf/workloads/.qa b/qa/suites/rados/perf/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/perf/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/perf/workloads/cosbench_64K_read_write.yaml b/qa/suites/rados/perf/workloads/cosbench_64K_read_write.yaml new file mode 100644 index 00000000..e5bad129 --- /dev/null +++ b/qa/suites/rados/perf/workloads/cosbench_64K_read_write.yaml @@ -0,0 +1,26 @@ +overrides: + rgw: + data_pool_pg_size: 64 + index_pool_pg_size: 64 +tasks: +- cbt: + branch: 'nautilus' + benchmarks: + cosbench: + obj_size: [64KB] + osd_ra: [4096] + workers: 1 + containers_max: 1000 + objects_max: 100 + mode: [mix] + template: [default] + rampup: 30 + runtime: 300 + rampdown: 30 + containers: ["u(1,100)"] + objects: ["u(1,100)"] + ratio: [60] + cluster: + user: 'ubuntu' + osds_per_node: 1 + iterations: 1 diff --git a/qa/suites/rados/perf/workloads/cosbench_64K_write.yaml b/qa/suites/rados/perf/workloads/cosbench_64K_write.yaml new file mode 100644 index 00000000..81973da2 --- /dev/null +++ b/qa/suites/rados/perf/workloads/cosbench_64K_write.yaml @@ -0,0 +1,26 @@ +overrides: + rgw: + data_pool_pg_size: 64 + index_pool_pg_size: 64 +tasks: +- cbt: + branch: 'nautilus' + benchmarks: + cosbench: + obj_size: [64KB] + osd_ra: [4096] + workers: 1 + containers_max: 1000 + objects_max: 100 + mode: [write] + template: [default] + rampup: 30 + runtime: 300 + rampdown: 30 + containers: ["u(1,100)"] + objects: ["u(1,100)"] + ratio: [100] + cluster: + user: 'ubuntu' + osds_per_node: 1 + iterations: 1 diff --git a/qa/suites/rados/perf/workloads/fio_4K_rand_read.yaml b/qa/suites/rados/perf/workloads/fio_4K_rand_read.yaml new file mode 100644 index 00000000..61fdb5a9 --- /dev/null +++ b/qa/suites/rados/perf/workloads/fio_4K_rand_read.yaml @@ -0,0 +1,25 @@ +tasks: +- cbt: + branch: 'nautilus' + benchmarks: + librbdfio: + op_size: [4096] + time: 60 + mode: ['randread'] + norandommap: True + vol_size: 4096 + procs_per_volume: [1] + volumes_per_client: [2] + iodepth: [32] + osd_ra: [4096] + pool_profile: 'rbd' + log_avg_msec: 100 + cluster: + user: 'ubuntu' + osds_per_node: 3 + iterations: 1 + pool_profiles: + rbd: + pg_size: 128 + pgp_size: 128 + replication: 3 diff --git a/qa/suites/rados/perf/workloads/fio_4K_rand_rw.yaml b/qa/suites/rados/perf/workloads/fio_4K_rand_rw.yaml new file mode 100644 index 00000000..9de8196b --- /dev/null +++ b/qa/suites/rados/perf/workloads/fio_4K_rand_rw.yaml @@ -0,0 +1,25 @@ +tasks: +- cbt: + branch: 'nautilus' + benchmarks: + librbdfio: + op_size: [4096] + time: 60 + mode: ['randrw'] + norandommap: True + vol_size: 4096 + procs_per_volume: 
[1] + volumes_per_client: [2] + iodepth: [32] + osd_ra: [4096] + pool_profile: 'rbd' + log_avg_msec: 100 + cluster: + user: 'ubuntu' + osds_per_node: 3 + iterations: 1 + pool_profiles: + rbd: + pg_size: 128 + pgp_size: 128 + replication: 3 diff --git a/qa/suites/rados/perf/workloads/fio_4M_rand_read.yaml b/qa/suites/rados/perf/workloads/fio_4M_rand_read.yaml new file mode 100644 index 00000000..f64e9bb8 --- /dev/null +++ b/qa/suites/rados/perf/workloads/fio_4M_rand_read.yaml @@ -0,0 +1,25 @@ +tasks: +- cbt: + branch: 'nautilus' + benchmarks: + librbdfio: + op_size: [4194304] + time: 60 + mode: ['randread'] + norandommap: True + vol_size: 4096 + procs_per_volume: [1] + volumes_per_client: [2] + iodepth: [32] + osd_ra: [4096] + pool_profile: 'rbd' + log_avg_msec: 100 + cluster: + user: 'ubuntu' + osds_per_node: 3 + iterations: 1 + pool_profiles: + rbd: + pg_size: 128 + pgp_size: 128 + replication: 3 diff --git a/qa/suites/rados/perf/workloads/fio_4M_rand_rw.yaml b/qa/suites/rados/perf/workloads/fio_4M_rand_rw.yaml new file mode 100644 index 00000000..369fb268 --- /dev/null +++ b/qa/suites/rados/perf/workloads/fio_4M_rand_rw.yaml @@ -0,0 +1,25 @@ +tasks: +- cbt: + branch: 'nautilus' + benchmarks: + librbdfio: + op_size: [4194304] + time: 60 + mode: ['randrw'] + norandommap: True + vol_size: 4096 + procs_per_volume: [1] + volumes_per_client: [2] + iodepth: [32] + osd_ra: [4096] + pool_profile: 'rbd' + log_avg_msec: 100 + cluster: + user: 'ubuntu' + osds_per_node: 3 + iterations: 1 + pool_profiles: + rbd: + pg_size: 128 + pgp_size: 128 + replication: 3 diff --git a/qa/suites/rados/perf/workloads/fio_4M_rand_write.yaml b/qa/suites/rados/perf/workloads/fio_4M_rand_write.yaml new file mode 100644 index 00000000..2ac8ef9e --- /dev/null +++ b/qa/suites/rados/perf/workloads/fio_4M_rand_write.yaml @@ -0,0 +1,25 @@ +tasks: +- cbt: + branch: 'nautilus' + benchmarks: + librbdfio: + op_size: [4194304] + time: 60 + mode: ['randwrite'] + norandommap: True + vol_size: 4096 + procs_per_volume: [1] + volumes_per_client: [2] + iodepth: [32] + osd_ra: [4096] + pool_profile: 'rbd' + log_avg_msec: 100 + cluster: + user: 'ubuntu' + osds_per_node: 3 + iterations: 1 + pool_profiles: + rbd: + pg_size: 128 + pgp_size: 128 + replication: 3 diff --git a/qa/suites/rados/perf/workloads/radosbench_4K_rand_read.yaml b/qa/suites/rados/perf/workloads/radosbench_4K_rand_read.yaml new file mode 100644 index 00000000..420f1950 --- /dev/null +++ b/qa/suites/rados/perf/workloads/radosbench_4K_rand_read.yaml @@ -0,0 +1,25 @@ +tasks: +- cbt: + branch: 'nautilus' + benchmarks: + radosbench: + concurrent_ops: 4 + concurrent_procs: 2 + op_size: [4096] + pool_monitoring_list: + - collectl + pool_profile: 'replicated' + run_monitoring_list: + - collectl + time: 60 + write_only: false + readmode: 'rand' + cluster: + user: 'ubuntu' + osds_per_node: 3 + iterations: 1 + pool_profiles: + replicated: + pg_size: 256 + pgp_size: 256 + replication: 'replicated' diff --git a/qa/suites/rados/perf/workloads/radosbench_4K_seq_read.yaml b/qa/suites/rados/perf/workloads/radosbench_4K_seq_read.yaml new file mode 100644 index 00000000..e32cabaa --- /dev/null +++ b/qa/suites/rados/perf/workloads/radosbench_4K_seq_read.yaml @@ -0,0 +1,24 @@ +tasks: +- cbt: + branch: 'nautilus' + benchmarks: + radosbench: + concurrent_ops: 4 + concurrent_procs: 2 + op_size: [4096] + pool_monitoring_list: + - collectl + pool_profile: 'replicated' + run_monitoring_list: + - collectl + time: 60 + write_only: false + cluster: + user: 'ubuntu' + osds_per_node: 3 + iterations: 1 + 
pool_profiles: + replicated: + pg_size: 256 + pgp_size: 256 + replication: 'replicated' diff --git a/qa/suites/rados/perf/workloads/radosbench_4M_rand_read.yaml b/qa/suites/rados/perf/workloads/radosbench_4M_rand_read.yaml new file mode 100644 index 00000000..27b2466a --- /dev/null +++ b/qa/suites/rados/perf/workloads/radosbench_4M_rand_read.yaml @@ -0,0 +1,25 @@ +tasks: +- cbt: + branch: 'nautilus' + benchmarks: + radosbench: + concurrent_ops: 4 + concurrent_procs: 2 + op_size: [4194304] + pool_monitoring_list: + - collectl + pool_profile: 'replicated' + run_monitoring_list: + - collectl + time: 60 + write_only: false + readmode: 'rand' + cluster: + user: 'ubuntu' + osds_per_node: 3 + iterations: 1 + pool_profiles: + replicated: + pg_size: 256 + pgp_size: 256 + replication: 'replicated' diff --git a/qa/suites/rados/perf/workloads/radosbench_4M_seq_read.yaml b/qa/suites/rados/perf/workloads/radosbench_4M_seq_read.yaml new file mode 100644 index 00000000..a7107d85 --- /dev/null +++ b/qa/suites/rados/perf/workloads/radosbench_4M_seq_read.yaml @@ -0,0 +1,24 @@ +tasks: +- cbt: + branch: 'nautilus' + benchmarks: + radosbench: + concurrent_ops: 4 + concurrent_procs: 2 + op_size: [4194304] + pool_monitoring_list: + - collectl + pool_profile: 'replicated' + run_monitoring_list: + - collectl + time: 60 + write_only: false + cluster: + user: 'ubuntu' + osds_per_node: 3 + iterations: 1 + pool_profiles: + replicated: + pg_size: 256 + pgp_size: 256 + replication: 'replicated' diff --git a/qa/suites/rados/perf/workloads/radosbench_4M_write.yaml b/qa/suites/rados/perf/workloads/radosbench_4M_write.yaml new file mode 100644 index 00000000..0465e124 --- /dev/null +++ b/qa/suites/rados/perf/workloads/radosbench_4M_write.yaml @@ -0,0 +1,24 @@ +tasks: +- cbt: + branch: 'nautilus' + benchmarks: + radosbench: + concurrent_ops: 4 + concurrent_procs: 2 + op_size: [4194304] + pool_monitoring_list: + - collectl + pool_profile: 'replicated' + run_monitoring_list: + - collectl + time: 60 + write_only: true + cluster: + user: 'ubuntu' + osds_per_node: 3 + iterations: 1 + pool_profiles: + replicated: + pg_size: 256 + pgp_size: 256 + replication: 'replicated' diff --git a/qa/suites/rados/perf/workloads/sample_fio.yaml b/qa/suites/rados/perf/workloads/sample_fio.yaml new file mode 100644 index 00000000..c4bb2422 --- /dev/null +++ b/qa/suites/rados/perf/workloads/sample_fio.yaml @@ -0,0 +1,25 @@ +tasks: +- cbt: + branch: 'nautilus' + benchmarks: + librbdfio: + op_size: [4096] + time: 60 + mode: ['randwrite'] + norandommap: True + vol_size: 4096 + procs_per_volume: [1] + volumes_per_client: [2] + iodepth: [32] + osd_ra: [4096] + pool_profile: 'rbd' + log_avg_msec: 100 + cluster: + user: 'ubuntu' + osds_per_node: 3 + iterations: 1 + pool_profiles: + rbd: + pg_size: 128 + pgp_size: 128 + replication: 3 diff --git a/qa/suites/rados/perf/workloads/sample_radosbench.yaml b/qa/suites/rados/perf/workloads/sample_radosbench.yaml new file mode 100644 index 00000000..9d9f0ac2 --- /dev/null +++ b/qa/suites/rados/perf/workloads/sample_radosbench.yaml @@ -0,0 +1,24 @@ +tasks: +- cbt: + branch: 'nautilus' + benchmarks: + radosbench: + concurrent_ops: 4 + concurrent_procs: 2 + op_size: [4096] + pool_monitoring_list: + - collectl + pool_profile: 'replicated' + run_monitoring_list: + - collectl + time: 60 + write_only: true + cluster: + user: 'ubuntu' + osds_per_node: 3 + iterations: 1 + pool_profiles: + replicated: + pg_size: 256 + pgp_size: 256 + replication: 'replicated' diff --git a/qa/suites/rados/rest/% b/qa/suites/rados/rest/% new 
file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/rest/.qa b/qa/suites/rados/rest/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/rest/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/rest/mgr-restful.yaml b/qa/suites/rados/rest/mgr-restful.yaml new file mode 100644 index 00000000..16653fc9 --- /dev/null +++ b/qa/suites/rados/rest/mgr-restful.yaml @@ -0,0 +1,29 @@ +openstack: +- volumes: # attached to each instance + count: 3 + size: 10 # GB +roles: +- [mon.a, mgr.x, osd.0, osd.1, osd.2, mds.a, client.a] +tasks: +- install: +- ceph: + log-whitelist: + - overall HEALTH_ + - \(MGR_DOWN\) + - \(PG_ + - \(OSD_ + - \(OBJECT_ +- exec: + mon.a: + - ceph restful create-key admin + - ceph restful create-self-signed-cert + - ceph restful restart +- workunit: + clients: + client.a: + - rest/test-restful.sh +- exec: + mon.a: + - ceph restful delete-key admin + - ceph restful list-keys | jq ".admin" | grep null + diff --git a/qa/suites/rados/rest/supported-random-distro$ b/qa/suites/rados/rest/supported-random-distro$ new file mode 120000 index 00000000..7cef21ee --- /dev/null +++ b/qa/suites/rados/rest/supported-random-distro$ @@ -0,0 +1 @@ +../basic/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rados/singleton-bluestore/% b/qa/suites/rados/singleton-bluestore/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/singleton-bluestore/.qa b/qa/suites/rados/singleton-bluestore/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/singleton-bluestore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/singleton-bluestore/all/.qa b/qa/suites/rados/singleton-bluestore/all/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/singleton-bluestore/all/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/singleton-bluestore/all/cephtool.yaml b/qa/suites/rados/singleton-bluestore/all/cephtool.yaml new file mode 100644 index 00000000..0567b603 --- /dev/null +++ b/qa/suites/rados/singleton-bluestore/all/cephtool.yaml @@ -0,0 +1,44 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- ceph: + log-whitelist: + - but it is still running + - had wrong client addr + - had wrong cluster addr + - must scrub before tier agent can activate + - failsafe engaged, dropping updates + - failsafe disengaged, no longer dropping updates + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(SMALLER_PG_NUM\) + - \(SMALLER_PGP_NUM\) + - \(CACHE_POOL_NO_HIT_SET\) + - \(CACHE_POOL_NEAR_FULL\) + - \(FS_WITH_FAILED_MDS\) + - \(FS_DEGRADED\) + - \(POOL_BACKFILLFULL\) + - \(POOL_FULL\) + - \(SMALLER_PGP_NUM\) + - \(POOL_NEARFULL\) + - \(POOL_APP_NOT_ENABLED\) + - \(AUTH_BAD_CAPS\) +- workunit: + clients: + all: + - cephtool + - mon/pool_ops.sh diff --git a/qa/suites/rados/singleton-bluestore/msgr b/qa/suites/rados/singleton-bluestore/msgr new file mode 120000 index 00000000..57bee80d --- /dev/null +++ b/qa/suites/rados/singleton-bluestore/msgr @@ -0,0 +1 @@ +.qa/msgr \ No newline at end of file diff --git a/qa/suites/rados/singleton-bluestore/msgr-failures/.qa b/qa/suites/rados/singleton-bluestore/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ 
b/qa/suites/rados/singleton-bluestore/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/singleton-bluestore/msgr-failures/few.yaml b/qa/suites/rados/singleton-bluestore/msgr-failures/few.yaml new file mode 100644 index 00000000..4326fe23 --- /dev/null +++ b/qa/suites/rados/singleton-bluestore/msgr-failures/few.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rados/singleton-bluestore/msgr-failures/many.yaml b/qa/suites/rados/singleton-bluestore/msgr-failures/many.yaml new file mode 100644 index 00000000..59ca5c0f --- /dev/null +++ b/qa/suites/rados/singleton-bluestore/msgr-failures/many.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 1000 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rados/singleton-bluestore/objectstore/.qa b/qa/suites/rados/singleton-bluestore/objectstore/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/singleton-bluestore/objectstore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/singleton-bluestore/objectstore/bluestore-bitmap.yaml b/qa/suites/rados/singleton-bluestore/objectstore/bluestore-bitmap.yaml new file mode 120000 index 00000000..a59cf517 --- /dev/null +++ b/qa/suites/rados/singleton-bluestore/objectstore/bluestore-bitmap.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-bitmap.yaml \ No newline at end of file diff --git a/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-lz4.yaml b/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-lz4.yaml new file mode 120000 index 00000000..4fb2ff6c --- /dev/null +++ b/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-lz4.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-comp-lz4.yaml \ No newline at end of file diff --git a/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-snappy.yaml b/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-snappy.yaml new file mode 120000 index 00000000..888caf55 --- /dev/null +++ b/qa/suites/rados/singleton-bluestore/objectstore/bluestore-comp-snappy.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-comp-snappy.yaml \ No newline at end of file diff --git a/qa/suites/rados/singleton-bluestore/rados.yaml b/qa/suites/rados/singleton-bluestore/rados.yaml new file mode 120000 index 00000000..d256979c --- /dev/null +++ b/qa/suites/rados/singleton-bluestore/rados.yaml @@ -0,0 +1 @@ +.qa/config/rados.yaml \ No newline at end of file diff --git a/qa/suites/rados/singleton-bluestore/supported-random-distro$ b/qa/suites/rados/singleton-bluestore/supported-random-distro$ new file mode 120000 index 00000000..7cef21ee --- /dev/null +++ b/qa/suites/rados/singleton-bluestore/supported-random-distro$ @@ -0,0 +1 @@ +../basic/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rados/singleton-flat/.qa b/qa/suites/rados/singleton-flat/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/singleton-flat/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/singleton-flat/valgrind-leaks.yaml b/qa/suites/rados/singleton-flat/valgrind-leaks.yaml new file mode 100644 index 00000000..d3180d9b --- /dev/null +++ b/qa/suites/rados/singleton-flat/valgrind-leaks.yaml @@ -0,0 +1,36 @@ +# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126 +os_type: centos +os_version: '7.8' 
+ +openstack: + - volumes: # attached to each instance + count: 2 + size: 10 # GB + +overrides: + install: + ceph: + debuginfo: true + ceph: + log-whitelist: + - overall HEALTH_ + - \(PG_ + conf: + global: + osd heartbeat grace: 40 + debug deliberately leak memory: true + osd max object name len: 460 + osd max object namespace len: 64 + mon: + mon osd crush smoke test: false + osd: + osd fast shutdown: false + valgrind: + mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes] + osd: [--tool=memcheck] +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + expect_valgrind_errors: true diff --git a/qa/suites/rados/singleton-nomsgr/% b/qa/suites/rados/singleton-nomsgr/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/singleton-nomsgr/.qa b/qa/suites/rados/singleton-nomsgr/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/singleton-nomsgr/all/.qa b/qa/suites/rados/singleton-nomsgr/all/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml b/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml new file mode 100644 index 00000000..49f06b9a --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/admin_socket_output.yaml @@ -0,0 +1,24 @@ +openstack: + - volumes: # attached to each instance + count: 2 + size: 10 # GB +roles: +- [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0] +overrides: + ceph: + log-whitelist: + - MDS in read-only mode + - force file system read-only + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_FULL\) + - \(MDS_READ_ONLY\) + - \(POOL_FULL\) +tasks: +- install: +- ceph: +- rgw: + - client.0 +- exec: + client.0: + - ceph_test_admin_socket_output --all diff --git a/qa/suites/rados/singleton-nomsgr/all/balancer.yaml b/qa/suites/rados/singleton-nomsgr/all/balancer.yaml new file mode 100644 index 00000000..75410508 --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/balancer.yaml @@ -0,0 +1,10 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] +tasks: +- install: +- ceph: + fs: xfs +- cram: + clients: + client.0: + - src/test/cli-integration/balancer/misplaced.t diff --git a/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml b/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml new file mode 100644 index 00000000..0a4bc498 --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/cache-fs-trunc.yaml @@ -0,0 +1,52 @@ +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +roles: +- [mon.a, mgr.x, mds.a, osd.0, osd.1, osd.2, client.0, client.1] +tasks: +- install: +- ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NO_HIT_SET\) + conf: + global: + osd max object name len: 460 + osd max object namespace len: 64 + debug client: 20 + debug mds: 20 + debug ms: 1 +- exec: + client.0: + - ceph osd pool create data_cache 4 + - ceph osd tier add cephfs_data data_cache + - ceph osd tier cache-mode data_cache writeback + - ceph osd tier set-overlay cephfs_data data_cache + - ceph osd pool set data_cache hit_set_type bloom + - ceph osd pool set data_cache hit_set_count 8 + - ceph osd pool set data_cache hit_set_period 3600 + - ceph osd pool set data_cache min_read_recency_for_promote 0 +- ceph-fuse: +- exec: + client.0: + - sudo chmod 777 $TESTDIR/mnt.0/ 
+ - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5 + - ls -al $TESTDIR/mnt.0/foo + - truncate --size 0 $TESTDIR/mnt.0/foo + - ls -al $TESTDIR/mnt.0/foo + - dd if=/dev/urandom of=$TESTDIR/mnt.0/foo bs=1M count=5 + - ls -al $TESTDIR/mnt.0/foo + - cp $TESTDIR/mnt.0/foo /tmp/foo + - sync + - rados -p data_cache ls - + - sleep 10 + - rados -p data_cache ls - + - rados -p data_cache cache-flush-evict-all + - rados -p data_cache ls - + - sleep 1 +- exec: + client.1: + - hexdump -C /tmp/foo | head + - hexdump -C $TESTDIR/mnt.1/foo | head + - cmp $TESTDIR/mnt.1/foo /tmp/foo diff --git a/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml b/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml new file mode 100644 index 00000000..a386e74e --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/ceph-kvstore-tool.yaml @@ -0,0 +1,21 @@ +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +roles: +- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] + +overrides: + ceph: + log-whitelist: + - but it is still running + - overall HEALTH_ + - \(POOL_APP_NOT_ENABLED\) + +tasks: +- install: +- ceph: +- workunit: + clients: + all: + - cephtool/test_kvstore_tool.sh diff --git a/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml b/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml new file mode 100644 index 00000000..530dc42a --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/ceph-post-file.yaml @@ -0,0 +1,12 @@ +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +roles: +- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] +tasks: +- install: +- workunit: + clients: + all: + - post-file.sh diff --git a/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml b/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml new file mode 100644 index 00000000..e0887b85 --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/export-after-evict.yaml @@ -0,0 +1,38 @@ +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 +tasks: +- install: +- ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NO_HIT_SET\) + conf: + global: + osd max object name len: 460 + osd max object namespace len: 64 +- exec: + client.0: + - ceph osd pool create base-pool 4 + - ceph osd pool application enable base-pool rados + - ceph osd pool create cache-pool 4 + - ceph osd tier add base-pool cache-pool + - ceph osd tier cache-mode cache-pool writeback + - ceph osd tier set-overlay base-pool cache-pool + - dd if=/dev/urandom of=$TESTDIR/foo bs=1M count=1 + - rbd import --image-format 2 $TESTDIR/foo base-pool/bar + - rbd snap create base-pool/bar@snap + - rados -p base-pool cache-flush-evict-all + - rbd export base-pool/bar $TESTDIR/bar + - rbd export base-pool/bar@snap $TESTDIR/snap + - cmp $TESTDIR/foo $TESTDIR/bar + - cmp $TESTDIR/foo $TESTDIR/snap + - rm $TESTDIR/foo $TESTDIR/bar $TESTDIR/snap diff --git a/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml b/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml new file mode 100644 index 00000000..944b2f71 --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/full-tiering.yaml @@ -0,0 +1,38 @@ +# verify #13098 fix +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +roles: +- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] +overrides: + ceph: + log-whitelist: + - is full + - overall HEALTH_ + - \(POOL_FULL\) + - \(POOL_NEAR_FULL\) + - \(CACHE_POOL_NO_HIT_SET\) + - 
\(CACHE_POOL_NEAR_FULL\) +tasks: +- install: +- ceph: + conf: + global: + osd max object name len: 460 + osd max object namespace len: 64 +- exec: + client.0: + - ceph osd pool create ec-ca 1 1 + - ceph osd pool create ec 1 1 erasure default + - ceph osd pool application enable ec rados + - ceph osd tier add ec ec-ca + - ceph osd tier cache-mode ec-ca readproxy + - ceph osd tier set-overlay ec ec-ca + - ceph osd pool set ec-ca hit_set_type bloom + - ceph osd pool set-quota ec-ca max_bytes 20480000 + - ceph osd pool set-quota ec max_bytes 20480000 + - ceph osd pool set ec-ca target_max_bytes 20480000 + - timeout 30 rados -p ec-ca bench 30 write || true + - ceph osd pool set-quota ec-ca max_bytes 0 + - ceph osd pool set-quota ec max_bytes 0 diff --git a/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml b/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml new file mode 100644 index 00000000..a28582fd --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/health-warnings.yaml @@ -0,0 +1,20 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7, osd.8, osd.9, client.0] +tasks: +- install: +- ceph: + conf: + osd: +# we may land on ext4 + osd max object name len: 400 + osd max object namespace len: 64 + log-whitelist: + - but it is still running + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ +- workunit: + clients: + all: + - rados/test_health_warnings.sh diff --git a/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml b/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml new file mode 100644 index 00000000..62794b4b --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/large-omap-object-warnings.yaml @@ -0,0 +1,27 @@ +openstack: + - volumes: # attached to each instance + count: 2 + size: 10 # GB +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +overrides: + ceph: + log-whitelist: + - \(OSDMAP_FLAGS\) + - \(OSD_FULL\) + - \(MDS_READ_ONLY\) + - large omap objects + - Large omap object found + - application not enabled + conf: + osd: + osd scrub backoff ratio: 0 + osd deep scrub large omap object value sum threshold: 8800000 + osd deep scrub large omap object key threshold: 20000 +tasks: +- install: +- ceph: +- workunit: + clients: + all: + - rados/test_large_omap_detection.py diff --git a/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml b/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml new file mode 100644 index 00000000..9fbdf0e0 --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/lazy_omap_stats_output.yaml @@ -0,0 +1,16 @@ +openstack: + - volumes: # attached to each instance + count: 2 + size: 10 # GB +roles: +- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] +overrides: + ceph: + log-whitelist: + - \(POOL_APP_NOT_ENABLED\) +tasks: +- install: +- ceph: +- exec: + client.0: + - ceph_test_lazy_omap_stats diff --git a/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml b/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml new file mode 100644 index 00000000..2a96b94d --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/librados_hello_world.yaml @@ -0,0 +1,22 @@ +roles: +- [mon.a, mds.a, mgr.x, osd.0, osd.1, client.0] +overrides: + ceph: + log-whitelist: + - \(POOL_APP_NOT_ENABLED\) +tasks: +- install: + extra_packages: + deb: + - libradosstriper-dev + - librados-dev + - libradospp-dev + rpm: + - libradosstriper-devel + - librados-devel + - libradospp-devel +- ceph: +- workunit: + clients: + all: + - rados/test_librados_build.sh diff --git 
a/qa/suites/rados/singleton-nomsgr/all/msgr.yaml b/qa/suites/rados/singleton-nomsgr/all/msgr.yaml new file mode 100644 index 00000000..98b50952 --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/msgr.yaml @@ -0,0 +1,21 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +tasks: +- install: +- exec: + client.0: + - ceph_test_async_driver + - ceph_test_msgr +openstack: + - machine: + disk: 40 # GB + ram: 15000 # MB + cpus: 1 + volumes: # attached to each instance + count: 0 + size: 1 # GB +overrides: + ceph: + conf: + client: + debug ms: 20 diff --git a/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml b/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml new file mode 100644 index 00000000..9800b5dd --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/multi-backfill-reject.yaml @@ -0,0 +1,48 @@ +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 +- - osd.3 + - osd.4 + - osd.5 +tasks: +- install: +- ceph: + log-whitelist: + - overall HEALTH_ + - \(PG_ + - \(OSD_ + - \(OBJECT_ + conf: + osd: + osd debug reject backfill probability: .3 + osd min pg log entries: 25 + osd max pg log entries: 100 + osd max object name len: 460 + osd max object namespace len: 64 +- exec: + client.0: + - sudo ceph osd pool create foo 64 + - sudo ceph osd pool application enable foo rados + - rados -p foo bench 60 write -b 1024 --no-cleanup + - sudo ceph osd pool set foo size 3 + - sudo ceph osd out 0 1 +- sleep: + duration: 60 +- exec: + client.0: + - sudo ceph osd in 0 1 +- sleep: + duration: 60 +- exec: + client.0: + - sudo ceph osd pool set foo size 2 +- sleep: + duration: 300 diff --git a/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml b/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml new file mode 100644 index 00000000..c30aebb5 --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/pool-access.yaml @@ -0,0 +1,13 @@ +openstack: + - volumes: # attached to each instance + count: 2 + size: 10 # GB +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: +- workunit: + clients: + all: + - rados/test_pool_access.sh diff --git a/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml b/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml new file mode 100644 index 00000000..ce0cbd9f --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/all/recovery-unfound-found.yaml @@ -0,0 +1,58 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mgr.x + - osd.0 + - osd.1 +openstack: + - volumes: # attached to each instance + count: 2 + size: 20 # GB +tasks: +- install: +- ceph: + fs: xfs + conf: + osd: + osd recovery sleep: .1 + osd objectstore: filestore + log-whitelist: + - \(POOL_APP_NOT_ENABLED\) + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(OBJECT_ + - \(PG_ + - overall HEALTH +- exec: + osd.0: + - ceph osd pool create foo 32 + - ceph osd pool application enable foo foo + - rados -p foo bench 30 write -b 4096 --no-cleanup + - ceph osd set noup +- ceph.restart: + daemons: [osd.0] + wait-for-up: false + wait-for-healthy: false +- exec: + osd.0: + - sleep 5 + - rados -p foo bench 3 write -b 4096 --no-cleanup + - ceph osd unset noup + - sleep 10 + - ceph osd set noup +- ceph.restart: + daemons: [osd.1] + wait-for-up: false + wait-for-healthy: false +- exec: + osd.0: + - ceph osd out 0 + - sleep 10 + - ceph osd unset noup +- ceph.healthy: + wait-for-healthy: false # only wait for osds up and pgs clean, ignore misplaced +- exec: + osd.0: + - ceph osd in 0 
+- ceph.healthy: diff --git a/qa/suites/rados/singleton-nomsgr/rados.yaml b/qa/suites/rados/singleton-nomsgr/rados.yaml new file mode 120000 index 00000000..d256979c --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/rados.yaml @@ -0,0 +1 @@ +.qa/config/rados.yaml \ No newline at end of file diff --git a/qa/suites/rados/singleton-nomsgr/supported-random-distro$ b/qa/suites/rados/singleton-nomsgr/supported-random-distro$ new file mode 120000 index 00000000..7cef21ee --- /dev/null +++ b/qa/suites/rados/singleton-nomsgr/supported-random-distro$ @@ -0,0 +1 @@ +../basic/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rados/singleton/% b/qa/suites/rados/singleton/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/singleton/.qa b/qa/suites/rados/singleton/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/singleton/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/singleton/all/.qa b/qa/suites/rados/singleton/all/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/singleton/all/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/singleton/all/admin-socket.yaml b/qa/suites/rados/singleton/all/admin-socket.yaml new file mode 100644 index 00000000..13af8131 --- /dev/null +++ b/qa/suites/rados/singleton/all/admin-socket.yaml @@ -0,0 +1,26 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - client.a +openstack: + - volumes: # attached to each instance + count: 2 + size: 10 # GB +tasks: +- install: +- ceph: +- admin_socket: + osd.0: + version: + git_version: + help: + config show: + config help: + config set filestore_dump_file /tmp/foo: + perf dump: + perf schema: + get_heap_property tcmalloc.max_total_thread_cache_byte: + set_heap_property tcmalloc.max_total_thread_cache_bytes 67108864: + set_heap_property tcmalloc.max_total_thread_cache_bytes 33554432: diff --git a/qa/suites/rados/singleton/all/deduptool.yaml b/qa/suites/rados/singleton/all/deduptool.yaml new file mode 100644 index 00000000..f2c54f1a --- /dev/null +++ b/qa/suites/rados/singleton/all/deduptool.yaml @@ -0,0 +1,26 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 +openstack: + - volumes: # attached to each instance + count: 2 + size: 10 # GB +tasks: +- install: +- ceph: + log-whitelist: + - but it is still running + - had wrong client addr + - had wrong cluster addr + - reached quota + - overall HEALTH_ + - \(POOL_FULL\) + - \(POOL_APP_NOT_ENABLED\) +- workunit: + clients: + all: + - rados/test_dedup_tool.sh diff --git a/qa/suites/rados/singleton/all/divergent_priors.yaml b/qa/suites/rados/singleton/all/divergent_priors.yaml new file mode 100644 index 00000000..743d73d4 --- /dev/null +++ b/qa/suites/rados/singleton/all/divergent_priors.yaml @@ -0,0 +1,26 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB + +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(OBJECT_ + - \(POOL_APP_NOT_ENABLED\) + +tasks: +- install: +- ceph: +- divergent_priors: diff --git a/qa/suites/rados/singleton/all/divergent_priors2.yaml b/qa/suites/rados/singleton/all/divergent_priors2.yaml new file mode 100644 index 00000000..2da2c466 --- /dev/null +++ b/qa/suites/rados/singleton/all/divergent_priors2.yaml @@ -0,0 +1,26 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - 
client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB + +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(OBJECT_ + - \(POOL_APP_NOT_ENABLED\) + +tasks: +- install: +- ceph: +- divergent_priors2: diff --git a/qa/suites/rados/singleton/all/dump-stuck.yaml b/qa/suites/rados/singleton/all/dump-stuck.yaml new file mode 100644 index 00000000..59085ffa --- /dev/null +++ b/qa/suites/rados/singleton/all/dump-stuck.yaml @@ -0,0 +1,19 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 +openstack: + - volumes: # attached to each instance + count: 2 + size: 10 # GB +tasks: +- install: +- ceph: + log-whitelist: + - but it is still running + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ +- dump_stuck: diff --git a/qa/suites/rados/singleton/all/ec-lost-unfound.yaml b/qa/suites/rados/singleton/all/ec-lost-unfound.yaml new file mode 100644 index 00000000..aeb4b278 --- /dev/null +++ b/qa/suites/rados/singleton/all/ec-lost-unfound.yaml @@ -0,0 +1,26 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - osd.3 +openstack: + - volumes: # attached to each instance + count: 4 + size: 10 # GB +tasks: +- install: +- ceph: + log-whitelist: + - objects unfound and apparently lost + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(OBJECT_ + - \(SLOW_OPS\) + - slow request +- ec_lost_unfound: diff --git a/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml b/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml new file mode 100644 index 00000000..e8201ee0 --- /dev/null +++ b/qa/suites/rados/singleton/all/erasure-code-nonregression.yaml @@ -0,0 +1,17 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- workunit: + clients: + all: + - erasure-code/encode-decode-non-regression.sh diff --git a/qa/suites/rados/singleton/all/lost-unfound-delete.yaml b/qa/suites/rados/singleton/all/lost-unfound-delete.yaml new file mode 100644 index 00000000..636cb944 --- /dev/null +++ b/qa/suites/rados/singleton/all/lost-unfound-delete.yaml @@ -0,0 +1,25 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mgr.x + - osd.0 + - osd.1 + - osd.2 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- ceph: + log-whitelist: + - objects unfound and apparently lost + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(OBJECT_ + - \(SLOW_OPS\) + - slow request +- rep_lost_unfound_delete: diff --git a/qa/suites/rados/singleton/all/lost-unfound.yaml b/qa/suites/rados/singleton/all/lost-unfound.yaml new file mode 100644 index 00000000..2f60db16 --- /dev/null +++ b/qa/suites/rados/singleton/all/lost-unfound.yaml @@ -0,0 +1,25 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mgr.x + - osd.0 + - osd.1 + - osd.2 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- ceph: + log-whitelist: + - objects unfound and apparently lost + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(OBJECT_ + - \(SLOW_OPS\) + - slow request +- lost_unfound: diff --git a/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml b/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml new file mode 100644 index 00000000..b8a7feae --- /dev/null +++ b/qa/suites/rados/singleton/all/max-pg-per-osd.from-mon.yaml @@ -0,0 +1,27 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - 
osd.1 +openstack: + - volumes: # attached to each instance + count: 2 + size: 10 # GB +overrides: + ceph: + create_rbd_pool: False + conf: + mon: + osd pool default size: 2 + osd: + mon max pg per osd : 2 + osd max pg per osd hard ratio : 1 + log-whitelist: + - \(TOO_FEW_PGS\) + - \(PENDING_CREATING_PGS\) +tasks: +- install: +- ceph: +- osd_max_pg_per_osd: + test_create_from_mon: True + pg_num: 2 diff --git a/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml b/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml new file mode 100644 index 00000000..8ffc9a31 --- /dev/null +++ b/qa/suites/rados/singleton/all/max-pg-per-osd.from-primary.yaml @@ -0,0 +1,32 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - osd.3 +openstack: + - volumes: # attached to each instance + count: 4 + size: 10 # GB +overrides: + ceph: + create_rbd_pool: False + conf: + mon: + osd pool default size: 2 + osd: + mon max pg per osd : 1 + osd max pg per osd hard ratio : 1 + log-whitelist: + - \(TOO_FEW_PGS\) + - \(PG_ + - \(PENDING_CREATING_PGS\) +tasks: +- install: +- ceph: +- osd_max_pg_per_osd: + test_create_from_mon: False + pg_num: 1 + pool_size: 2 + from_primary: True diff --git a/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml b/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml new file mode 100644 index 00000000..8da365dd --- /dev/null +++ b/qa/suites/rados/singleton/all/max-pg-per-osd.from-replica.yaml @@ -0,0 +1,32 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - osd.3 +openstack: + - volumes: # attached to each instance + count: 4 + size: 10 # GB +overrides: + ceph: + create_rbd_pool: False + conf: + mon: + osd pool default size: 2 + osd: + mon max pg per osd : 1 + osd max pg per osd hard ratio : 1 + log-whitelist: + - \(TOO_FEW_PGS\) + - \(PG_ + - \(PENDING_CREATING_PGS\) +tasks: +- install: +- ceph: +- osd_max_pg_per_osd: + test_create_from_mon: False + pg_num: 1 + pool_size: 2 + from_primary: False diff --git a/qa/suites/rados/singleton/all/mon-auth-caps.yaml b/qa/suites/rados/singleton/all/mon-auth-caps.yaml new file mode 100644 index 00000000..ae4a5d2e --- /dev/null +++ b/qa/suites/rados/singleton/all/mon-auth-caps.yaml @@ -0,0 +1,17 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 +tasks: +- install: +- ceph: + log-whitelist: + - overall HEALTH_ + - \(AUTH_BAD_CAPS\) +- workunit: + clients: + all: + - mon/auth_caps.sh diff --git a/qa/suites/rados/singleton/all/mon-config-key-caps.yaml b/qa/suites/rados/singleton/all/mon-config-key-caps.yaml new file mode 100644 index 00000000..0b0b95c5 --- /dev/null +++ b/qa/suites/rados/singleton/all/mon-config-key-caps.yaml @@ -0,0 +1,17 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 +tasks: +- install: +- ceph: + log-whitelist: + - overall HEALTH_ + - \(AUTH_BAD_CAPS\) +- workunit: + clients: + all: + - mon/test_config_key_caps.sh diff --git a/qa/suites/rados/singleton/all/mon-config-keys.yaml b/qa/suites/rados/singleton/all/mon-config-keys.yaml new file mode 100644 index 00000000..7bb4f650 --- /dev/null +++ b/qa/suites/rados/singleton/all/mon-config-keys.yaml @@ -0,0 +1,20 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- ceph: +- workunit: + clients: + all: + - mon/test_mon_config_key.py diff --git a/qa/suites/rados/singleton/all/mon-config.yaml 
b/qa/suites/rados/singleton/all/mon-config.yaml new file mode 100644 index 00000000..2d9de8bb --- /dev/null +++ b/qa/suites/rados/singleton/all/mon-config.yaml @@ -0,0 +1,20 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- ceph: +- workunit: + clients: + all: + - mon/config.sh diff --git a/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled b/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled new file mode 100644 index 00000000..7f9dd495 --- /dev/null +++ b/qa/suites/rados/singleton/all/mon-memory-target-compliance.yaml.disabled @@ -0,0 +1,152 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - osd.3 + - osd.4 + - osd.5 + - osd.6 + - osd.7 + - osd.8 + - osd.9 + - osd.10 + - osd.11 + - osd.12 + - osd.13 + - osd.14 + - client.0 +openstack: + - volumes: # attached to each instance + count: 4 + size: 1 # GB +overrides: + ceph: + conf: + mon: + mon memory target: 134217728 # reduced to 128_M + rocksdb cache size: 67108864 # reduced to 64_M + mon osd cache size: 100000 + mon osd cache size min: 134217728 + osd: + osd memory target: 1610612736 # reduced to 1.5_G + osd objectstore: bluestore + debug bluestore: 20 + osd scrub min interval: 60 + osd scrub max interval: 120 + osd max backfills: 9 + +tasks: +- install: + branch: wip-sseshasa2-testing-2019-07-30-1825 # change as appropriate +- ceph: + create_rbd_pool: false + log-whitelist: + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(POOL_ + - \(CACHE_POOL_ + - \(OBJECT_ + - \(SLOW_OPS\) + - \(REQUEST_SLOW\) + - \(TOO_FEW_PGS\) + - slow request +- interactive: +- parallel: + - log-mon-rss + - stress-tasks + - benchload +- exec: + client.0: + - "ceph_test_mon_memory_target 134217728" # mon memory target + - "ceph_test_mon_rss_usage 134217728" +log-mon-rss: +- background_exec: + client.0: + - while true + - do /usr/bin/ceph_test_log_rss_usage ceph-mon >> /var/log/ceph/ceph-mon-rss-usage.log + - sleep 300 # log rss usage every 5 mins. 
May be modified accordingly + - done +- exec: + client.0: + - sleep 37860 # sum total of the radosbench test times below plus 60 secs +benchload: # The total radosbench test below translates to 10.5 hrs +- full_sequential: + - radosbench: + clients: [client.0] + time: 1800 + - radosbench: + clients: [client.0] + time: 1800 + - radosbench: + clients: [client.0] + time: 1800 + - radosbench: + clients: [client.0] + time: 1800 + - radosbench: + clients: [client.0] + time: 1800 + - radosbench: + clients: [client.0] + time: 1800 + - radosbench: + clients: [client.0] + time: 1800 + - radosbench: + clients: [client.0] + time: 1800 + - radosbench: + clients: [client.0] + time: 1800 + - radosbench: + clients: [client.0] + time: 1800 + - radosbench: + clients: [client.0] + time: 1800 + - radosbench: + clients: [client.0] + time: 1800 + - radosbench: + clients: [client.0] + time: 1800 + - radosbench: + clients: [client.0] + time: 1800 + - radosbench: + clients: [client.0] + time: 1800 + - radosbench: + clients: [client.0] + time: 1800 + - radosbench: + clients: [client.0] + time: 1800 + - radosbench: + clients: [client.0] + time: 1800 + - radosbench: + clients: [client.0] + time: 1800 + - radosbench: + clients: [client.0] + time: 1800 + - radosbench: + clients: [client.0] + time: 1800 +stress-tasks: +- thrashosds: + op_delay: 1 + bdev_inject_crash: 1 + bdev_inject_crash_probability: .8 + chance_down: 80 + chance_pgnum_grow: 3 + chance_pgpnum_fix: 1 + chance_thrash_cluster_full: 0 + chance_thrash_pg_upmap: 3 + chance_thrash_pg_upmap_items: 3 + min_in: 2 diff --git a/qa/suites/rados/singleton/all/osd-backfill.yaml b/qa/suites/rados/singleton/all/osd-backfill.yaml new file mode 100644 index 00000000..5b374071 --- /dev/null +++ b/qa/suites/rados/singleton/all/osd-backfill.yaml @@ -0,0 +1,26 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mgr.x + - osd.0 + - osd.1 + - osd.2 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- ceph: + log-whitelist: + - but it is still running + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(OBJECT_ + conf: + osd: + osd min pg log entries: 5 +- osd_backfill: diff --git a/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml b/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml new file mode 100644 index 00000000..ed5b216b --- /dev/null +++ b/qa/suites/rados/singleton/all/osd-recovery-incomplete.yaml @@ -0,0 +1,28 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - osd.3 +openstack: + - volumes: # attached to each instance + count: 4 + size: 10 # GB +tasks: +- install: +- ceph: + log-whitelist: + - but it is still running + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(OBJECT_ + conf: + osd: + osd min pg log entries: 5 + osd_fast_fail_on_connection_refused: false +- osd_recovery.test_incomplete_pgs: diff --git a/qa/suites/rados/singleton/all/osd-recovery.yaml b/qa/suites/rados/singleton/all/osd-recovery.yaml new file mode 100644 index 00000000..d937a8db --- /dev/null +++ b/qa/suites/rados/singleton/all/osd-recovery.yaml @@ -0,0 +1,30 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mgr.x + - osd.0 + - osd.1 + - osd.2 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- ceph: + log-whitelist: + - but it is still running + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(OBJECT_DEGRADED\) + - \(SLOW_OPS\) + - slow request + conf: + osd: + osd min pg log entries: 5 + osd pg log trim 
min: 0 + osd_fast_fail_on_connection_refused: false +- osd_recovery: diff --git a/qa/suites/rados/singleton/all/peer.yaml b/qa/suites/rados/singleton/all/peer.yaml new file mode 100644 index 00000000..645034a4 --- /dev/null +++ b/qa/suites/rados/singleton/all/peer.yaml @@ -0,0 +1,25 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mgr.x + - osd.0 + - osd.1 + - osd.2 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- ceph: + config: + global: + osd pool default min size : 1 + log-whitelist: + - objects unfound and apparently lost + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ +- peer: diff --git a/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml b/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml new file mode 100644 index 00000000..2784b7e3 --- /dev/null +++ b/qa/suites/rados/singleton/all/pg-autoscaler-progress-off.yaml @@ -0,0 +1,42 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - osd.3 + - client.0 +- - mon.b + - mon.c + - osd.4 + - osd.5 + - osd.6 + - osd.7 +openstack: + - volumes: # attached to each instance + count: 4 + size: 10 # GB +tasks: +- install: +- ceph: + create_rbd_pool: false + log-whitelist: + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(POOL_ + - \(CACHE_POOL_ + - \(OBJECT_ + - \(SLOW_OPS\) + - \(REQUEST_SLOW\) + - \(TOO_FEW_PGS\) + - slow request +- exec: + client.0: + - ceph progress off + +- workunit: + clients: + all: + - mon/pg_autoscaler.sh diff --git a/qa/suites/rados/singleton/all/pg-autoscaler.yaml b/qa/suites/rados/singleton/all/pg-autoscaler.yaml new file mode 100644 index 00000000..72e18d52 --- /dev/null +++ b/qa/suites/rados/singleton/all/pg-autoscaler.yaml @@ -0,0 +1,38 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - osd.3 + - client.0 +- - mon.b + - mon.c + - osd.4 + - osd.5 + - osd.6 + - osd.7 +openstack: + - volumes: # attached to each instance + count: 4 + size: 10 # GB +tasks: +- install: +- ceph: + create_rbd_pool: false + log-whitelist: + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(POOL_ + - \(CACHE_POOL_ + - \(OBJECT_ + - \(SLOW_OPS\) + - \(REQUEST_SLOW\) + - \(TOO_FEW_PGS\) + - slow request +- workunit: + clients: + all: + - mon/pg_autoscaler.sh diff --git a/qa/suites/rados/singleton/all/pg-removal-interruption.yaml b/qa/suites/rados/singleton/all/pg-removal-interruption.yaml new file mode 100644 index 00000000..3ada5518 --- /dev/null +++ b/qa/suites/rados/singleton/all/pg-removal-interruption.yaml @@ -0,0 +1,34 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- ceph: + log-whitelist: + - but it is still running + - slow request + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ +- exec: + client.0: + - sudo ceph osd pool create foo 128 128 + - sudo ceph osd pool application enable foo rados + - sleep 5 + - sudo ceph tell osd.0 injectargs -- --osd-inject-failure-on-pg-removal + - sudo ceph osd pool delete foo foo --yes-i-really-really-mean-it +- ceph.wait_for_failure: [osd.0] +- exec: + client.0: + - sudo ceph osd down 0 +- ceph.restart: [osd.0] +- ceph.healthy: diff --git a/qa/suites/rados/singleton/all/radostool.yaml b/qa/suites/rados/singleton/all/radostool.yaml new file mode 100644 index 00000000..18277953 --- /dev/null +++ b/qa/suites/rados/singleton/all/radostool.yaml @@ -0,0 +1,26 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 
+ - client.0 +openstack: + - volumes: # attached to each instance + count: 2 + size: 10 # GB +tasks: +- install: +- ceph: + log-whitelist: + - but it is still running + - had wrong client addr + - had wrong cluster addr + - reached quota + - overall HEALTH_ + - \(POOL_FULL\) + - \(POOL_APP_NOT_ENABLED\) +- workunit: + clients: + all: + - rados/test_rados_tool.sh diff --git a/qa/suites/rados/singleton/all/random-eio.yaml b/qa/suites/rados/singleton/all/random-eio.yaml new file mode 100644 index 00000000..5df910b8 --- /dev/null +++ b/qa/suites/rados/singleton/all/random-eio.yaml @@ -0,0 +1,44 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 +- - osd.3 + - osd.4 + - osd.5 + - client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- ceph: + log-whitelist: + - missing primary copy of + - objects unfound and apparently lost + - had a read error + - overall HEALTH_ + - \(POOL_APP_NOT_ENABLED\) + - \(PG_DEGRADED\) + - \(OSD_TOO_MANY_REPAIRS\) +- full_sequential: + - exec: + client.0: + - sudo ceph tell osd.1 injectargs -- --filestore_debug_random_read_err=0.33 + - sudo ceph tell osd.1 injectargs -- --bluestore_debug_random_read_err=0.33 + - sudo ceph osd pool create test 16 16 + - sudo ceph osd pool set test size 3 + - sudo ceph pg dump pgs --format=json-pretty + - radosbench: + clients: [client.0] + time: 360 + type: rand + objectsize: 1048576 + pool: test + create_pool: false + - exec: + client.0: + - sudo ceph tell osd.1 injectargs -- --filestore_debug_random_read_err=0.0 + - sudo ceph tell osd.1 injectargs -- --bluestore_debug_random_read_err=0.0 diff --git a/qa/suites/rados/singleton/all/rebuild-mondb.yaml b/qa/suites/rados/singleton/all/rebuild-mondb.yaml new file mode 100644 index 00000000..cc1c6809 --- /dev/null +++ b/qa/suites/rados/singleton/all/rebuild-mondb.yaml @@ -0,0 +1,32 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- ceph: + log-whitelist: + - no reply from + - overall HEALTH_ + - \(MON_DOWN\) + - \(MGR_DOWN\) + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ +- full_sequential: + - radosbench: + clients: [client.0] + time: 30 + - rebuild_mondb: + - radosbench: + clients: [client.0] + time: 30 diff --git a/qa/suites/rados/singleton/all/recovery-preemption.yaml b/qa/suites/rados/singleton/all/recovery-preemption.yaml new file mode 100644 index 00000000..fbf1772c --- /dev/null +++ b/qa/suites/rados/singleton/all/recovery-preemption.yaml @@ -0,0 +1,57 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - osd.3 +openstack: + - volumes: # attached to each instance + count: 3 + size: 20 # GB +tasks: +- install: +- ceph: + conf: + osd: + osd recovery sleep: .1 + osd min pg log entries: 10 + osd max pg log entries: 1000 + osd pg log trim min: 10 + log-whitelist: + - \(POOL_APP_NOT_ENABLED\) + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(OBJECT_ + - \(PG_ + - \(SLOW_OPS\) + - overall HEALTH + - slow request +- exec: + osd.0: + - ceph osd pool create foo 128 + - ceph osd pool application enable foo foo + - sleep 5 +- ceph.healthy: +- exec: + osd.0: + - rados -p foo bench 30 write -b 4096 --no-cleanup + - ceph osd out 0 + - sleep 5 + - ceph osd set noup +- ceph.restart: + daemons: [osd.1] + wait-for-up: false + wait-for-healthy: false +- exec: + osd.0: + - rados -p foo bench 3 write -b 4096 --no-cleanup + - ceph osd unset noup + - sleep 10 + - for f in 0 1 2 3 ; 
do sudo ceph daemon osd.$f config set osd_recovery_sleep 0 ; sudo ceph daemon osd.$f config set osd_recovery_max_active 20 ; done +- ceph.healthy: +- exec: + osd.0: + - egrep '(defer backfill|defer recovery)' /var/log/ceph/ceph-osd.*.log diff --git a/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml b/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml new file mode 100644 index 00000000..3eddce82 --- /dev/null +++ b/qa/suites/rados/singleton/all/resolve_stuck_peering.yaml @@ -0,0 +1,17 @@ +roles: +- [mon.a, mgr.x] +- [osd.0, osd.1, osd.2, client.0] + +tasks: +- install: +- ceph: + fs: xfs + log-whitelist: + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(OBJECT_DEGRADED\) + - \(POOL_APP_NOT_ENABLED\) +- resolve_stuck_peering: + diff --git a/qa/suites/rados/singleton/all/test-crash.yaml b/qa/suites/rados/singleton/all/test-crash.yaml new file mode 100644 index 00000000..8002deaa --- /dev/null +++ b/qa/suites/rados/singleton/all/test-crash.yaml @@ -0,0 +1,15 @@ +roles: + - [client.0, mon.a, mgr.x, osd.0, osd.1, osd.2] + +tasks: + - install: + - ceph: + log-whitelist: + - Reduced data availability + - OSD_.*DOWN + - \(RECENT_CRASH\) + - workunit: + clients: + client.0: + - rados/test_crash.sh + - ceph.restart: [osd.*] diff --git a/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml b/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml new file mode 100644 index 00000000..42c8ae39 --- /dev/null +++ b/qa/suites/rados/singleton/all/test_envlibrados_for_rocksdb.yaml @@ -0,0 +1,19 @@ +overrides: + ceph: + fs: ext4 + conf: + global: + osd max object name len: 460 + osd max object namespace len: 64 +roles: +- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] +tasks: +- install: +- ceph: + log-whitelist: + - overall HEALTH_ + - \(POOL_APP_NOT_ENABLED\) +- workunit: + clients: + all: + - rados/test_envlibrados_for_rocksdb.sh diff --git a/qa/suites/rados/singleton/all/thrash-backfill-full.yaml b/qa/suites/rados/singleton/all/thrash-backfill-full.yaml new file mode 100644 index 00000000..5cd32bd5 --- /dev/null +++ b/qa/suites/rados/singleton/all/thrash-backfill-full.yaml @@ -0,0 +1,50 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 +- - osd.3 + - osd.4 + - osd.5 + - client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +override: + ceph: + conf: + mon: + osd default pool size: 3 + osd min pg log entries: 5 + osd max pg log entries: 10 +tasks: +- install: +- ceph: + log-whitelist: + - but it is still running + - missing primary copy of + - objects unfound and apparently lost + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(SLOW_OPS\) + - \(PG_ + - \(OBJECT_MISPLACED\) + - \(OSD_ + - \(OBJECT_ + - \(TOO_FEW_PGS\) + - \(POOL_BACKFILLFULL\) + - slow request +- thrashosds: + op_delay: 30 + clean_interval: 120 + chance_down: .75 + min_live: 5 + min_in: 5 + chance_test_backfill_full: .5 +- radosbench: + clients: [client.0] + time: 1800 + type: rand + objectsize: 1048576 diff --git a/qa/suites/rados/singleton/all/thrash-eio.yaml b/qa/suites/rados/singleton/all/thrash-eio.yaml new file mode 100644 index 00000000..0afb6c86 --- /dev/null +++ b/qa/suites/rados/singleton/all/thrash-eio.yaml @@ -0,0 +1,47 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 +- - osd.3 + - osd.4 + - osd.5 + - client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +override: + ceph: + conf: + mon: + osd default pool size: 3 +tasks: +- install: +- ceph: + log-whitelist: + - but it is still running 
+ - missing primary copy of + - objects unfound and apparently lost + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(SLOW_OPS\) + - \(PG_ + - \(OBJECT_MISPLACED\) + - \(OSD_ + - \(OBJECT_ + - \(TOO_FEW_PGS\) + - slow request +- thrashosds: + op_delay: 30 + clean_interval: 120 + chance_down: .5 + random_eio: .33 + min_live: 5 + min_in: 5 +- radosbench: + clients: [client.0] + time: 720 + type: rand + objectsize: 1048576 diff --git a/qa/suites/rados/singleton/all/thrash-rados/+ b/qa/suites/rados/singleton/all/thrash-rados/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/singleton/all/thrash-rados/.qa b/qa/suites/rados/singleton/all/thrash-rados/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/singleton/all/thrash-rados/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml b/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml new file mode 100644 index 00000000..37be8df9 --- /dev/null +++ b/qa/suites/rados/singleton/all/thrash-rados/thrash-rados.yaml @@ -0,0 +1,27 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 +- - osd.3 + - osd.4 + - osd.5 + - client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- ceph: + log-whitelist: + - but it is still running +- thrashosds: + op_delay: 30 + clean_interval: 120 + chance_down: .5 +- workunit: + clients: + all: + - rados/load-gen-mix-small.sh diff --git a/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml b/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml new file mode 120000 index 00000000..9124eb1a --- /dev/null +++ b/qa/suites/rados/singleton/all/thrash-rados/thrashosds-health.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml b/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml new file mode 100644 index 00000000..c0b27075 --- /dev/null +++ b/qa/suites/rados/singleton/all/thrash_cache_writeback_proxy_none.yaml @@ -0,0 +1,70 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 +- - osd.3 + - osd.4 + - osd.5 + - client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 30 # GB +tasks: +- install: +- ceph: + log-whitelist: + - but it is still running + - slow request + - overall HEALTH_ + - \(CACHE_POOL_ +- exec: + client.0: + - sudo ceph osd pool create base 4 + - sudo ceph osd pool application enable base rados + - sudo ceph osd pool create cache 4 + - sudo ceph osd tier add base cache + - sudo ceph osd tier cache-mode cache writeback + - sudo ceph osd tier set-overlay base cache + - sudo ceph osd pool set cache hit_set_type bloom + - sudo ceph osd pool set cache hit_set_count 8 + - sudo ceph osd pool set cache hit_set_period 60 + - sudo ceph osd pool set cache target_max_objects 500 +- background_exec: + mon.a: + - while true + - do sleep 30 + - echo proxy + - sudo ceph osd tier cache-mode cache proxy + - sleep 10 + - sudo ceph osd pool set cache cache_target_full_ratio .001 + - echo cache-try-flush-evict-all + - rados -p cache cache-try-flush-evict-all + - sleep 5 + - echo cache-flush-evict-all + - rados -p cache cache-flush-evict-all + - sleep 5 + - echo remove overlay + - sudo ceph osd tier remove-overlay base + - sleep 20 + - echo add writeback overlay + - sudo ceph osd tier cache-mode cache writeback + - sudo ceph osd pool set cache 
cache_target_full_ratio .8 + - sudo ceph osd tier set-overlay base cache + - sleep 30 + - sudo ceph osd tier cache-mode cache readproxy + - done +- rados: + clients: [client.0] + pools: [base] + max_seconds: 600 + ops: 400000 + objects: 10000 + size: 1024 + op_weights: + read: 100 + write: 100 + delete: 50 + copy_from: 50 diff --git a/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml b/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml new file mode 100644 index 00000000..48ef78ff --- /dev/null +++ b/qa/suites/rados/singleton/all/watch-notify-same-primary.yaml @@ -0,0 +1,32 @@ +roles: +- - mon.a + - mon.b + - mon.c + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- ceph: + config: + global: + osd pool default min size : 1 + client: + debug ms: 1 + debug objecter: 20 + debug rados: 20 + log-whitelist: + - objects unfound and apparently lost + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(OBJECT_DEGRADED\) +- watch_notify_same_primary: + clients: [client.0] diff --git a/qa/suites/rados/singleton/msgr b/qa/suites/rados/singleton/msgr new file mode 120000 index 00000000..57bee80d --- /dev/null +++ b/qa/suites/rados/singleton/msgr @@ -0,0 +1 @@ +.qa/msgr \ No newline at end of file diff --git a/qa/suites/rados/singleton/msgr-failures/.qa b/qa/suites/rados/singleton/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/singleton/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/singleton/msgr-failures/few.yaml b/qa/suites/rados/singleton/msgr-failures/few.yaml new file mode 100644 index 00000000..4326fe23 --- /dev/null +++ b/qa/suites/rados/singleton/msgr-failures/few.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rados/singleton/msgr-failures/many.yaml b/qa/suites/rados/singleton/msgr-failures/many.yaml new file mode 100644 index 00000000..20aeb4df --- /dev/null +++ b/qa/suites/rados/singleton/msgr-failures/many.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 1000 + mon mgr beacon grace: 90 + mon client hunt interval max multiple: 2 + mgr: + debug monc: 10 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rados/singleton/objectstore b/qa/suites/rados/singleton/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rados/singleton/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rados/singleton/rados.yaml b/qa/suites/rados/singleton/rados.yaml new file mode 120000 index 00000000..d256979c --- /dev/null +++ b/qa/suites/rados/singleton/rados.yaml @@ -0,0 +1 @@ +.qa/config/rados.yaml \ No newline at end of file diff --git a/qa/suites/rados/singleton/supported-random-distro$ b/qa/suites/rados/singleton/supported-random-distro$ new file mode 120000 index 00000000..7cef21ee --- /dev/null +++ b/qa/suites/rados/singleton/supported-random-distro$ @@ -0,0 +1 @@ +../basic/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rados/standalone/% b/qa/suites/rados/standalone/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/standalone/.qa b/qa/suites/rados/standalone/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/standalone/.qa @@ -0,0 +1 @@ 
+../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/standalone/supported-random-distro$ b/qa/suites/rados/standalone/supported-random-distro$ new file mode 120000 index 00000000..7cef21ee --- /dev/null +++ b/qa/suites/rados/standalone/supported-random-distro$ @@ -0,0 +1 @@ +../basic/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rados/standalone/workloads/.qa b/qa/suites/rados/standalone/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/standalone/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/standalone/workloads/crush.yaml b/qa/suites/rados/standalone/workloads/crush.yaml new file mode 100644 index 00000000..a62a0dd8 --- /dev/null +++ b/qa/suites/rados/standalone/workloads/crush.yaml @@ -0,0 +1,18 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- workunit: + basedir: qa/standalone + clients: + all: + - crush diff --git a/qa/suites/rados/standalone/workloads/erasure-code.yaml b/qa/suites/rados/standalone/workloads/erasure-code.yaml new file mode 100644 index 00000000..7d79753c --- /dev/null +++ b/qa/suites/rados/standalone/workloads/erasure-code.yaml @@ -0,0 +1,18 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- workunit: + basedir: qa/standalone + clients: + all: + - erasure-code diff --git a/qa/suites/rados/standalone/workloads/mgr.yaml b/qa/suites/rados/standalone/workloads/mgr.yaml new file mode 100644 index 00000000..997fae86 --- /dev/null +++ b/qa/suites/rados/standalone/workloads/mgr.yaml @@ -0,0 +1,18 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- workunit: + basedir: qa/standalone + clients: + all: + - mgr diff --git a/qa/suites/rados/standalone/workloads/misc.yaml b/qa/suites/rados/standalone/workloads/misc.yaml new file mode 100644 index 00000000..4aa9ee27 --- /dev/null +++ b/qa/suites/rados/standalone/workloads/misc.yaml @@ -0,0 +1,18 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- workunit: + basedir: qa/standalone + clients: + all: + - misc diff --git a/qa/suites/rados/standalone/workloads/mon.yaml b/qa/suites/rados/standalone/workloads/mon.yaml new file mode 100644 index 00000000..c19606f4 --- /dev/null +++ b/qa/suites/rados/standalone/workloads/mon.yaml @@ -0,0 +1,18 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- workunit: + basedir: qa/standalone + clients: + all: + - mon diff --git a/qa/suites/rados/standalone/workloads/osd.yaml b/qa/suites/rados/standalone/workloads/osd.yaml new file mode 100644 index 00000000..e28b5221 --- /dev/null +++ b/qa/suites/rados/standalone/workloads/osd.yaml @@ -0,0 +1,18 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- workunit: + basedir: qa/standalone + clients: + all: + - osd diff --git a/qa/suites/rados/standalone/workloads/scrub.yaml 
b/qa/suites/rados/standalone/workloads/scrub.yaml new file mode 100644 index 00000000..7f6fad40 --- /dev/null +++ b/qa/suites/rados/standalone/workloads/scrub.yaml @@ -0,0 +1,18 @@ +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 +openstack: + - volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- workunit: + basedir: qa/standalone + clients: + all: + - scrub diff --git a/qa/suites/rados/thrash-erasure-code-big/% b/qa/suites/rados/thrash-erasure-code-big/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash-erasure-code-big/.qa b/qa/suites/rados/thrash-erasure-code-big/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-big/ceph.yaml b/qa/suites/rados/thrash-erasure-code-big/ceph.yaml new file mode 120000 index 00000000..a2fd139c --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/ceph.yaml @@ -0,0 +1 @@ +../thrash/ceph.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-big/cluster/+ b/qa/suites/rados/thrash-erasure-code-big/cluster/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash-erasure-code-big/cluster/.qa b/qa/suites/rados/thrash-erasure-code-big/cluster/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-big/cluster/12-osds.yaml b/qa/suites/rados/thrash-erasure-code-big/cluster/12-osds.yaml new file mode 100644 index 00000000..1c45ee35 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/cluster/12-osds.yaml @@ -0,0 +1,4 @@ +roles: +- [osd.0, osd.1, osd.2, osd.3, client.0, mon.a] +- [osd.4, osd.5, osd.6, osd.7, mon.b, mgr.x] +- [osd.8, osd.9, osd.10, osd.11, mon.c] diff --git a/qa/suites/rados/thrash-erasure-code-big/cluster/openstack.yaml b/qa/suites/rados/thrash-erasure-code-big/cluster/openstack.yaml new file mode 100644 index 00000000..e559d912 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/cluster/openstack.yaml @@ -0,0 +1,4 @@ +openstack: + - volumes: # attached to each instance + count: 4 + size: 10 # GB diff --git a/qa/suites/rados/thrash-erasure-code-big/msgr-failures b/qa/suites/rados/thrash-erasure-code-big/msgr-failures new file mode 120000 index 00000000..03689aa4 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/msgr-failures @@ -0,0 +1 @@ +../thrash/msgr-failures \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-big/objectstore b/qa/suites/rados/thrash-erasure-code-big/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-big/rados.yaml b/qa/suites/rados/thrash-erasure-code-big/rados.yaml new file mode 120000 index 00000000..d256979c --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/rados.yaml @@ -0,0 +1 @@ +.qa/config/rados.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-big/recovery-overrides b/qa/suites/rados/thrash-erasure-code-big/recovery-overrides new file mode 120000 index 00000000..1957f2c4 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/recovery-overrides @@ -0,0 +1 @@ 
+../thrash/2-recovery-overrides \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-big/supported-random-distro$ b/qa/suites/rados/thrash-erasure-code-big/supported-random-distro$ new file mode 120000 index 00000000..7cef21ee --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/supported-random-distro$ @@ -0,0 +1 @@ +../basic/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-big/thrashers/.qa b/qa/suites/rados/thrash-erasure-code-big/thrashers/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/thrashers/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-big/thrashers/careful.yaml b/qa/suites/rados/thrash-erasure-code-big/thrashers/careful.yaml new file mode 100644 index 00000000..42694359 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/thrashers/careful.yaml @@ -0,0 +1,20 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost + - slow request + conf: + osd: + osd debug reject backfill probability: .3 + osd scrub min interval: 60 + osd scrub max interval: 120 + osd max backfills: 6 +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgnum_shrink: 1 + chance_pgpnum_fix: 1 + min_in: 8 + aggressive_pg_num_changes: false diff --git a/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml b/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml new file mode 100644 index 00000000..13ca050f --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/thrashers/default.yaml @@ -0,0 +1,19 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost + - slow request + conf: + osd: + osd debug reject backfill probability: .1 + osd scrub min interval: 60 + osd scrub max interval: 120 + osd max backfills: 6 +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgnum_shrink: 1 + chance_pgpnum_fix: 1 + min_in: 8 diff --git a/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml b/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml new file mode 100644 index 00000000..17087078 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/thrashers/fastread.yaml @@ -0,0 +1,20 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost + conf: + mon: + osd pool default ec fast read: true + osd: + osd debug reject backfill probability: .1 + osd scrub min interval: 60 + osd scrub max interval: 120 + osd max backfills: 2 +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgnum_shrink: 1 + chance_pgpnum_fix: 1 + min_in: 4 diff --git a/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml b/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml new file mode 100644 index 00000000..fb3af982 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/thrashers/mapgap.yaml @@ -0,0 +1,21 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost + - osd_map_cache_size + conf: + mon: + mon min osdmap epochs: 2 + osd: + osd map cache size: 1 + osd scrub min interval: 60 + osd scrub max interval: 120 +tasks: +- thrashosds: + timeout: 1800 + chance_pgnum_grow: 1 + chance_pgnum_shrink: 1 + chance_pgpnum_fix: 1 + chance_test_map_discontinuity: 0.5 + min_in: 8 diff --git 
a/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml b/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml new file mode 100644 index 00000000..572832d8 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/thrashers/morepggrow.yaml @@ -0,0 +1,16 @@ +overrides: + ceph: + conf: + osd: + osd scrub min interval: 60 + osd scrub max interval: 120 + osd max backfills: 9 + log-whitelist: + - but it is still running + - objects unfound and apparently lost +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 3 + chance_pgpnum_fix: 1 + min_in: 8 diff --git a/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml b/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml new file mode 100644 index 00000000..148d9fe5 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/thrashers/pggrow.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost + conf: + osd: + osd scrub min interval: 60 + osd scrub max interval: 120 +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 2 + chance_pgpnum_fix: 1 + min_in: 8 diff --git a/qa/suites/rados/thrash-erasure-code-big/thrashosds-health.yaml b/qa/suites/rados/thrash-erasure-code-big/thrashosds-health.yaml new file mode 120000 index 00000000..9124eb1a --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/thrashosds-health.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-big/workloads/.qa b/qa/suites/rados/thrash-erasure-code-big/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml b/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml new file mode 120000 index 00000000..c18bec16 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=jerasure-k=4-m=2.yaml @@ -0,0 +1 @@ +.qa/erasure-code/ec-rados-plugin=jerasure-k=4-m=2.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml b/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml new file mode 120000 index 00000000..d66fd796 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-big/workloads/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml @@ -0,0 +1 @@ +.qa/erasure-code/ec-rados-plugin=lrc-k=4-m=2-l=3.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-isa/% b/qa/suites/rados/thrash-erasure-code-isa/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash-erasure-code-isa/.qa b/qa/suites/rados/thrash-erasure-code-isa/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-isa/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-isa/arch/.qa b/qa/suites/rados/thrash-erasure-code-isa/arch/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-isa/arch/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-isa/arch/x86_64.yaml b/qa/suites/rados/thrash-erasure-code-isa/arch/x86_64.yaml new file mode 100644 index 00000000..c2409f5d --- /dev/null +++ 
b/qa/suites/rados/thrash-erasure-code-isa/arch/x86_64.yaml @@ -0,0 +1 @@ +arch: x86_64 diff --git a/qa/suites/rados/thrash-erasure-code-isa/ceph.yaml b/qa/suites/rados/thrash-erasure-code-isa/ceph.yaml new file mode 120000 index 00000000..a2fd139c --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-isa/ceph.yaml @@ -0,0 +1 @@ +../thrash/ceph.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-isa/clusters b/qa/suites/rados/thrash-erasure-code-isa/clusters new file mode 120000 index 00000000..7aac47be --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-isa/clusters @@ -0,0 +1 @@ +../thrash/clusters \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-isa/msgr-failures b/qa/suites/rados/thrash-erasure-code-isa/msgr-failures new file mode 120000 index 00000000..03689aa4 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-isa/msgr-failures @@ -0,0 +1 @@ +../thrash/msgr-failures \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-isa/objectstore b/qa/suites/rados/thrash-erasure-code-isa/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-isa/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-isa/rados.yaml b/qa/suites/rados/thrash-erasure-code-isa/rados.yaml new file mode 120000 index 00000000..d256979c --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-isa/rados.yaml @@ -0,0 +1 @@ +.qa/config/rados.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-isa/recovery-overrides b/qa/suites/rados/thrash-erasure-code-isa/recovery-overrides new file mode 120000 index 00000000..1957f2c4 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-isa/recovery-overrides @@ -0,0 +1 @@ +../thrash/2-recovery-overrides \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-isa/supported-random-distro$ b/qa/suites/rados/thrash-erasure-code-isa/supported-random-distro$ new file mode 120000 index 00000000..7cef21ee --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-isa/supported-random-distro$ @@ -0,0 +1 @@ +../basic/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-isa/thrashers b/qa/suites/rados/thrash-erasure-code-isa/thrashers new file mode 120000 index 00000000..f461dadc --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-isa/thrashers @@ -0,0 +1 @@ +../thrash/thrashers \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-isa/thrashosds-health.yaml b/qa/suites/rados/thrash-erasure-code-isa/thrashosds-health.yaml new file mode 120000 index 00000000..9124eb1a --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-isa/thrashosds-health.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-isa/workloads/.qa b/qa/suites/rados/thrash-erasure-code-isa/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-isa/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-isa/workloads/ec-rados-plugin=isa-k=2-m=1.yaml b/qa/suites/rados/thrash-erasure-code-isa/workloads/ec-rados-plugin=isa-k=2-m=1.yaml new file mode 120000 index 00000000..19342b9d --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-isa/workloads/ec-rados-plugin=isa-k=2-m=1.yaml @@ -0,0 +1 @@ 
+.qa/erasure-code/ec-rados-plugin=isa-k=2-m=1.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/% b/qa/suites/rados/thrash-erasure-code-overwrites/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/.qa b/qa/suites/rados/thrash-erasure-code-overwrites/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-overwrites/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/bluestore-bitmap.yaml b/qa/suites/rados/thrash-erasure-code-overwrites/bluestore-bitmap.yaml new file mode 120000 index 00000000..635085f7 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-overwrites/bluestore-bitmap.yaml @@ -0,0 +1 @@ +../thrash-erasure-code/objectstore/bluestore-bitmap.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/ceph.yaml b/qa/suites/rados/thrash-erasure-code-overwrites/ceph.yaml new file mode 120000 index 00000000..a2fd139c --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-overwrites/ceph.yaml @@ -0,0 +1 @@ +../thrash/ceph.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/clusters b/qa/suites/rados/thrash-erasure-code-overwrites/clusters new file mode 120000 index 00000000..646ea04c --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-overwrites/clusters @@ -0,0 +1 @@ +../thrash-erasure-code/clusters \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/fast b/qa/suites/rados/thrash-erasure-code-overwrites/fast new file mode 120000 index 00000000..6170b30e --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-overwrites/fast @@ -0,0 +1 @@ +../thrash-erasure-code/fast \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/msgr-failures b/qa/suites/rados/thrash-erasure-code-overwrites/msgr-failures new file mode 120000 index 00000000..70c9ca13 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-overwrites/msgr-failures @@ -0,0 +1 @@ +../thrash-erasure-code/msgr-failures \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/rados.yaml b/qa/suites/rados/thrash-erasure-code-overwrites/rados.yaml new file mode 120000 index 00000000..017df6f6 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-overwrites/rados.yaml @@ -0,0 +1 @@ +../thrash-erasure-code/rados.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/recovery-overrides b/qa/suites/rados/thrash-erasure-code-overwrites/recovery-overrides new file mode 120000 index 00000000..1957f2c4 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-overwrites/recovery-overrides @@ -0,0 +1 @@ +../thrash/2-recovery-overrides \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/supported-random-distro$ b/qa/suites/rados/thrash-erasure-code-overwrites/supported-random-distro$ new file mode 120000 index 00000000..7cef21ee --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-overwrites/supported-random-distro$ @@ -0,0 +1 @@ +../basic/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/thrashers b/qa/suites/rados/thrash-erasure-code-overwrites/thrashers new file mode 120000 index 00000000..40ff82cf --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-overwrites/thrashers @@ -0,0 +1 @@ 
+../thrash-erasure-code/thrashers \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/thrashosds-health.yaml b/qa/suites/rados/thrash-erasure-code-overwrites/thrashosds-health.yaml new file mode 120000 index 00000000..9124eb1a --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-overwrites/thrashosds-health.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/workloads/.qa b/qa/suites/rados/thrash-erasure-code-overwrites/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-overwrites/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-pool-snaps-few-objects-overwrites.yaml b/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-pool-snaps-few-objects-overwrites.yaml new file mode 100644 index 00000000..d2ad70a5 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-pool-snaps-few-objects-overwrites.yaml @@ -0,0 +1,23 @@ +overrides: + ceph: + conf: + global: + enable experimental unrecoverable data corrupting features: '*' + thrashosds: + disable_objectstore_tool_tests: true +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + pool_snaps: true + ec_pool: true + erasure_code_use_overwrites: true + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-fast-read-overwrites.yaml b/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-fast-read-overwrites.yaml new file mode 100644 index 00000000..b3f831b7 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-fast-read-overwrites.yaml @@ -0,0 +1,29 @@ +overrides: + ceph: + conf: + global: + enable experimental unrecoverable data corrupting features: '*' + thrashosds: + disable_objectstore_tool_tests: true +tasks: +- rados: + clients: [client.0] + ops: 400000 + max_seconds: 600 + max_in_flight: 64 + objects: 1024 + size: 16384 + ec_pool: true + erasure_code_use_overwrites: true + fast_read: true + op_weights: + read: 100 + write: 100 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-overwrites.yaml b/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-overwrites.yaml new file mode 100644 index 00000000..9baacef4 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-small-objects-overwrites.yaml @@ -0,0 +1,28 @@ +overrides: + ceph: + conf: + global: + enable experimental unrecoverable data corrupting features: '*' + thrashosds: + disable_objectstore_tool_tests: true +tasks: +- rados: + clients: [client.0] + ops: 400000 + max_seconds: 600 + max_in_flight: 64 + objects: 1024 + size: 16384 + ec_pool: true + erasure_code_use_overwrites: true + op_weights: + read: 100 + write: 100 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-snaps-few-objects-overwrites.yaml b/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-snaps-few-objects-overwrites.yaml new file mode 100644 index 
00000000..b7c53819 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-overwrites/workloads/ec-snaps-few-objects-overwrites.yaml @@ -0,0 +1,22 @@ +overrides: + ceph: + conf: + global: + enable experimental unrecoverable data corrupting features: '*' + thrashosds: + disable_objectstore_tool_tests: true +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + erasure_code_use_overwrites: true + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 diff --git a/qa/suites/rados/thrash-erasure-code-shec/% b/qa/suites/rados/thrash-erasure-code-shec/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash-erasure-code-shec/.qa b/qa/suites/rados/thrash-erasure-code-shec/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-shec/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-shec/ceph.yaml b/qa/suites/rados/thrash-erasure-code-shec/ceph.yaml new file mode 120000 index 00000000..a2fd139c --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-shec/ceph.yaml @@ -0,0 +1 @@ +../thrash/ceph.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-shec/clusters/+ b/qa/suites/rados/thrash-erasure-code-shec/clusters/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash-erasure-code-shec/clusters/.qa b/qa/suites/rados/thrash-erasure-code-shec/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-shec/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-shec/clusters/fixed-4.yaml b/qa/suites/rados/thrash-erasure-code-shec/clusters/fixed-4.yaml new file mode 120000 index 00000000..aa883007 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-shec/clusters/fixed-4.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-4.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-shec/clusters/openstack.yaml b/qa/suites/rados/thrash-erasure-code-shec/clusters/openstack.yaml new file mode 100644 index 00000000..e559d912 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-shec/clusters/openstack.yaml @@ -0,0 +1,4 @@ +openstack: + - volumes: # attached to each instance + count: 4 + size: 10 # GB diff --git a/qa/suites/rados/thrash-erasure-code-shec/msgr-failures b/qa/suites/rados/thrash-erasure-code-shec/msgr-failures new file mode 120000 index 00000000..03689aa4 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-shec/msgr-failures @@ -0,0 +1 @@ +../thrash/msgr-failures \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-shec/objectstore b/qa/suites/rados/thrash-erasure-code-shec/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-shec/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-shec/rados.yaml b/qa/suites/rados/thrash-erasure-code-shec/rados.yaml new file mode 120000 index 00000000..d256979c --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-shec/rados.yaml @@ -0,0 +1 @@ +.qa/config/rados.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-shec/recovery-overrides b/qa/suites/rados/thrash-erasure-code-shec/recovery-overrides new file mode 120000 index 00000000..1957f2c4 --- /dev/null +++ 
b/qa/suites/rados/thrash-erasure-code-shec/recovery-overrides @@ -0,0 +1 @@ +../thrash/2-recovery-overrides \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-shec/supported-random-distro$ b/qa/suites/rados/thrash-erasure-code-shec/supported-random-distro$ new file mode 120000 index 00000000..7cef21ee --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-shec/supported-random-distro$ @@ -0,0 +1 @@ +../basic/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-shec/thrashers/.qa b/qa/suites/rados/thrash-erasure-code-shec/thrashers/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-shec/thrashers/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-shec/thrashers/careful.yaml b/qa/suites/rados/thrash-erasure-code-shec/thrashers/careful.yaml new file mode 100644 index 00000000..6f2f7a44 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-shec/thrashers/careful.yaml @@ -0,0 +1,20 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost + - slow request + conf: + osd: + osd debug reject backfill probability: .3 + osd scrub min interval: 60 + osd scrub max interval: 120 + osd max backfills: 3 +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgnum_shrink: 1 + chance_pgpnum_fix: 1 + min_in: 8 + aggressive_pg_num_changes: false diff --git a/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml b/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml new file mode 100644 index 00000000..a438f43f --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-shec/thrashers/default.yaml @@ -0,0 +1,19 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost + - slow request + conf: + osd: + osd debug reject backfill probability: .1 + osd scrub min interval: 60 + osd scrub max interval: 120 + osd max backfills: 3 +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgnum_shrink: 1 + chance_pgpnum_fix: 1 + min_in: 8 diff --git a/qa/suites/rados/thrash-erasure-code-shec/thrashosds-health.yaml b/qa/suites/rados/thrash-erasure-code-shec/thrashosds-health.yaml new file mode 120000 index 00000000..9124eb1a --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-shec/thrashosds-health.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-shec/workloads/.qa b/qa/suites/rados/thrash-erasure-code-shec/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-shec/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code-shec/workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml b/qa/suites/rados/thrash-erasure-code-shec/workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml new file mode 120000 index 00000000..8f318cc3 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code-shec/workloads/ec-rados-plugin=shec-k=4-m=3-c=2.yaml @@ -0,0 +1 @@ +.qa/erasure-code/ec-rados-plugin=shec-k=4-m=3-c=2.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code/% b/qa/suites/rados/thrash-erasure-code/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash-erasure-code/.qa b/qa/suites/rados/thrash-erasure-code/.qa new file mode 120000 index 00000000..a602a035 --- 
/dev/null +++ b/qa/suites/rados/thrash-erasure-code/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code/ceph.yaml b/qa/suites/rados/thrash-erasure-code/ceph.yaml new file mode 100644 index 00000000..2030acb9 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/ceph.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/rados/thrash-erasure-code/clusters b/qa/suites/rados/thrash-erasure-code/clusters new file mode 120000 index 00000000..7aac47be --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/clusters @@ -0,0 +1 @@ +../thrash/clusters \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code/fast/.qa b/qa/suites/rados/thrash-erasure-code/fast/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/fast/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code/fast/fast.yaml b/qa/suites/rados/thrash-erasure-code/fast/fast.yaml new file mode 100644 index 00000000..8ebfee0a --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/fast/fast.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + osd pool default ec fast read: true diff --git a/qa/suites/rados/thrash-erasure-code/fast/normal.yaml b/qa/suites/rados/thrash-erasure-code/fast/normal.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash-erasure-code/msgr-failures b/qa/suites/rados/thrash-erasure-code/msgr-failures new file mode 120000 index 00000000..03689aa4 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/msgr-failures @@ -0,0 +1 @@ +../thrash/msgr-failures \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code/objectstore b/qa/suites/rados/thrash-erasure-code/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code/rados.yaml b/qa/suites/rados/thrash-erasure-code/rados.yaml new file mode 120000 index 00000000..d256979c --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/rados.yaml @@ -0,0 +1 @@ +.qa/config/rados.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code/recovery-overrides b/qa/suites/rados/thrash-erasure-code/recovery-overrides new file mode 120000 index 00000000..1957f2c4 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/recovery-overrides @@ -0,0 +1 @@ +../thrash/2-recovery-overrides \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code/supported-random-distro$ b/qa/suites/rados/thrash-erasure-code/supported-random-distro$ new file mode 120000 index 00000000..7cef21ee --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/supported-random-distro$ @@ -0,0 +1 @@ +../basic/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code/thrashers/.qa b/qa/suites/rados/thrash-erasure-code/thrashers/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/thrashers/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code/thrashers/careful.yaml b/qa/suites/rados/thrash-erasure-code/thrashers/careful.yaml new file mode 100644 index 00000000..018267f0 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/thrashers/careful.yaml @@ -0,0 +1,19 @@ +overrides: + ceph: + log-whitelist: + - but it is still 
running + - objects unfound and apparently lost + conf: + osd: + osd debug reject backfill probability: .3 + osd scrub min interval: 60 + osd scrub max interval: 120 + osd max backfills: 2 +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgnum_shrink: 1 + chance_pgpnum_fix: 1 + min_in: 4 + aggressive_pg_num_changes: false diff --git a/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml b/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml new file mode 100644 index 00000000..31c19704 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/thrashers/default.yaml @@ -0,0 +1,18 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost + conf: + osd: + osd debug reject backfill probability: .1 + osd scrub min interval: 60 + osd scrub max interval: 120 + osd max backfills: 2 +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgnum_shrink: 1 + chance_pgpnum_fix: 1 + min_in: 4 diff --git a/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml b/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml new file mode 100644 index 00000000..4701fae5 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/thrashers/fastread.yaml @@ -0,0 +1,20 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost + conf: + mon: + osd pool default ec fast read: true + osd: + osd debug reject backfill probability: .1 + osd scrub min interval: 60 + osd scrub max interval: 120 + osd max backfills: 3 +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgnum_shrink: 1 + chance_pgpnum_fix: 1 + min_in: 4 diff --git a/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml b/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml new file mode 100644 index 00000000..12c11fa3 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/thrashers/morepggrow.yaml @@ -0,0 +1,16 @@ +overrides: + ceph: + conf: + osd: + osd scrub min interval: 60 + osd scrub max interval: 120 + osd max backfills: 9 + log-whitelist: + - but it is still running + - objects unfound and apparently lost +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 3 + chance_pgpnum_fix: 1 + min_in: 4 diff --git a/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml b/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml new file mode 100644 index 00000000..2bbe5e5f --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/thrashers/pggrow.yaml @@ -0,0 +1,16 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost + conf: + osd: + osd scrub min interval: 60 + osd scrub max interval: 120 + osd max backfills: 4 +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 2 + chance_pgpnum_fix: 1 + min_in: 4 diff --git a/qa/suites/rados/thrash-erasure-code/thrashosds-health.yaml b/qa/suites/rados/thrash-erasure-code/thrashosds-health.yaml new file mode 120000 index 00000000..9124eb1a --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/thrashosds-health.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code/workloads/.qa b/qa/suites/rados/thrash-erasure-code/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=clay-k=4-m=2.yaml 
b/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=clay-k=4-m=2.yaml new file mode 120000 index 00000000..08155ed6 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=clay-k=4-m=2.yaml @@ -0,0 +1 @@ +.qa/erasure-code/ec-rados-plugin=clay-k=4-m=2.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml b/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml new file mode 120000 index 00000000..af6d8042 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=2-m=1.yaml @@ -0,0 +1 @@ +.qa/erasure-code/ec-rados-plugin=jerasure-k=2-m=1.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml b/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml new file mode 120000 index 00000000..cdf55199 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/workloads/ec-rados-plugin=jerasure-k=3-m=1.yaml @@ -0,0 +1 @@ +.qa/erasure-code/ec-rados-plugin=jerasure-k=3-m=1.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-erasure-code/workloads/ec-radosbench.yaml b/qa/suites/rados/thrash-erasure-code/workloads/ec-radosbench.yaml new file mode 100644 index 00000000..3c2ff7af --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/workloads/ec-radosbench.yaml @@ -0,0 +1,27 @@ +tasks: +- full_sequential: + - radosbench: + clients: [client.0] + time: 150 + unique_pool: true + ec_pool: true + - radosbench: + clients: [client.0] + time: 150 + unique_pool: true + ec_pool: true + - radosbench: + clients: [client.0] + time: 150 + unique_pool: true + ec_pool: true + - radosbench: + clients: [client.0] + time: 150 + unique_pool: true + ec_pool: true + - radosbench: + clients: [client.0] + time: 150 + unique_pool: true + ec_pool: true diff --git a/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-fast-read.yaml b/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-fast-read.yaml new file mode 100644 index 00000000..e732ec6f --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-fast-read.yaml @@ -0,0 +1,21 @@ +tasks: +- rados: + clients: [client.0] + ops: 400000 + max_seconds: 600 + max_in_flight: 64 + objects: 1024 + size: 16384 + ec_pool: true + fast_read: true + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-many-deletes.yaml b/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-many-deletes.yaml new file mode 100644 index 00000000..25b38e14 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects-many-deletes.yaml @@ -0,0 +1,14 @@ +tasks: +- rados: + clients: [client.0] + ops: 400000 + max_seconds: 600 + max_in_flight: 8 + objects: 20 + size: 16384 + ec_pool: true + op_weights: + write: 0 + read: 0 + append: 10 + delete: 20 diff --git a/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects.yaml b/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects.yaml new file mode 100644 index 00000000..a8ac3971 --- /dev/null +++ b/qa/suites/rados/thrash-erasure-code/workloads/ec-small-objects.yaml @@ -0,0 +1,20 @@ +tasks: +- rados: + clients: [client.0] + ops: 400000 + max_seconds: 600 + max_in_flight: 64 + objects: 1024 + size: 16384 + 
ec_pool: true + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/qa/suites/rados/thrash-old-clients/% b/qa/suites/rados/thrash-old-clients/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash-old-clients/.qa b/qa/suites/rados/thrash-old-clients/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/.qa b/qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/2-size-2-min-size.yaml b/qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/2-size-2-min-size.yaml new file mode 120000 index 00000000..5393a755 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/2-size-2-min-size.yaml @@ -0,0 +1 @@ +.qa/overrides/2-size-2-min-size.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/3-size-2-min-size.yaml b/qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/3-size-2-min-size.yaml new file mode 120000 index 00000000..5ff70ead --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/0-size-min-size-overrides/3-size-2-min-size.yaml @@ -0,0 +1 @@ +.qa/overrides/3-size-2-min-size.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/1-install/.qa b/qa/suites/rados/thrash-old-clients/1-install/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/1-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/1-install/hammer.yaml b/qa/suites/rados/thrash-old-clients/1-install/hammer.yaml new file mode 100644 index 00000000..ed620c68 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/1-install/hammer.yaml @@ -0,0 +1,29 @@ +overrides: + ceph: + crush_tunables: hammer + conf: + mon: + mon osd initial require min compat client: hammer + client: + ms type: simple +tasks: +- install: + branch: hammer + downgrade_packages: ['librbd1', 'librados2'] + exclude_packages: + - librados3 + - ceph-mgr-dashboard + - ceph-mgr-diskprediction-local + - ceph-mgr-diskprediction-cloud + - ceph-mgr-rook + - ceph-mgr-ssh + - ceph-mgr + - libcephfs2 + - libcephfs-devel + - libcephfs-dev + - libradospp-devel + extra_packages: ['librados2'] +- install.upgrade: + mon.a: + mon.b: + mon.c: diff --git a/qa/suites/rados/thrash-old-clients/1-install/jewel.yaml b/qa/suites/rados/thrash-old-clients/1-install/jewel.yaml new file mode 100644 index 00000000..eae5ffc2 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/1-install/jewel.yaml @@ -0,0 +1,19 @@ +tasks: +- install: + branch: jewel + exclude_packages: + - librados3 + - ceph-mgr-dashboard + - ceph-mgr-diskprediction-local + - ceph-mgr-diskprediction-cloud + - ceph-mgr-rook + - ceph-mgr-ssh + - ceph-mgr + - libcephfs2 + - libcephfs-devel + - libcephfs-dev + extra_packages: ['librados2'] +- install.upgrade: + mon.a: + mon.b: + mon.c: diff --git a/qa/suites/rados/thrash-old-clients/1-install/luminous.yaml b/qa/suites/rados/thrash-old-clients/1-install/luminous.yaml new 
file mode 100644 index 00000000..eb9a3a2e --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/1-install/luminous.yaml @@ -0,0 +1,15 @@ +tasks: +- install: + branch: luminous + exclude_packages: + - librados3 + - ceph-mgr-dashboard + - ceph-mgr-diskprediction-local + - ceph-mgr-diskprediction-cloud + - ceph-mgr-rook + - ceph-mgr-ssh + extra_packages: ['librados2'] +- install.upgrade: + mon.a: + mon.b: + mon.c: diff --git a/qa/suites/rados/thrash-old-clients/backoff/.qa b/qa/suites/rados/thrash-old-clients/backoff/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/backoff/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/backoff/normal.yaml b/qa/suites/rados/thrash-old-clients/backoff/normal.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash-old-clients/backoff/peering.yaml b/qa/suites/rados/thrash-old-clients/backoff/peering.yaml new file mode 100644 index 00000000..66d06117 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/backoff/peering.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + osd: + osd backoff on peering: true diff --git a/qa/suites/rados/thrash-old-clients/backoff/peering_and_degraded.yaml b/qa/suites/rados/thrash-old-clients/backoff/peering_and_degraded.yaml new file mode 100644 index 00000000..e6109906 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/backoff/peering_and_degraded.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + conf: + osd: + osd backoff on peering: true + osd backoff on degraded: true diff --git a/qa/suites/rados/thrash-old-clients/ceph.yaml b/qa/suites/rados/thrash-old-clients/ceph.yaml new file mode 100644 index 00000000..364f9d03 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/ceph.yaml @@ -0,0 +1,7 @@ +tasks: +- ceph: + mon_bind_addrvec: false + mon_bind_msgr2: false + conf: + global: + ms bind msgr2: false diff --git a/qa/suites/rados/thrash-old-clients/clusters/+ b/qa/suites/rados/thrash-old-clients/clusters/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash-old-clients/clusters/.qa b/qa/suites/rados/thrash-old-clients/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/clusters/openstack.yaml b/qa/suites/rados/thrash-old-clients/clusters/openstack.yaml new file mode 100644 index 00000000..b0f3b9b4 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/clusters/openstack.yaml @@ -0,0 +1,4 @@ +openstack: + - volumes: # attached to each instance + count: 4 + size: 30 # GB diff --git a/qa/suites/rados/thrash-old-clients/clusters/three-plus-one.yaml b/qa/suites/rados/thrash-old-clients/clusters/three-plus-one.yaml new file mode 100644 index 00000000..35cfc3c1 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/clusters/three-plus-one.yaml @@ -0,0 +1,14 @@ +roles: +- [mon.a, mgr.y, osd.0, osd.1, osd.2, osd.3, client.0] +- [mon.b, mgr.x, osd.4, osd.5, osd.6, osd.7, client.1] +- [mon.c, osd.8, osd.9, osd.10, osd.11] +- [client.2] +openstack: +- volumes: # attached to each instance + count: 4 + size: 10 # GB +overrides: + ceph: + conf: + osd: + osd shutdown pgref assert: true diff --git a/qa/suites/rados/thrash-old-clients/d-balancer/.qa b/qa/suites/rados/thrash-old-clients/d-balancer/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ 
b/qa/suites/rados/thrash-old-clients/d-balancer/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/d-balancer/crush-compat.yaml b/qa/suites/rados/thrash-old-clients/d-balancer/crush-compat.yaml new file mode 100644 index 00000000..aa867660 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/d-balancer/crush-compat.yaml @@ -0,0 +1,6 @@ +tasks: +- exec: + mon.a: + - while ! ceph balancer status ; do sleep 1 ; done + - ceph balancer mode crush-compat + - ceph balancer on diff --git a/qa/suites/rados/thrash-old-clients/d-balancer/off.yaml b/qa/suites/rados/thrash-old-clients/d-balancer/off.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash-old-clients/distro$/.qa b/qa/suites/rados/thrash-old-clients/distro$/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/distro$/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/distro$/centos_latest.yaml b/qa/suites/rados/thrash-old-clients/distro$/centos_latest.yaml new file mode 120000 index 00000000..bd9854e7 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/distro$/centos_latest.yaml @@ -0,0 +1 @@ +.qa/distros/supported/centos_latest.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/distro$/ubuntu_16.04.yaml b/qa/suites/rados/thrash-old-clients/distro$/ubuntu_16.04.yaml new file mode 120000 index 00000000..053d801e --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/distro$/ubuntu_16.04.yaml @@ -0,0 +1 @@ +.qa/distros/all/ubuntu_16.04.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/msgr-failures/.qa b/qa/suites/rados/thrash-old-clients/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/msgr-failures/fastclose.yaml b/qa/suites/rados/thrash-old-clients/msgr-failures/fastclose.yaml new file mode 100644 index 00000000..02121726 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/msgr-failures/fastclose.yaml @@ -0,0 +1,8 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 2500 + ms tcp read timeout: 5 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rados/thrash-old-clients/msgr-failures/few.yaml b/qa/suites/rados/thrash-old-clients/msgr-failures/few.yaml new file mode 100644 index 00000000..527eadb4 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/msgr-failures/few.yaml @@ -0,0 +1,9 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + osd: + osd heartbeat use min delay socket: true + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rados/thrash-old-clients/msgr-failures/osd-delay.yaml b/qa/suites/rados/thrash-old-clients/msgr-failures/osd-delay.yaml new file mode 100644 index 00000000..91c14725 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/msgr-failures/osd-delay.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 2500 + ms inject delay type: osd + ms inject delay probability: .005 + ms inject delay max: 1 + ms inject internal delays: .002 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rados/thrash-old-clients/msgr/.qa b/qa/suites/rados/thrash-old-clients/msgr/.qa new file mode 120000 index 00000000..fea2489f --- /dev/null +++ 
b/qa/suites/rados/thrash-old-clients/msgr/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/msgr/async-v1only.yaml b/qa/suites/rados/thrash-old-clients/msgr/async-v1only.yaml new file mode 120000 index 00000000..9673dbb0 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/msgr/async-v1only.yaml @@ -0,0 +1 @@ +.qa/msgr/async-v1only.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/msgr/async.yaml b/qa/suites/rados/thrash-old-clients/msgr/async.yaml new file mode 120000 index 00000000..b7f05e4c --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/msgr/async.yaml @@ -0,0 +1 @@ +.qa/msgr/async.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/msgr/random.yaml b/qa/suites/rados/thrash-old-clients/msgr/random.yaml new file mode 120000 index 00000000..e0dcb145 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/msgr/random.yaml @@ -0,0 +1 @@ +.qa/msgr/random.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/msgr/simple.yaml b/qa/suites/rados/thrash-old-clients/msgr/simple.yaml new file mode 120000 index 00000000..780dc0ea --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/msgr/simple.yaml @@ -0,0 +1 @@ +.qa/msgr/simple.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/rados.yaml b/qa/suites/rados/thrash-old-clients/rados.yaml new file mode 120000 index 00000000..d256979c --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/rados.yaml @@ -0,0 +1 @@ +.qa/config/rados.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/thrashers/.qa b/qa/suites/rados/thrash-old-clients/thrashers/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/thrashers/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/thrashers/careful.yaml b/qa/suites/rados/thrash-old-clients/thrashers/careful.yaml new file mode 100644 index 00000000..df77f73a --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/thrashers/careful.yaml @@ -0,0 +1,25 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost + conf: + osd: + osd debug reject backfill probability: .3 + osd scrub min interval: 60 + osd scrub max interval: 120 + osd max backfills: 3 + osd snap trim sleep: 2 + mon: + mon min osdmap epochs: 50 + paxos service trim min: 10 + # prune full osdmaps regularly + mon osdmap full prune min: 15 + mon osdmap full prune interval: 2 + mon osdmap full prune txsize: 2 +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 + aggressive_pg_num_changes: false diff --git a/qa/suites/rados/thrash-old-clients/thrashers/default.yaml b/qa/suites/rados/thrash-old-clients/thrashers/default.yaml new file mode 100644 index 00000000..e8e2007f --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/thrashers/default.yaml @@ -0,0 +1,24 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost + conf: + osd: + osd debug reject backfill probability: .3 + osd scrub min interval: 60 + osd scrub max interval: 120 + osd max backfills: 3 + osd snap trim sleep: 2 + mon: + mon min osdmap epochs: 50 + paxos service trim min: 10 + # prune full osdmaps regularly + mon osdmap full prune min: 15 + mon osdmap full prune interval: 2 + mon osdmap full prune txsize: 2 +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + 
chance_pgpnum_fix: 1 diff --git a/qa/suites/rados/thrash-old-clients/thrashers/mapgap.yaml b/qa/suites/rados/thrash-old-clients/thrashers/mapgap.yaml new file mode 100644 index 00000000..7b55097f --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/thrashers/mapgap.yaml @@ -0,0 +1,26 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost + - osd_map_cache_size + conf: + mon: + mon min osdmap epochs: 50 + paxos service trim min: 10 + # prune full osdmaps regularly + mon osdmap full prune min: 15 + mon osdmap full prune interval: 2 + mon osdmap full prune txsize: 2 + osd: + osd map cache size: 1 + osd scrub min interval: 60 + osd scrub max interval: 120 + osd scrub during recovery: false + osd max backfills: 6 +tasks: +- thrashosds: + timeout: 1800 + chance_pgnum_grow: 0.25 + chance_pgpnum_fix: 0.25 + chance_test_map_discontinuity: 2 diff --git a/qa/suites/rados/thrash-old-clients/thrashers/morepggrow.yaml b/qa/suites/rados/thrash-old-clients/thrashers/morepggrow.yaml new file mode 100644 index 00000000..91d2173e --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/thrashers/morepggrow.yaml @@ -0,0 +1,22 @@ +overrides: + ceph: + conf: + osd: + osd scrub min interval: 60 + osd scrub max interval: 120 + journal throttle high multiple: 2 + journal throttle max multiple: 10 + filestore queue throttle high multiple: 2 + filestore queue throttle max multiple: 10 + osd max backfills: 9 + log-whitelist: + - but it is still running + - objects unfound and apparently lost +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 3 + chance_pgpnum_fix: 1 +openstack: +- volumes: + size: 50 diff --git a/qa/suites/rados/thrash-old-clients/thrashers/none.yaml b/qa/suites/rados/thrash-old-clients/thrashers/none.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash-old-clients/thrashers/pggrow.yaml b/qa/suites/rados/thrash-old-clients/thrashers/pggrow.yaml new file mode 100644 index 00000000..8721fd18 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/thrashers/pggrow.yaml @@ -0,0 +1,24 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost + conf: + osd: + osd scrub min interval: 60 + osd scrub max interval: 120 + filestore odsync write: true + osd max backfills: 2 + osd snap trim sleep: .5 + mon: + mon min osdmap epochs: 50 + paxos service trim min: 10 + # prune full osdmaps regularly + mon osdmap full prune min: 15 + mon osdmap full prune interval: 2 + mon osdmap full prune txsize: 2 +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 2 + chance_pgpnum_fix: 1 diff --git a/qa/suites/rados/thrash-old-clients/thrashosds-health.yaml b/qa/suites/rados/thrash-old-clients/thrashosds-health.yaml new file mode 120000 index 00000000..9124eb1a --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/thrashosds-health.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/workloads/.qa b/qa/suites/rados/thrash-old-clients/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash-old-clients/workloads/cache-snaps.yaml b/qa/suites/rados/thrash-old-clients/workloads/cache-snaps.yaml new file mode 100644 index 00000000..fc1f5b45 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/workloads/cache-snaps.yaml @@ -0,0 +1,34 @@ +overrides: + 
ceph: + log-whitelist: + - must scrub before tier agent can activate +tasks: +- exec: + client.0: + - sudo ceph osd pool create base 4 + - sudo ceph osd pool application enable base rados + - sudo ceph osd pool create cache 4 + - sudo ceph osd tier add base cache + - sudo ceph osd tier cache-mode cache writeback + - sudo ceph osd tier set-overlay base cache + - sudo ceph osd pool set cache hit_set_type bloom + - sudo ceph osd pool set cache hit_set_count 8 + - sudo ceph osd pool set cache hit_set_period 3600 + - sudo ceph osd pool set cache target_max_objects 250 + - sudo ceph osd pool set cache min_read_recency_for_promote 2 +- rados: + clients: [client.2] + pools: [base] + ops: 4000 + objects: 500 + op_weights: + read: 100 + write: 100 + delete: 50 + copy_from: 50 + cache_flush: 50 + cache_try_flush: 50 + cache_evict: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/rados/thrash-old-clients/workloads/radosbench.yaml b/qa/suites/rados/thrash-old-clients/workloads/radosbench.yaml new file mode 100644 index 00000000..d0022fef --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/workloads/radosbench.yaml @@ -0,0 +1,41 @@ +overrides: + ceph: + conf: + client.2: + debug ms: 1 + debug objecter: 20 + debug rados: 20 +tasks: +- full_sequential: + - radosbench: + objectsize: 0 + clients: [client.2] + time: 90 + - radosbench: + objectsize: 0 + clients: [client.2] + time: 90 + - radosbench: + objectsize: 0 + clients: [client.2] + time: 90 + - radosbench: + objectsize: 0 + clients: [client.2] + time: 90 + - radosbench: + objectsize: 0 + clients: [client.2] + time: 90 + - radosbench: + objectsize: 0 + clients: [client.2] + time: 90 + - radosbench: + objectsize: 0 + clients: [client.2] + time: 90 + - radosbench: + objectsize: 0 + clients: [client.2] + time: 90 diff --git a/qa/suites/rados/thrash-old-clients/workloads/rbd_cls.yaml b/qa/suites/rados/thrash-old-clients/workloads/rbd_cls.yaml new file mode 100644 index 00000000..31ccad9f --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/workloads/rbd_cls.yaml @@ -0,0 +1,7 @@ +meta: +- desc: | + rbd object class functional tests +tasks: +- exec: + client.2: + - ceph_test_cls_rbd --gtest_filter=-TestClsRbd.get_features:TestClsRbd.parents diff --git a/qa/suites/rados/thrash-old-clients/workloads/snaps-few-objects.yaml b/qa/suites/rados/thrash-old-clients/workloads/snaps-few-objects.yaml new file mode 100644 index 00000000..f0a5735a --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/workloads/snaps-few-objects.yaml @@ -0,0 +1,13 @@ +tasks: +- rados: + clients: [client.2] + ops: 4000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 diff --git a/qa/suites/rados/thrash-old-clients/workloads/test_rbd_api.yaml b/qa/suites/rados/thrash-old-clients/workloads/test_rbd_api.yaml new file mode 100644 index 00000000..39617b37 --- /dev/null +++ b/qa/suites/rados/thrash-old-clients/workloads/test_rbd_api.yaml @@ -0,0 +1,8 @@ +meta: +- desc: | + librbd C and C++ api tests +workload: +- workunit: + clients: + client.2: + - rbd/test_librbd.sh diff --git a/qa/suites/rados/thrash/% b/qa/suites/rados/thrash/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash/.qa b/qa/suites/rados/thrash/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash/0-size-min-size-overrides/.qa 
b/qa/suites/rados/thrash/0-size-min-size-overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash/0-size-min-size-overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-2-min-size.yaml b/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-2-min-size.yaml new file mode 120000 index 00000000..5393a755 --- /dev/null +++ b/qa/suites/rados/thrash/0-size-min-size-overrides/2-size-2-min-size.yaml @@ -0,0 +1 @@ +.qa/overrides/2-size-2-min-size.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml b/qa/suites/rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml new file mode 120000 index 00000000..5ff70ead --- /dev/null +++ b/qa/suites/rados/thrash/0-size-min-size-overrides/3-size-2-min-size.yaml @@ -0,0 +1 @@ +.qa/overrides/3-size-2-min-size.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash/1-pg-log-overrides/.qa b/qa/suites/rados/thrash/1-pg-log-overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash/1-pg-log-overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash/1-pg-log-overrides/normal_pg_log.yaml b/qa/suites/rados/thrash/1-pg-log-overrides/normal_pg_log.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash/1-pg-log-overrides/short_pg_log.yaml b/qa/suites/rados/thrash/1-pg-log-overrides/short_pg_log.yaml new file mode 120000 index 00000000..abd86d7d --- /dev/null +++ b/qa/suites/rados/thrash/1-pg-log-overrides/short_pg_log.yaml @@ -0,0 +1 @@ +.qa/overrides/short_pg_log.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash/2-recovery-overrides/$ b/qa/suites/rados/thrash/2-recovery-overrides/$ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash/2-recovery-overrides/.qa b/qa/suites/rados/thrash/2-recovery-overrides/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash/2-recovery-overrides/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash/2-recovery-overrides/default.yaml b/qa/suites/rados/thrash/2-recovery-overrides/default.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash/2-recovery-overrides/more-active-recovery.yaml b/qa/suites/rados/thrash/2-recovery-overrides/more-active-recovery.yaml new file mode 120000 index 00000000..47afd702 --- /dev/null +++ b/qa/suites/rados/thrash/2-recovery-overrides/more-active-recovery.yaml @@ -0,0 +1 @@ +.qa/overrides/more-active-recovery.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash/backoff/.qa b/qa/suites/rados/thrash/backoff/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash/backoff/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash/backoff/normal.yaml b/qa/suites/rados/thrash/backoff/normal.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash/backoff/peering.yaml b/qa/suites/rados/thrash/backoff/peering.yaml new file mode 100644 index 00000000..66d06117 --- /dev/null +++ b/qa/suites/rados/thrash/backoff/peering.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + osd: + osd backoff on peering: true diff --git a/qa/suites/rados/thrash/backoff/peering_and_degraded.yaml b/qa/suites/rados/thrash/backoff/peering_and_degraded.yaml 
new file mode 100644 index 00000000..e6109906 --- /dev/null +++ b/qa/suites/rados/thrash/backoff/peering_and_degraded.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + conf: + osd: + osd backoff on peering: true + osd backoff on degraded: true diff --git a/qa/suites/rados/thrash/ceph.yaml b/qa/suites/rados/thrash/ceph.yaml new file mode 100644 index 00000000..2030acb9 --- /dev/null +++ b/qa/suites/rados/thrash/ceph.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/rados/thrash/clusters/+ b/qa/suites/rados/thrash/clusters/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash/clusters/.qa b/qa/suites/rados/thrash/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash/clusters/fixed-2.yaml b/qa/suites/rados/thrash/clusters/fixed-2.yaml new file mode 120000 index 00000000..230ff0fd --- /dev/null +++ b/qa/suites/rados/thrash/clusters/fixed-2.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-2.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash/clusters/openstack.yaml b/qa/suites/rados/thrash/clusters/openstack.yaml new file mode 100644 index 00000000..b0f3b9b4 --- /dev/null +++ b/qa/suites/rados/thrash/clusters/openstack.yaml @@ -0,0 +1,4 @@ +openstack: + - volumes: # attached to each instance + count: 4 + size: 30 # GB diff --git a/qa/suites/rados/thrash/crc-failures/bad_map_crc_failure.yaml b/qa/suites/rados/thrash/crc-failures/bad_map_crc_failure.yaml new file mode 100644 index 00000000..1e04fb36 --- /dev/null +++ b/qa/suites/rados/thrash/crc-failures/bad_map_crc_failure.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + osd: + osd inject bad map crc probability: 0.1 + log-whitelist: + - failed to encode map diff --git a/qa/suites/rados/thrash/crc-failures/default.yaml b/qa/suites/rados/thrash/crc-failures/default.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash/d-balancer/.qa b/qa/suites/rados/thrash/d-balancer/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash/d-balancer/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash/d-balancer/crush-compat.yaml b/qa/suites/rados/thrash/d-balancer/crush-compat.yaml new file mode 100644 index 00000000..aa867660 --- /dev/null +++ b/qa/suites/rados/thrash/d-balancer/crush-compat.yaml @@ -0,0 +1,6 @@ +tasks: +- exec: + mon.a: + - while ! ceph balancer status ; do sleep 1 ; done + - ceph balancer mode crush-compat + - ceph balancer on diff --git a/qa/suites/rados/thrash/d-balancer/off.yaml b/qa/suites/rados/thrash/d-balancer/off.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash/d-balancer/upmap.yaml b/qa/suites/rados/thrash/d-balancer/upmap.yaml new file mode 100644 index 00000000..788eebee --- /dev/null +++ b/qa/suites/rados/thrash/d-balancer/upmap.yaml @@ -0,0 +1,7 @@ +tasks: +- exec: + mon.a: + - while ! 
ceph balancer status ; do sleep 1 ; done + - ceph osd set-require-min-compat-client luminous + - ceph balancer mode upmap + - ceph balancer on diff --git a/qa/suites/rados/thrash/msgr b/qa/suites/rados/thrash/msgr new file mode 120000 index 00000000..57bee80d --- /dev/null +++ b/qa/suites/rados/thrash/msgr @@ -0,0 +1 @@ +.qa/msgr \ No newline at end of file diff --git a/qa/suites/rados/thrash/msgr-failures/.qa b/qa/suites/rados/thrash/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash/msgr-failures/fastclose.yaml b/qa/suites/rados/thrash/msgr-failures/fastclose.yaml new file mode 100644 index 00000000..02121726 --- /dev/null +++ b/qa/suites/rados/thrash/msgr-failures/fastclose.yaml @@ -0,0 +1,8 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 2500 + ms tcp read timeout: 5 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rados/thrash/msgr-failures/few.yaml b/qa/suites/rados/thrash/msgr-failures/few.yaml new file mode 100644 index 00000000..527eadb4 --- /dev/null +++ b/qa/suites/rados/thrash/msgr-failures/few.yaml @@ -0,0 +1,9 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + osd: + osd heartbeat use min delay socket: true + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml b/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml new file mode 100644 index 00000000..91c14725 --- /dev/null +++ b/qa/suites/rados/thrash/msgr-failures/osd-delay.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 2500 + ms inject delay type: osd + ms inject delay probability: .005 + ms inject delay max: 1 + ms inject internal delays: .002 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rados/thrash/objectstore b/qa/suites/rados/thrash/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rados/thrash/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rados/thrash/rados.yaml b/qa/suites/rados/thrash/rados.yaml new file mode 120000 index 00000000..d256979c --- /dev/null +++ b/qa/suites/rados/thrash/rados.yaml @@ -0,0 +1 @@ +.qa/config/rados.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash/supported-random-distro$ b/qa/suites/rados/thrash/supported-random-distro$ new file mode 120000 index 00000000..7cef21ee --- /dev/null +++ b/qa/suites/rados/thrash/supported-random-distro$ @@ -0,0 +1 @@ +../basic/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rados/thrash/thrashers/.qa b/qa/suites/rados/thrash/thrashers/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash/thrashers/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash/thrashers/careful.yaml b/qa/suites/rados/thrash/thrashers/careful.yaml new file mode 100644 index 00000000..85e0c268 --- /dev/null +++ b/qa/suites/rados/thrash/thrashers/careful.yaml @@ -0,0 +1,26 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost + conf: + osd: + osd debug reject backfill probability: .3 + osd scrub min interval: 60 + osd scrub max interval: 120 + osd max backfills: 3 + osd snap trim sleep: 2 + mon: + mon min osdmap epochs: 50 + paxos service trim min: 10 + # prune full osdmaps regularly + mon osdmap 
full prune min: 15 + mon osdmap full prune interval: 2 + mon osdmap full prune txsize: 2 +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgnum_shrink: 1 + chance_pgpnum_fix: 1 + aggressive_pg_num_changes: false diff --git a/qa/suites/rados/thrash/thrashers/default.yaml b/qa/suites/rados/thrash/thrashers/default.yaml new file mode 100644 index 00000000..536e85cb --- /dev/null +++ b/qa/suites/rados/thrash/thrashers/default.yaml @@ -0,0 +1,26 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost + conf: + osd: + osd debug reject backfill probability: .3 + osd scrub min interval: 60 + osd scrub max interval: 120 + osd max backfills: 3 + osd snap trim sleep: 2 + osd delete sleep: 1 + mon: + mon min osdmap epochs: 50 + paxos service trim min: 10 + # prune full osdmaps regularly + mon osdmap full prune min: 15 + mon osdmap full prune interval: 2 + mon osdmap full prune txsize: 2 +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgnum_shrink: 1 + chance_pgpnum_fix: 1 diff --git a/qa/suites/rados/thrash/thrashers/mapgap.yaml b/qa/suites/rados/thrash/thrashers/mapgap.yaml new file mode 100644 index 00000000..bbc3dbdc --- /dev/null +++ b/qa/suites/rados/thrash/thrashers/mapgap.yaml @@ -0,0 +1,27 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost + - osd_map_cache_size + conf: + mon: + mon min osdmap epochs: 50 + paxos service trim min: 10 + # prune full osdmaps regularly + mon osdmap full prune min: 15 + mon osdmap full prune interval: 2 + mon osdmap full prune txsize: 2 + osd: + osd map cache size: 1 + osd scrub min interval: 60 + osd scrub max interval: 120 + osd scrub during recovery: false + osd max backfills: 6 +tasks: +- thrashosds: + timeout: 1800 + chance_pgnum_grow: 0.25 + chance_pgnum_shrink: 0.25 + chance_pgpnum_fix: 0.25 + chance_test_map_discontinuity: 2 diff --git a/qa/suites/rados/thrash/thrashers/morepggrow.yaml b/qa/suites/rados/thrash/thrashers/morepggrow.yaml new file mode 100644 index 00000000..91d2173e --- /dev/null +++ b/qa/suites/rados/thrash/thrashers/morepggrow.yaml @@ -0,0 +1,22 @@ +overrides: + ceph: + conf: + osd: + osd scrub min interval: 60 + osd scrub max interval: 120 + journal throttle high multiple: 2 + journal throttle max multiple: 10 + filestore queue throttle high multiple: 2 + filestore queue throttle max multiple: 10 + osd max backfills: 9 + log-whitelist: + - but it is still running + - objects unfound and apparently lost +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 3 + chance_pgpnum_fix: 1 +openstack: +- volumes: + size: 50 diff --git a/qa/suites/rados/thrash/thrashers/none.yaml b/qa/suites/rados/thrash/thrashers/none.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/thrash/thrashers/pggrow.yaml b/qa/suites/rados/thrash/thrashers/pggrow.yaml new file mode 100644 index 00000000..8721fd18 --- /dev/null +++ b/qa/suites/rados/thrash/thrashers/pggrow.yaml @@ -0,0 +1,24 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost + conf: + osd: + osd scrub min interval: 60 + osd scrub max interval: 120 + filestore odsync write: true + osd max backfills: 2 + osd snap trim sleep: .5 + mon: + mon min osdmap epochs: 50 + paxos service trim min: 10 + # prune full osdmaps regularly + mon osdmap full prune min: 15 + mon osdmap full prune interval: 2 + mon osdmap full prune txsize: 2 +tasks: +- thrashosds: + timeout: 1200 + 
chance_pgnum_grow: 2 + chance_pgpnum_fix: 1 diff --git a/qa/suites/rados/thrash/thrashosds-health.yaml b/qa/suites/rados/thrash/thrashosds-health.yaml new file mode 120000 index 00000000..9124eb1a --- /dev/null +++ b/qa/suites/rados/thrash/thrashosds-health.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/suites/rados/thrash/workloads/.qa b/qa/suites/rados/thrash/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml b/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml new file mode 100644 index 00000000..8c9764ad --- /dev/null +++ b/qa/suites/rados/thrash/workloads/admin_socket_objecter_requests.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + client.0: + admin socket: /var/run/ceph/ceph-$name.asok +tasks: +- radosbench: + clients: [client.0] + time: 150 +- admin_socket: + client.0: + objecter_requests: + test: "http://git.ceph.com/?p={repo};a=blob_plain;f=src/test/admin_socket/objecter_requests;hb={branch}" diff --git a/qa/suites/rados/thrash/workloads/cache-agent-big.yaml b/qa/suites/rados/thrash/workloads/cache-agent-big.yaml new file mode 100644 index 00000000..31a964d1 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/cache-agent-big.yaml @@ -0,0 +1,36 @@ +overrides: + ceph: + log-whitelist: + - must scrub before tier agent can activate + conf: + osd: + # override short_pg_log_entries.yaml (which sets these under [global]) + osd_min_pg_log_entries: 3000 + osd_max_pg_log_entries: 3000 +tasks: +- exec: + client.0: + - sudo ceph osd erasure-code-profile set myprofile crush-failure-domain=osd m=2 k=2 + - sudo ceph osd pool create base 4 4 erasure myprofile + - sudo ceph osd pool application enable base rados + - sudo ceph osd pool set base min_size 2 + - sudo ceph osd pool create cache 4 + - sudo ceph osd tier add base cache + - sudo ceph osd tier cache-mode cache writeback + - sudo ceph osd tier set-overlay base cache + - sudo ceph osd pool set cache hit_set_type bloom + - sudo ceph osd pool set cache hit_set_count 8 + - sudo ceph osd pool set cache hit_set_period 60 + - sudo ceph osd pool set cache target_max_objects 5000 +- rados: + clients: [client.0] + pools: [base] + ops: 10000 + objects: 6600 + max_seconds: 1200 + size: 1024 + op_weights: + read: 100 + write: 100 + delete: 50 + copy_from: 50 diff --git a/qa/suites/rados/thrash/workloads/cache-agent-small.yaml b/qa/suites/rados/thrash/workloads/cache-agent-small.yaml new file mode 100644 index 00000000..f082b0b9 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/cache-agent-small.yaml @@ -0,0 +1,34 @@ +overrides: + ceph: + log-whitelist: + - must scrub before tier agent can activate + conf: + osd: + # override short_pg_log_entries.yaml (which sets these under [global]) + osd_min_pg_log_entries: 3000 + osd_max_pg_log_entries: 3000 +tasks: +- exec: + client.0: + - sudo ceph osd pool create base 4 + - sudo ceph osd pool application enable base rados + - sudo ceph osd pool create cache 4 + - sudo ceph osd tier add base cache + - sudo ceph osd tier cache-mode cache writeback + - sudo ceph osd tier set-overlay base cache + - sudo ceph osd pool set cache hit_set_type bloom + - sudo ceph osd pool set cache hit_set_count 8 + - sudo ceph osd pool set cache hit_set_period 60 + - sudo ceph osd pool set cache target_max_objects 250 + - sudo ceph osd pool set cache 
min_read_recency_for_promote 2 + - sudo ceph osd pool set cache min_write_recency_for_promote 2 +- rados: + clients: [client.0] + pools: [base] + ops: 4000 + objects: 500 + op_weights: + read: 100 + write: 100 + delete: 50 + copy_from: 50 diff --git a/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml b/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml new file mode 100644 index 00000000..b84d4d95 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/cache-pool-snaps-readproxy.yaml @@ -0,0 +1,39 @@ +overrides: + ceph: + log-whitelist: + - must scrub before tier agent can activate + conf: + osd: + # override short_pg_log_entries.yaml (which sets these under [global]) + osd_min_pg_log_entries: 3000 + osd_max_pg_log_entries: 3000 +tasks: +- exec: + client.0: + - sudo ceph osd pool create base 4 + - sudo ceph osd pool application enable base rados + - sudo ceph osd pool create cache 4 + - sudo ceph osd tier add base cache + - sudo ceph osd tier cache-mode cache readproxy + - sudo ceph osd tier set-overlay base cache + - sudo ceph osd pool set cache hit_set_type bloom + - sudo ceph osd pool set cache hit_set_count 8 + - sudo ceph osd pool set cache hit_set_period 3600 + - sudo ceph osd pool set cache target_max_objects 250 +- rados: + clients: [client.0] + pools: [base] + ops: 4000 + objects: 500 + pool_snaps: true + op_weights: + read: 100 + write: 100 + delete: 50 + copy_from: 50 + cache_flush: 50 + cache_try_flush: 50 + cache_evict: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml b/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml new file mode 100644 index 00000000..8d712e86 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/cache-pool-snaps.yaml @@ -0,0 +1,44 @@ +overrides: + ceph: + log-whitelist: + - must scrub before tier agent can activate + conf: + osd: + # override short_pg_log_entries.yaml (which sets these under [global]) + osd_min_pg_log_entries: 3000 + osd_max_pg_log_entries: 3000 +tasks: +- exec: + client.0: + - sudo ceph osd pool create base 4 + - sudo ceph osd pool application enable base rados + - sudo ceph osd pool create cache 4 + - sudo ceph osd tier add base cache + - sudo ceph osd tier cache-mode cache writeback + - sudo ceph osd tier set-overlay base cache + - sudo ceph osd pool set cache hit_set_type bloom + - sudo ceph osd pool set cache hit_set_count 8 + - sudo ceph osd pool set cache hit_set_period 3600 + - sudo ceph osd pool set cache target_max_objects 250 + - sudo ceph osd pool set cache min_read_recency_for_promote 0 + - sudo ceph osd pool set cache min_write_recency_for_promote 0 +- rados: + clients: [client.0] + pools: [base] + ops: 4000 + objects: 500 + pool_snaps: true + op_weights: + read: 100 + write: 100 + delete: 50 + copy_from: 50 + cache_flush: 50 + cache_try_flush: 50 + cache_evict: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 +openstack: + - machine: + ram: 15000 # MB diff --git a/qa/suites/rados/thrash/workloads/cache-snaps.yaml b/qa/suites/rados/thrash/workloads/cache-snaps.yaml new file mode 100644 index 00000000..7ece997e --- /dev/null +++ b/qa/suites/rados/thrash/workloads/cache-snaps.yaml @@ -0,0 +1,39 @@ +overrides: + ceph: + log-whitelist: + - must scrub before tier agent can activate + conf: + osd: + # override short_pg_log_entries.yaml (which sets these under [global]) + osd_min_pg_log_entries: 3000 + osd_max_pg_log_entries: 3000 +tasks: +- exec: + client.0: + - sudo ceph osd pool create base 4 + - sudo ceph osd pool 
application enable base rados + - sudo ceph osd pool create cache 4 + - sudo ceph osd tier add base cache + - sudo ceph osd tier cache-mode cache writeback + - sudo ceph osd tier set-overlay base cache + - sudo ceph osd pool set cache hit_set_type bloom + - sudo ceph osd pool set cache hit_set_count 8 + - sudo ceph osd pool set cache hit_set_period 3600 + - sudo ceph osd pool set cache target_max_objects 250 + - sudo ceph osd pool set cache min_read_recency_for_promote 2 +- rados: + clients: [client.0] + pools: [base] + ops: 4000 + objects: 500 + op_weights: + read: 100 + write: 100 + delete: 50 + copy_from: 50 + cache_flush: 50 + cache_try_flush: 50 + cache_evict: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/rados/thrash/workloads/cache.yaml b/qa/suites/rados/thrash/workloads/cache.yaml new file mode 100644 index 00000000..42cfa6cb --- /dev/null +++ b/qa/suites/rados/thrash/workloads/cache.yaml @@ -0,0 +1,36 @@ +overrides: + ceph: + log-whitelist: + - must scrub before tier agent can activate + conf: + osd: + # override short_pg_log_entries.yaml (which sets these under [global]) + osd_min_pg_log_entries: 3000 + osd_max_pg_log_entries: 3000 +tasks: +- exec: + client.0: + - sudo ceph osd pool create base 4 + - sudo ceph osd pool application enable base rados + - sudo ceph osd pool create cache 4 + - sudo ceph osd tier add base cache + - sudo ceph osd tier cache-mode cache writeback + - sudo ceph osd tier set-overlay base cache + - sudo ceph osd pool set cache hit_set_type bloom + - sudo ceph osd pool set cache hit_set_count 8 + - sudo ceph osd pool set cache hit_set_period 3600 + - sudo ceph osd pool set cache min_read_recency_for_promote 0 + - sudo ceph osd pool set cache min_write_recency_for_promote 0 +- rados: + clients: [client.0] + pools: [base] + ops: 4000 + objects: 500 + op_weights: + read: 100 + write: 100 + delete: 50 + copy_from: 50 + cache_flush: 50 + cache_try_flush: 50 + cache_evict: 50 diff --git a/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml b/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml new file mode 100644 index 00000000..1f0759d9 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/pool-snaps-few-objects.yaml @@ -0,0 +1,18 @@ +override: + conf: + osd: + osd deep scrub update digest min age: 0 +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + pool_snaps: true + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 diff --git a/qa/suites/rados/thrash/workloads/rados_api_tests.yaml b/qa/suites/rados/thrash/workloads/rados_api_tests.yaml new file mode 100644 index 00000000..7c23a5ca --- /dev/null +++ b/qa/suites/rados/thrash/workloads/rados_api_tests.yaml @@ -0,0 +1,20 @@ +overrides: + ceph: + log-whitelist: + - reached quota + - \(POOL_APP_NOT_ENABLED\) + - \(PG_AVAILABILITY\) + crush_tunables: jewel + conf: + client: + debug ms: 1 + debug objecter: 20 + debug rados: 20 + mon: + mon warn on pool no app: false + debug mgrc: 20 +tasks: +- workunit: + clients: + client.0: + - rados/test.sh diff --git a/qa/suites/rados/thrash/workloads/radosbench-high-concurrency.yaml b/qa/suites/rados/thrash/workloads/radosbench-high-concurrency.yaml new file mode 100644 index 00000000..902c4b56 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/radosbench-high-concurrency.yaml @@ -0,0 +1,49 @@ +overrides: + ceph: + conf: + client.0: + debug ms: 1 + debug objecter: 20 + debug rados: 20 +tasks: +- full_sequential: + - radosbench: + clients: 
[client.0] + concurrency: 128 + size: 8192 + time: 90 + - radosbench: + clients: [client.0] + concurrency: 128 + size: 8192 + time: 90 + - radosbench: + clients: [client.0] + concurrency: 128 + size: 8192 + time: 90 + - radosbench: + clients: [client.0] + concurrency: 128 + size: 8192 + time: 90 + - radosbench: + clients: [client.0] + concurrency: 128 + size: 8192 + time: 90 + - radosbench: + clients: [client.0] + concurrency: 128 + size: 8192 + time: 90 + - radosbench: + clients: [client.0] + concurrency: 128 + size: 8192 + time: 90 + - radosbench: + clients: [client.0] + concurrency: 128 + size: 8192 + time: 90 diff --git a/qa/suites/rados/thrash/workloads/radosbench.yaml b/qa/suites/rados/thrash/workloads/radosbench.yaml new file mode 100644 index 00000000..1b25004a --- /dev/null +++ b/qa/suites/rados/thrash/workloads/radosbench.yaml @@ -0,0 +1,33 @@ +overrides: + ceph: + conf: + client.0: + debug ms: 1 + debug objecter: 20 + debug rados: 20 +tasks: +- full_sequential: + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 diff --git a/qa/suites/rados/thrash/workloads/redirect.yaml b/qa/suites/rados/thrash/workloads/redirect.yaml new file mode 100644 index 00000000..bebce845 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/redirect.yaml @@ -0,0 +1,15 @@ +tasks: +- exec: + client.0: + - sudo ceph osd pool create low_tier 4 +- rados: + clients: [client.0] + low_tier_pool: 'low_tier' + ops: 4000 + objects: 500 + set_redirect: true + op_weights: + read: 100 + write: 100 + delete: 50 + copy_from: 50 diff --git a/qa/suites/rados/thrash/workloads/redirect_promote_tests.yaml b/qa/suites/rados/thrash/workloads/redirect_promote_tests.yaml new file mode 100644 index 00000000..c2787c43 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/redirect_promote_tests.yaml @@ -0,0 +1,14 @@ +tasks: +- exec: + client.0: + - sudo ceph osd pool create low_tier 4 +- rados: + clients: [client.0] + low_tier_pool: 'low_tier' + ops: 4000 + objects: 500 + set_redirect: true + op_weights: + set_redirect: 100 + read: 50 + tier_promote: 30 diff --git a/qa/suites/rados/thrash/workloads/redirect_set_object.yaml b/qa/suites/rados/thrash/workloads/redirect_set_object.yaml new file mode 100644 index 00000000..06ba60c7 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/redirect_set_object.yaml @@ -0,0 +1,13 @@ +tasks: +- exec: + client.0: + - sudo ceph osd pool create low_tier 4 +- rados: + clients: [client.0] + low_tier_pool: 'low_tier' + ops: 4000 + objects: 500 + set_redirect: true + op_weights: + set_redirect: 100 + copy_from: 100 diff --git a/qa/suites/rados/thrash/workloads/set-chunks-read.yaml b/qa/suites/rados/thrash/workloads/set-chunks-read.yaml new file mode 100644 index 00000000..1abbdd75 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/set-chunks-read.yaml @@ -0,0 +1,13 @@ +tasks: +- exec: + client.0: + - sudo ceph osd pool create low_tier 4 +- rados: + clients: [client.0] + low_tier_pool: 'low_tier' + ops: 4000 + objects: 300 + set_chunk: true + op_weights: + chunk_read: 100 + tier_promote: 10 diff --git a/qa/suites/rados/thrash/workloads/small-objects.yaml b/qa/suites/rados/thrash/workloads/small-objects.yaml new file mode 100644 index 00000000..f5a18ae6 --- /dev/null 
+++ b/qa/suites/rados/thrash/workloads/small-objects.yaml @@ -0,0 +1,24 @@ +overrides: + ceph: + crush_tunables: jewel + conf: + mon: + mon osd initial require min compat client: jewel +tasks: +- rados: + clients: [client.0] + ops: 400000 + max_seconds: 600 + max_in_flight: 64 + objects: 1024 + size: 16384 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml b/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml new file mode 100644 index 00000000..aa82d973 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/snaps-few-objects.yaml @@ -0,0 +1,13 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 diff --git a/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml b/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml new file mode 100644 index 00000000..606dcae6 --- /dev/null +++ b/qa/suites/rados/thrash/workloads/write_fadvise_dontneed.yaml @@ -0,0 +1,8 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + write_fadvise_dontneed: true + op_weights: + write: 100 diff --git a/qa/suites/rados/upgrade/.qa b/qa/suites/rados/upgrade/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/upgrade/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/upgrade/mimic-x-singleton b/qa/suites/rados/upgrade/mimic-x-singleton new file mode 120000 index 00000000..ebecaf67 --- /dev/null +++ b/qa/suites/rados/upgrade/mimic-x-singleton @@ -0,0 +1 @@ +../../upgrade/mimic-x-singleton \ No newline at end of file diff --git a/qa/suites/rados/verify/% b/qa/suites/rados/verify/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/verify/.qa b/qa/suites/rados/verify/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/verify/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/verify/ceph.yaml b/qa/suites/rados/verify/ceph.yaml new file mode 100644 index 00000000..c0857e14 --- /dev/null +++ b/qa/suites/rados/verify/ceph.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + mon: + mon min osdmap epochs: 50 + paxos service trim min: 10 + # prune full osdmaps regularly + mon osdmap full prune min: 15 + mon osdmap full prune interval: 2 + mon osdmap full prune txsize: 2 +tasks: +- install: +- ceph: diff --git a/qa/suites/rados/verify/clusters/+ b/qa/suites/rados/verify/clusters/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/verify/clusters/.qa b/qa/suites/rados/verify/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/verify/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/verify/clusters/fixed-2.yaml b/qa/suites/rados/verify/clusters/fixed-2.yaml new file mode 120000 index 00000000..230ff0fd --- /dev/null +++ b/qa/suites/rados/verify/clusters/fixed-2.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-2.yaml \ No newline at end of file diff --git a/qa/suites/rados/verify/clusters/openstack.yaml b/qa/suites/rados/verify/clusters/openstack.yaml new file mode 100644 index 00000000..e559d912 --- /dev/null +++ b/qa/suites/rados/verify/clusters/openstack.yaml @@ -0,0 +1,4 @@ +openstack: + - volumes: # attached to each instance + count: 4 + size: 10 # 
GB diff --git a/qa/suites/rados/verify/d-thrash/.qa b/qa/suites/rados/verify/d-thrash/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/verify/d-thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/verify/d-thrash/default/+ b/qa/suites/rados/verify/d-thrash/default/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/verify/d-thrash/default/.qa b/qa/suites/rados/verify/d-thrash/default/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/verify/d-thrash/default/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/verify/d-thrash/default/default.yaml b/qa/suites/rados/verify/d-thrash/default/default.yaml new file mode 100644 index 00000000..8f2b2667 --- /dev/null +++ b/qa/suites/rados/verify/d-thrash/default/default.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgnum_shrink: 1 + chance_pgpnum_fix: 1 diff --git a/qa/suites/rados/verify/d-thrash/default/thrashosds-health.yaml b/qa/suites/rados/verify/d-thrash/default/thrashosds-health.yaml new file mode 120000 index 00000000..9124eb1a --- /dev/null +++ b/qa/suites/rados/verify/d-thrash/default/thrashosds-health.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/suites/rados/verify/d-thrash/none.yaml b/qa/suites/rados/verify/d-thrash/none.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rados/verify/msgr b/qa/suites/rados/verify/msgr new file mode 120000 index 00000000..57bee80d --- /dev/null +++ b/qa/suites/rados/verify/msgr @@ -0,0 +1 @@ +.qa/msgr \ No newline at end of file diff --git a/qa/suites/rados/verify/msgr-failures/.qa b/qa/suites/rados/verify/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/verify/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/verify/msgr-failures/few.yaml b/qa/suites/rados/verify/msgr-failures/few.yaml new file mode 100644 index 00000000..4326fe23 --- /dev/null +++ b/qa/suites/rados/verify/msgr-failures/few.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rados/verify/objectstore b/qa/suites/rados/verify/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rados/verify/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rados/verify/rados.yaml b/qa/suites/rados/verify/rados.yaml new file mode 120000 index 00000000..d256979c --- /dev/null +++ b/qa/suites/rados/verify/rados.yaml @@ -0,0 +1 @@ +.qa/config/rados.yaml \ No newline at end of file diff --git a/qa/suites/rados/verify/tasks/.qa b/qa/suites/rados/verify/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/verify/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/verify/tasks/mon_recovery.yaml b/qa/suites/rados/verify/tasks/mon_recovery.yaml new file mode 100644 index 00000000..266a4e47 --- /dev/null +++ b/qa/suites/rados/verify/tasks/mon_recovery.yaml @@ -0,0 +1,10 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(MON_DOWN\) + - \(OSDMAP_FLAGS\) + - \(SMALLER_PGP_NUM\) + - \(POOL_APP_NOT_ENABLED\) +tasks: 
+- mon_recovery: diff --git a/qa/suites/rados/verify/tasks/rados_api_tests.yaml b/qa/suites/rados/verify/tasks/rados_api_tests.yaml new file mode 100644 index 00000000..79f24479 --- /dev/null +++ b/qa/suites/rados/verify/tasks/rados_api_tests.yaml @@ -0,0 +1,30 @@ +overrides: + ceph: + log-whitelist: + - reached quota + - overall HEALTH_ + - \(CACHE_POOL_NO_HIT_SET\) + - \(POOL_FULL\) + - \(SMALLER_PGP_NUM\) + - \(SLOW_OPS\) + - \(CACHE_POOL_NEAR_FULL\) + - \(POOL_APP_NOT_ENABLED\) + - \(PG_AVAILABILITY\) + - \(OBJECT_MISPLACED\) + - slow request + conf: + client: + debug ms: 1 + debug objecter: 20 + debug rados: 20 + debug monc: 20 + mon: + mon warn on pool no app: false +tasks: +- workunit: + timeout: 6h + env: + ALLOW_TIMEOUTS: "1" + clients: + client.0: + - rados/test.sh diff --git a/qa/suites/rados/verify/tasks/rados_cls_all.yaml b/qa/suites/rados/verify/tasks/rados_cls_all.yaml new file mode 100644 index 00000000..bcc58e19 --- /dev/null +++ b/qa/suites/rados/verify/tasks/rados_cls_all.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + osd: + osd_class_load_list: "cephfs hello journal lock log numops rbd refcount + rgw sdk timeindex user version" + osd_class_default_list: "cephfs hello journal lock log numops rbd refcount + rgw sdk timeindex user version" +tasks: +- workunit: + clients: + client.0: + - cls diff --git a/qa/suites/rados/verify/validater/.qa b/qa/suites/rados/verify/validater/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rados/verify/validater/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rados/verify/validater/lockdep.yaml b/qa/suites/rados/verify/validater/lockdep.yaml new file mode 100644 index 00000000..25f84355 --- /dev/null +++ b/qa/suites/rados/verify/validater/lockdep.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + global: + lockdep: true diff --git a/qa/suites/rados/verify/validater/valgrind.yaml b/qa/suites/rados/verify/validater/valgrind.yaml new file mode 100644 index 00000000..5ac297cc --- /dev/null +++ b/qa/suites/rados/verify/validater/valgrind.yaml @@ -0,0 +1,32 @@ +# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126 +os_type: centos +os_version: '7.8' + +overrides: + install: + ceph: + debuginfo: true + ceph: + conf: + global: + osd heartbeat grace: 80 + mon: + mon osd crush smoke test: false + osd: + osd fast shutdown: false + debug bluestore: 1 + debug bluefs: 1 + log-whitelist: + - overall HEALTH_ +# valgrind is slow.. we might get PGs stuck peering etc + - \(PG_ +# mons sometimes are left off of initial quorum due to valgrind slowness. 
ok to whitelist here because we'll still catch an actual crash due to the core + - \(MON_DOWN\) + - \(SLOW_OPS\) + - slow request + valgrind: + mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes] + osd: [--tool=memcheck] + mds: [--tool=memcheck] +# https://tracker.ceph.com/issues/38621 +# mgr: [--tool=memcheck] diff --git a/qa/suites/rbd/.qa b/qa/suites/rbd/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/basic/% b/qa/suites/rbd/basic/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/basic/.qa b/qa/suites/rbd/basic/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/basic/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/basic/base/.qa b/qa/suites/rbd/basic/base/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/basic/base/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/basic/base/install.yaml b/qa/suites/rbd/basic/base/install.yaml new file mode 100644 index 00000000..2030acb9 --- /dev/null +++ b/qa/suites/rbd/basic/base/install.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/rbd/basic/cachepool/.qa b/qa/suites/rbd/basic/cachepool/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/basic/cachepool/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/basic/cachepool/none.yaml b/qa/suites/rbd/basic/cachepool/none.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/basic/cachepool/small.yaml b/qa/suites/rbd/basic/cachepool/small.yaml new file mode 100644 index 00000000..1b505657 --- /dev/null +++ b/qa/suites/rbd/basic/cachepool/small.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NEAR_FULL\) + - \(CACHE_POOL_NO_HIT_SET\) +tasks: +- exec: + client.0: + - sudo ceph osd pool create cache 4 + - sudo ceph osd tier add rbd cache + - sudo ceph osd tier cache-mode cache writeback + - sudo ceph osd tier set-overlay rbd cache + - sudo ceph osd pool set cache hit_set_type bloom + - sudo ceph osd pool set cache hit_set_count 8 + - sudo ceph osd pool set cache hit_set_period 60 + - sudo ceph osd pool set cache target_max_objects 250 diff --git a/qa/suites/rbd/basic/clusters/+ b/qa/suites/rbd/basic/clusters/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/basic/clusters/.qa b/qa/suites/rbd/basic/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/basic/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/basic/clusters/fixed-1.yaml b/qa/suites/rbd/basic/clusters/fixed-1.yaml new file mode 120000 index 00000000..02df5dd0 --- /dev/null +++ b/qa/suites/rbd/basic/clusters/fixed-1.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-1.yaml \ No newline at end of file diff --git a/qa/suites/rbd/basic/clusters/openstack.yaml b/qa/suites/rbd/basic/clusters/openstack.yaml new file mode 100644 index 00000000..f4d1349b --- /dev/null +++ b/qa/suites/rbd/basic/clusters/openstack.yaml @@ -0,0 +1,4 @@ +openstack: + - volumes: # attached to each instance + count: 3 + size: 30 # GB diff --git a/qa/suites/rbd/basic/msgr-failures/.qa b/qa/suites/rbd/basic/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/basic/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ 
\ No newline at end of file diff --git a/qa/suites/rbd/basic/msgr-failures/few.yaml b/qa/suites/rbd/basic/msgr-failures/few.yaml new file mode 100644 index 00000000..4326fe23 --- /dev/null +++ b/qa/suites/rbd/basic/msgr-failures/few.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rbd/basic/objectstore b/qa/suites/rbd/basic/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rbd/basic/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rbd/basic/supported-random-distro$ b/qa/suites/rbd/basic/supported-random-distro$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/rbd/basic/supported-random-distro$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rbd/basic/tasks/.qa b/qa/suites/rbd/basic/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/basic/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml b/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml new file mode 100644 index 00000000..fe1e26d5 --- /dev/null +++ b/qa/suites/rbd/basic/tasks/rbd_api_tests_old_format.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NO_HIT_SET\) + - \(POOL_APP_NOT_ENABLED\) +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd.sh diff --git a/qa/suites/rbd/basic/tasks/rbd_cls_tests.yaml b/qa/suites/rbd/basic/tasks/rbd_cls_tests.yaml new file mode 100644 index 00000000..51b35e2e --- /dev/null +++ b/qa/suites/rbd/basic/tasks/rbd_cls_tests.yaml @@ -0,0 +1,7 @@ +tasks: +- workunit: + clients: + client.0: + - cls/test_cls_rbd.sh + - cls/test_cls_lock.sh + - cls/test_cls_journal.sh diff --git a/qa/suites/rbd/basic/tasks/rbd_lock_and_fence.yaml b/qa/suites/rbd/basic/tasks/rbd_lock_and_fence.yaml new file mode 100644 index 00000000..d2c80ad6 --- /dev/null +++ b/qa/suites/rbd/basic/tasks/rbd_lock_and_fence.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/test_lock_fence.sh diff --git a/qa/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml b/qa/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml new file mode 100644 index 00000000..90bc152e --- /dev/null +++ b/qa/suites/rbd/basic/tasks/rbd_python_api_tests_old_format.yaml @@ -0,0 +1,10 @@ +overrides: + ceph: + log-whitelist: + - \(SLOW_OPS\) + - slow request +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd_python.sh diff --git a/qa/suites/rbd/cli/% b/qa/suites/rbd/cli/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/cli/.qa b/qa/suites/rbd/cli/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/cli/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/cli/base/.qa b/qa/suites/rbd/cli/base/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/cli/base/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/cli/base/install.yaml b/qa/suites/rbd/cli/base/install.yaml new file mode 100644 index 00000000..2030acb9 --- /dev/null +++ b/qa/suites/rbd/cli/base/install.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/rbd/cli/clusters b/qa/suites/rbd/cli/clusters new file mode 120000 index 00000000..ae92569e --- /dev/null +++ 
b/qa/suites/rbd/cli/clusters @@ -0,0 +1 @@ +../basic/clusters \ No newline at end of file diff --git a/qa/suites/rbd/cli/features/.qa b/qa/suites/rbd/cli/features/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/cli/features/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/cli/features/defaults.yaml b/qa/suites/rbd/cli/features/defaults.yaml new file mode 100644 index 00000000..75afd68d --- /dev/null +++ b/qa/suites/rbd/cli/features/defaults.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default features: 61 diff --git a/qa/suites/rbd/cli/features/journaling.yaml b/qa/suites/rbd/cli/features/journaling.yaml new file mode 100644 index 00000000..6cea62a8 --- /dev/null +++ b/qa/suites/rbd/cli/features/journaling.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default features: 125 diff --git a/qa/suites/rbd/cli/features/layering.yaml b/qa/suites/rbd/cli/features/layering.yaml new file mode 100644 index 00000000..429b8e14 --- /dev/null +++ b/qa/suites/rbd/cli/features/layering.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default features: 1 diff --git a/qa/suites/rbd/cli/msgr-failures/.qa b/qa/suites/rbd/cli/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/cli/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/cli/msgr-failures/few.yaml b/qa/suites/rbd/cli/msgr-failures/few.yaml new file mode 100644 index 00000000..4326fe23 --- /dev/null +++ b/qa/suites/rbd/cli/msgr-failures/few.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rbd/cli/objectstore b/qa/suites/rbd/cli/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rbd/cli/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rbd/cli/pool/.qa b/qa/suites/rbd/cli/pool/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/cli/pool/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/cli/pool/ec-data-pool.yaml b/qa/suites/rbd/cli/pool/ec-data-pool.yaml new file mode 100644 index 00000000..376bf08e --- /dev/null +++ b/qa/suites/rbd/cli/pool/ec-data-pool.yaml @@ -0,0 +1,27 @@ +tasks: +- exec: + client.0: + - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2 + - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile + - sudo ceph osd pool set datapool allow_ec_overwrites true + - rbd pool init datapool + +overrides: + thrashosds: + bdev_inject_crash: 2 + bdev_inject_crash_probability: .5 + ceph: + fs: xfs + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NO_HIT_SET\) + conf: + client: + rbd default data pool: datapool + osd: # force bluestore since it's required for ec overwrites + osd objectstore: bluestore + bluestore block size: 96636764160 + enable experimental unrecoverable data corrupting features: "*" + osd debug randomize hobject sort order: false +# this doesn't work with failures bc the log writes are not atomic across the two backends +# bluestore bluefs env mirror: true diff --git a/qa/suites/rbd/cli/pool/none.yaml b/qa/suites/rbd/cli/pool/none.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/cli/pool/replicated-data-pool.yaml b/qa/suites/rbd/cli/pool/replicated-data-pool.yaml new file mode 100644 
index 00000000..c5647dba --- /dev/null +++ b/qa/suites/rbd/cli/pool/replicated-data-pool.yaml @@ -0,0 +1,11 @@ +tasks: +- exec: + client.0: + - sudo ceph osd pool create datapool 4 + - rbd pool init datapool + +overrides: + ceph: + conf: + client: + rbd default data pool: datapool diff --git a/qa/suites/rbd/cli/pool/small-cache-pool.yaml b/qa/suites/rbd/cli/pool/small-cache-pool.yaml new file mode 100644 index 00000000..1b505657 --- /dev/null +++ b/qa/suites/rbd/cli/pool/small-cache-pool.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NEAR_FULL\) + - \(CACHE_POOL_NO_HIT_SET\) +tasks: +- exec: + client.0: + - sudo ceph osd pool create cache 4 + - sudo ceph osd tier add rbd cache + - sudo ceph osd tier cache-mode cache writeback + - sudo ceph osd tier set-overlay rbd cache + - sudo ceph osd pool set cache hit_set_type bloom + - sudo ceph osd pool set cache hit_set_count 8 + - sudo ceph osd pool set cache hit_set_period 60 + - sudo ceph osd pool set cache target_max_objects 250 diff --git a/qa/suites/rbd/cli/supported-random-distro$ b/qa/suites/rbd/cli/supported-random-distro$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/rbd/cli/supported-random-distro$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rbd/cli/workloads/.qa b/qa/suites/rbd/cli/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/cli/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/cli/workloads/rbd_cli_generic.yaml b/qa/suites/rbd/cli/workloads/rbd_cli_generic.yaml new file mode 100644 index 00000000..be43b3e8 --- /dev/null +++ b/qa/suites/rbd/cli/workloads/rbd_cli_generic.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/cli_generic.sh diff --git a/qa/suites/rbd/cli/workloads/rbd_cli_groups.yaml b/qa/suites/rbd/cli/workloads/rbd_cli_groups.yaml new file mode 100644 index 00000000..6ff83634 --- /dev/null +++ b/qa/suites/rbd/cli/workloads/rbd_cli_groups.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/rbd_groups.sh diff --git a/qa/suites/rbd/cli/workloads/rbd_cli_import_export.yaml b/qa/suites/rbd/cli/workloads/rbd_cli_import_export.yaml new file mode 100644 index 00000000..b08f2612 --- /dev/null +++ b/qa/suites/rbd/cli/workloads/rbd_cli_import_export.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/import_export.sh diff --git a/qa/suites/rbd/cli_v1/% b/qa/suites/rbd/cli_v1/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/cli_v1/.qa b/qa/suites/rbd/cli_v1/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/cli_v1/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/cli_v1/base/.qa b/qa/suites/rbd/cli_v1/base/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/cli_v1/base/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/cli_v1/base/install.yaml b/qa/suites/rbd/cli_v1/base/install.yaml new file mode 100644 index 00000000..2030acb9 --- /dev/null +++ b/qa/suites/rbd/cli_v1/base/install.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/rbd/cli_v1/clusters b/qa/suites/rbd/cli_v1/clusters new file mode 120000 index 00000000..ae92569e --- /dev/null +++ b/qa/suites/rbd/cli_v1/clusters @@ -0,0 +1 @@ +../basic/clusters \ No newline at end of file diff --git 
a/qa/suites/rbd/cli_v1/features/.qa b/qa/suites/rbd/cli_v1/features/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/cli_v1/features/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/cli_v1/features/format-1.yaml b/qa/suites/rbd/cli_v1/features/format-1.yaml new file mode 100644 index 00000000..9c532083 --- /dev/null +++ b/qa/suites/rbd/cli_v1/features/format-1.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default format: 1 diff --git a/qa/suites/rbd/cli_v1/msgr-failures/.qa b/qa/suites/rbd/cli_v1/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/cli_v1/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/cli_v1/msgr-failures/few.yaml b/qa/suites/rbd/cli_v1/msgr-failures/few.yaml new file mode 100644 index 00000000..4326fe23 --- /dev/null +++ b/qa/suites/rbd/cli_v1/msgr-failures/few.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rbd/cli_v1/objectstore b/qa/suites/rbd/cli_v1/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rbd/cli_v1/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rbd/cli_v1/pool/.qa b/qa/suites/rbd/cli_v1/pool/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/cli_v1/pool/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/cli_v1/pool/none.yaml b/qa/suites/rbd/cli_v1/pool/none.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/cli_v1/pool/small-cache-pool.yaml b/qa/suites/rbd/cli_v1/pool/small-cache-pool.yaml new file mode 100644 index 00000000..1b505657 --- /dev/null +++ b/qa/suites/rbd/cli_v1/pool/small-cache-pool.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NEAR_FULL\) + - \(CACHE_POOL_NO_HIT_SET\) +tasks: +- exec: + client.0: + - sudo ceph osd pool create cache 4 + - sudo ceph osd tier add rbd cache + - sudo ceph osd tier cache-mode cache writeback + - sudo ceph osd tier set-overlay rbd cache + - sudo ceph osd pool set cache hit_set_type bloom + - sudo ceph osd pool set cache hit_set_count 8 + - sudo ceph osd pool set cache hit_set_period 60 + - sudo ceph osd pool set cache target_max_objects 250 diff --git a/qa/suites/rbd/cli_v1/supported-random-distro$ b/qa/suites/rbd/cli_v1/supported-random-distro$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/rbd/cli_v1/supported-random-distro$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rbd/cli_v1/workloads/.qa b/qa/suites/rbd/cli_v1/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/cli_v1/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/cli_v1/workloads/rbd_cli_generic.yaml b/qa/suites/rbd/cli_v1/workloads/rbd_cli_generic.yaml new file mode 100644 index 00000000..be43b3e8 --- /dev/null +++ b/qa/suites/rbd/cli_v1/workloads/rbd_cli_generic.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/cli_generic.sh diff --git a/qa/suites/rbd/cli_v1/workloads/rbd_cli_import_export.yaml b/qa/suites/rbd/cli_v1/workloads/rbd_cli_import_export.yaml new file mode 100644 index 00000000..b08f2612 --- /dev/null +++ 
b/qa/suites/rbd/cli_v1/workloads/rbd_cli_import_export.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/import_export.sh diff --git a/qa/suites/rbd/librbd/% b/qa/suites/rbd/librbd/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/librbd/.qa b/qa/suites/rbd/librbd/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/librbd/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/librbd/cache/.qa b/qa/suites/rbd/librbd/cache/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/librbd/cache/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/librbd/cache/none.yaml b/qa/suites/rbd/librbd/cache/none.yaml new file mode 100644 index 00000000..42fd9c95 --- /dev/null +++ b/qa/suites/rbd/librbd/cache/none.yaml @@ -0,0 +1,6 @@ +tasks: +- install: +- ceph: + conf: + client: + rbd cache: false diff --git a/qa/suites/rbd/librbd/cache/writeback.yaml b/qa/suites/rbd/librbd/cache/writeback.yaml new file mode 100644 index 00000000..86fe06af --- /dev/null +++ b/qa/suites/rbd/librbd/cache/writeback.yaml @@ -0,0 +1,6 @@ +tasks: +- install: +- ceph: + conf: + client: + rbd cache: true diff --git a/qa/suites/rbd/librbd/cache/writethrough.yaml b/qa/suites/rbd/librbd/cache/writethrough.yaml new file mode 100644 index 00000000..6dc29e16 --- /dev/null +++ b/qa/suites/rbd/librbd/cache/writethrough.yaml @@ -0,0 +1,7 @@ +tasks: +- install: +- ceph: + conf: + client: + rbd cache: true + rbd cache max dirty: 0 diff --git a/qa/suites/rbd/librbd/clusters/+ b/qa/suites/rbd/librbd/clusters/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/librbd/clusters/.qa b/qa/suites/rbd/librbd/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/librbd/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/librbd/clusters/fixed-3.yaml b/qa/suites/rbd/librbd/clusters/fixed-3.yaml new file mode 120000 index 00000000..f75a848b --- /dev/null +++ b/qa/suites/rbd/librbd/clusters/fixed-3.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-3.yaml \ No newline at end of file diff --git a/qa/suites/rbd/librbd/clusters/openstack.yaml b/qa/suites/rbd/librbd/clusters/openstack.yaml new file mode 100644 index 00000000..b0f3b9b4 --- /dev/null +++ b/qa/suites/rbd/librbd/clusters/openstack.yaml @@ -0,0 +1,4 @@ +openstack: + - volumes: # attached to each instance + count: 4 + size: 30 # GB diff --git a/qa/suites/rbd/librbd/config/.qa b/qa/suites/rbd/librbd/config/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/librbd/config/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/librbd/config/copy-on-read.yaml b/qa/suites/rbd/librbd/config/copy-on-read.yaml new file mode 100644 index 00000000..ce99e7ec --- /dev/null +++ b/qa/suites/rbd/librbd/config/copy-on-read.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd clone copy on read: true diff --git a/qa/suites/rbd/librbd/config/none.yaml b/qa/suites/rbd/librbd/config/none.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/librbd/config/permit-partial-discard.yaml b/qa/suites/rbd/librbd/config/permit-partial-discard.yaml new file mode 100644 index 00000000..a9929469 --- /dev/null +++ b/qa/suites/rbd/librbd/config/permit-partial-discard.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd skip partial discard: false diff --git 
a/qa/suites/rbd/librbd/msgr-failures/.qa b/qa/suites/rbd/librbd/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/librbd/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/librbd/msgr-failures/few.yaml b/qa/suites/rbd/librbd/msgr-failures/few.yaml new file mode 100644 index 00000000..55b6df53 --- /dev/null +++ b/qa/suites/rbd/librbd/msgr-failures/few.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + log-whitelist: + - but it is still running diff --git a/qa/suites/rbd/librbd/objectstore b/qa/suites/rbd/librbd/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rbd/librbd/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rbd/librbd/pool/.qa b/qa/suites/rbd/librbd/pool/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/librbd/pool/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/librbd/pool/ec-data-pool.yaml b/qa/suites/rbd/librbd/pool/ec-data-pool.yaml new file mode 100644 index 00000000..f39a5bb4 --- /dev/null +++ b/qa/suites/rbd/librbd/pool/ec-data-pool.yaml @@ -0,0 +1,24 @@ +tasks: +- exec: + client.0: + - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2 + - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile + - sudo ceph osd pool set datapool allow_ec_overwrites true + - rbd pool init datapool + +overrides: + thrashosds: + bdev_inject_crash: 2 + bdev_inject_crash_probability: .5 + ceph: + fs: xfs + conf: + client: + rbd default data pool: datapool + osd: # force bluestore since it's required for ec overwrites + osd objectstore: bluestore + bluestore block size: 96636764160 + enable experimental unrecoverable data corrupting features: "*" + osd debug randomize hobject sort order: false +# this doesn't work with failures bc the log writes are not atomic across the two backends +# bluestore bluefs env mirror: true diff --git a/qa/suites/rbd/librbd/pool/none.yaml b/qa/suites/rbd/librbd/pool/none.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/librbd/pool/replicated-data-pool.yaml b/qa/suites/rbd/librbd/pool/replicated-data-pool.yaml new file mode 100644 index 00000000..c5647dba --- /dev/null +++ b/qa/suites/rbd/librbd/pool/replicated-data-pool.yaml @@ -0,0 +1,11 @@ +tasks: +- exec: + client.0: + - sudo ceph osd pool create datapool 4 + - rbd pool init datapool + +overrides: + ceph: + conf: + client: + rbd default data pool: datapool diff --git a/qa/suites/rbd/librbd/pool/small-cache-pool.yaml b/qa/suites/rbd/librbd/pool/small-cache-pool.yaml new file mode 100644 index 00000000..1b505657 --- /dev/null +++ b/qa/suites/rbd/librbd/pool/small-cache-pool.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NEAR_FULL\) + - \(CACHE_POOL_NO_HIT_SET\) +tasks: +- exec: + client.0: + - sudo ceph osd pool create cache 4 + - sudo ceph osd tier add rbd cache + - sudo ceph osd tier cache-mode cache writeback + - sudo ceph osd tier set-overlay rbd cache + - sudo ceph osd pool set cache hit_set_type bloom + - sudo ceph osd pool set cache hit_set_count 8 + - sudo ceph osd pool set cache hit_set_period 60 + - sudo ceph osd pool set cache target_max_objects 250 diff --git a/qa/suites/rbd/librbd/supported-random-distro$ b/qa/suites/rbd/librbd/supported-random-distro$ new file mode 120000 index 
00000000..0862b445 --- /dev/null +++ b/qa/suites/rbd/librbd/supported-random-distro$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rbd/librbd/workloads/.qa b/qa/suites/rbd/librbd/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/librbd/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/librbd/workloads/c_api_tests.yaml b/qa/suites/rbd/librbd/workloads/c_api_tests.yaml new file mode 100644 index 00000000..04af9c85 --- /dev/null +++ b/qa/suites/rbd/librbd/workloads/c_api_tests.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NO_HIT_SET\) + - \(POOL_APP_NOT_ENABLED\) +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd.sh + env: + RBD_FEATURES: "1" diff --git a/qa/suites/rbd/librbd/workloads/c_api_tests_with_defaults.yaml b/qa/suites/rbd/librbd/workloads/c_api_tests_with_defaults.yaml new file mode 100644 index 00000000..6ae7f462 --- /dev/null +++ b/qa/suites/rbd/librbd/workloads/c_api_tests_with_defaults.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NO_HIT_SET\) + - \(POOL_APP_NOT_ENABLED\) +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd.sh + env: + RBD_FEATURES: "61" diff --git a/qa/suites/rbd/librbd/workloads/c_api_tests_with_journaling.yaml b/qa/suites/rbd/librbd/workloads/c_api_tests_with_journaling.yaml new file mode 100644 index 00000000..578115ee --- /dev/null +++ b/qa/suites/rbd/librbd/workloads/c_api_tests_with_journaling.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NO_HIT_SET\) + - \(POOL_APP_NOT_ENABLED\) +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd.sh + env: + RBD_FEATURES: "125" diff --git a/qa/suites/rbd/librbd/workloads/fsx.yaml b/qa/suites/rbd/librbd/workloads/fsx.yaml new file mode 100644 index 00000000..6d8cd5f1 --- /dev/null +++ b/qa/suites/rbd/librbd/workloads/fsx.yaml @@ -0,0 +1,4 @@ +tasks: +- rbd_fsx: + clients: [client.0] + ops: 20000 diff --git a/qa/suites/rbd/librbd/workloads/python_api_tests.yaml b/qa/suites/rbd/librbd/workloads/python_api_tests.yaml new file mode 100644 index 00000000..a7b3ce7d --- /dev/null +++ b/qa/suites/rbd/librbd/workloads/python_api_tests.yaml @@ -0,0 +1,7 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd_python.sh + env: + RBD_FEATURES: "1" diff --git a/qa/suites/rbd/librbd/workloads/python_api_tests_with_defaults.yaml b/qa/suites/rbd/librbd/workloads/python_api_tests_with_defaults.yaml new file mode 100644 index 00000000..40b2312f --- /dev/null +++ b/qa/suites/rbd/librbd/workloads/python_api_tests_with_defaults.yaml @@ -0,0 +1,7 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd_python.sh + env: + RBD_FEATURES: "61" diff --git a/qa/suites/rbd/librbd/workloads/python_api_tests_with_journaling.yaml b/qa/suites/rbd/librbd/workloads/python_api_tests_with_journaling.yaml new file mode 100644 index 00000000..d0e905ff --- /dev/null +++ b/qa/suites/rbd/librbd/workloads/python_api_tests_with_journaling.yaml @@ -0,0 +1,7 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd_python.sh + env: + RBD_FEATURES: "125" diff --git a/qa/suites/rbd/librbd/workloads/rbd_fio.yaml b/qa/suites/rbd/librbd/workloads/rbd_fio.yaml new file mode 100644 index 00000000..ff788c6a --- /dev/null +++ b/qa/suites/rbd/librbd/workloads/rbd_fio.yaml @@ -0,0 +1,10 @@ +tasks: +- rbd_fio: + client.0: + 
fio-io-size: 80% + formats: [2] + features: [[layering],[layering,exclusive-lock,object-map]] + io-engine: rbd + test-clone-io: 1 + rw: randrw + runtime: 900 diff --git a/qa/suites/rbd/maintenance/% b/qa/suites/rbd/maintenance/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/maintenance/.qa b/qa/suites/rbd/maintenance/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/maintenance/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/maintenance/base/.qa b/qa/suites/rbd/maintenance/base/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/maintenance/base/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/maintenance/base/install.yaml b/qa/suites/rbd/maintenance/base/install.yaml new file mode 100644 index 00000000..2030acb9 --- /dev/null +++ b/qa/suites/rbd/maintenance/base/install.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/rbd/maintenance/clusters/+ b/qa/suites/rbd/maintenance/clusters/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/maintenance/clusters/.qa b/qa/suites/rbd/maintenance/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/maintenance/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/maintenance/clusters/fixed-3.yaml b/qa/suites/rbd/maintenance/clusters/fixed-3.yaml new file mode 120000 index 00000000..f75a848b --- /dev/null +++ b/qa/suites/rbd/maintenance/clusters/fixed-3.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-3.yaml \ No newline at end of file diff --git a/qa/suites/rbd/maintenance/clusters/openstack.yaml b/qa/suites/rbd/maintenance/clusters/openstack.yaml new file mode 120000 index 00000000..3e5028f9 --- /dev/null +++ b/qa/suites/rbd/maintenance/clusters/openstack.yaml @@ -0,0 +1 @@ +../../qemu/clusters/openstack.yaml \ No newline at end of file diff --git a/qa/suites/rbd/maintenance/objectstore b/qa/suites/rbd/maintenance/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rbd/maintenance/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rbd/maintenance/qemu/.qa b/qa/suites/rbd/maintenance/qemu/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/maintenance/qemu/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/maintenance/qemu/xfstests.yaml b/qa/suites/rbd/maintenance/qemu/xfstests.yaml new file mode 100644 index 00000000..135103b3 --- /dev/null +++ b/qa/suites/rbd/maintenance/qemu/xfstests.yaml @@ -0,0 +1,14 @@ +tasks: +- parallel: + - io_workload + - op_workload +io_workload: + sequential: + - qemu: + client.0: + clone: true + type: block + disks: 3 + time_wait: 120 + test: qa/run_xfstests_qemu.sh +exclude_arch: armv7l diff --git a/qa/suites/rbd/maintenance/supported-random-distro$ b/qa/suites/rbd/maintenance/supported-random-distro$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/rbd/maintenance/supported-random-distro$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rbd/maintenance/workloads/.qa b/qa/suites/rbd/maintenance/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/maintenance/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/maintenance/workloads/dynamic_features.yaml 
b/qa/suites/rbd/maintenance/workloads/dynamic_features.yaml new file mode 100644 index 00000000..d7e1c1ed --- /dev/null +++ b/qa/suites/rbd/maintenance/workloads/dynamic_features.yaml @@ -0,0 +1,8 @@ +op_workload: + sequential: + - workunit: + clients: + client.0: + - rbd/qemu_dynamic_features.sh + env: + IMAGE_NAME: client.0.1-clone diff --git a/qa/suites/rbd/maintenance/workloads/dynamic_features_no_cache.yaml b/qa/suites/rbd/maintenance/workloads/dynamic_features_no_cache.yaml new file mode 100644 index 00000000..dc8671b7 --- /dev/null +++ b/qa/suites/rbd/maintenance/workloads/dynamic_features_no_cache.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + client: + rbd cache: false +op_workload: + sequential: + - workunit: + clients: + client.0: + - rbd/qemu_dynamic_features.sh + env: + IMAGE_NAME: client.0.1-clone diff --git a/qa/suites/rbd/maintenance/workloads/rebuild_object_map.yaml b/qa/suites/rbd/maintenance/workloads/rebuild_object_map.yaml new file mode 100644 index 00000000..308158f6 --- /dev/null +++ b/qa/suites/rbd/maintenance/workloads/rebuild_object_map.yaml @@ -0,0 +1,8 @@ +op_workload: + sequential: + - workunit: + clients: + client.0: + - rbd/qemu_rebuild_object_map.sh + env: + IMAGE_NAME: client.0.1-clone diff --git a/qa/suites/rbd/mirror-thrash/% b/qa/suites/rbd/mirror-thrash/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/mirror-thrash/.qa b/qa/suites/rbd/mirror-thrash/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/mirror-thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/mirror-thrash/base/.qa b/qa/suites/rbd/mirror-thrash/base/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/mirror-thrash/base/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/mirror-thrash/base/install.yaml b/qa/suites/rbd/mirror-thrash/base/install.yaml new file mode 100644 index 00000000..365c3a8c --- /dev/null +++ b/qa/suites/rbd/mirror-thrash/base/install.yaml @@ -0,0 +1,9 @@ +meta: +- desc: run two ceph clusters and install rbd-mirror +tasks: +- install: + extra_packages: [rbd-mirror] +- ceph: + cluster: cluster1 +- ceph: + cluster: cluster2 diff --git a/qa/suites/rbd/mirror-thrash/cluster/+ b/qa/suites/rbd/mirror-thrash/cluster/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/mirror-thrash/cluster/.qa b/qa/suites/rbd/mirror-thrash/cluster/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/mirror-thrash/cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/mirror-thrash/cluster/2-node.yaml b/qa/suites/rbd/mirror-thrash/cluster/2-node.yaml new file mode 100644 index 00000000..74f9fb3c --- /dev/null +++ b/qa/suites/rbd/mirror-thrash/cluster/2-node.yaml @@ -0,0 +1,31 @@ +meta: +- desc: 2 ceph clusters with 1 mon and 3 osds each +roles: +- - cluster1.mon.a + - cluster1.mgr.x + - cluster2.mgr.x + - cluster1.osd.0 + - cluster1.osd.1 + - cluster1.osd.2 + - cluster1.client.0 + - cluster2.client.0 +- - cluster2.mon.a + - cluster2.osd.0 + - cluster2.osd.1 + - cluster2.osd.2 + - cluster1.client.mirror + - cluster1.client.mirror.0 + - cluster1.client.mirror.1 + - cluster1.client.mirror.2 + - cluster1.client.mirror.3 + - cluster1.client.mirror.4 + - cluster1.client.mirror.5 + - cluster1.client.mirror.6 + - cluster2.client.mirror + - cluster2.client.mirror.0 + - cluster2.client.mirror.1 + - cluster2.client.mirror.2 + - cluster2.client.mirror.3 + - 
cluster2.client.mirror.4 + - cluster2.client.mirror.5 + - cluster2.client.mirror.6 diff --git a/qa/suites/rbd/mirror-thrash/cluster/openstack.yaml b/qa/suites/rbd/mirror-thrash/cluster/openstack.yaml new file mode 100644 index 00000000..f4d1349b --- /dev/null +++ b/qa/suites/rbd/mirror-thrash/cluster/openstack.yaml @@ -0,0 +1,4 @@ +openstack: + - volumes: # attached to each instance + count: 3 + size: 30 # GB diff --git a/qa/suites/rbd/mirror-thrash/msgr-failures b/qa/suites/rbd/mirror-thrash/msgr-failures new file mode 120000 index 00000000..db59eb46 --- /dev/null +++ b/qa/suites/rbd/mirror-thrash/msgr-failures @@ -0,0 +1 @@ +../basic/msgr-failures \ No newline at end of file diff --git a/qa/suites/rbd/mirror-thrash/objectstore b/qa/suites/rbd/mirror-thrash/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rbd/mirror-thrash/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rbd/mirror-thrash/policy/.qa b/qa/suites/rbd/mirror-thrash/policy/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/mirror-thrash/policy/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/mirror-thrash/policy/none.yaml b/qa/suites/rbd/mirror-thrash/policy/none.yaml new file mode 100644 index 00000000..e0a7c118 --- /dev/null +++ b/qa/suites/rbd/mirror-thrash/policy/none.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd mirror image policy type: none diff --git a/qa/suites/rbd/mirror-thrash/policy/simple.yaml b/qa/suites/rbd/mirror-thrash/policy/simple.yaml new file mode 100644 index 00000000..ee3082d3 --- /dev/null +++ b/qa/suites/rbd/mirror-thrash/policy/simple.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd mirror image policy type: simple diff --git a/qa/suites/rbd/mirror-thrash/rbd-mirror/.qa b/qa/suites/rbd/mirror-thrash/rbd-mirror/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/mirror-thrash/rbd-mirror/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/mirror-thrash/rbd-mirror/four-per-cluster.yaml b/qa/suites/rbd/mirror-thrash/rbd-mirror/four-per-cluster.yaml new file mode 100644 index 00000000..70df34e4 --- /dev/null +++ b/qa/suites/rbd/mirror-thrash/rbd-mirror/four-per-cluster.yaml @@ -0,0 +1,31 @@ +meta: +- desc: run four rbd-mirror daemons per cluster +tasks: +- rbd-mirror: + client: cluster1.client.mirror.0 + thrash: True +- rbd-mirror: + client: cluster1.client.mirror.1 + thrash: True +- rbd-mirror: + client: cluster1.client.mirror.2 + thrash: True +- rbd-mirror: + client: cluster1.client.mirror.3 + thrash: True +- rbd-mirror: + client: cluster2.client.mirror.0 + thrash: True +- rbd-mirror: + client: cluster2.client.mirror.1 + thrash: True +- rbd-mirror: + client: cluster2.client.mirror.2 + thrash: True +- rbd-mirror: + client: cluster2.client.mirror.3 + thrash: True +- rbd-mirror-thrash: + cluster: cluster1 +- rbd-mirror-thrash: + cluster: cluster2 diff --git a/qa/suites/rbd/mirror-thrash/supported-random-distro$ b/qa/suites/rbd/mirror-thrash/supported-random-distro$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/rbd/mirror-thrash/supported-random-distro$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rbd/mirror-thrash/users/.qa b/qa/suites/rbd/mirror-thrash/users/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/mirror-thrash/users/.qa @@ -0,0 +1 
@@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/mirror-thrash/users/mirror.yaml b/qa/suites/rbd/mirror-thrash/users/mirror.yaml new file mode 100644 index 00000000..8de3eb2b --- /dev/null +++ b/qa/suites/rbd/mirror-thrash/users/mirror.yaml @@ -0,0 +1,23 @@ +meta: +- desc: configure the permissions for client.mirror +overrides: + ceph: + conf: + client: + rbd default features: 125 + debug rbd: 20 + debug rbd_mirror: 15 + log to stderr: false + # override to make these names predictable + client.mirror.0: + admin socket: /var/run/ceph/rbd-mirror.$cluster-$name.asok + pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid + client.mirror.1: + admin socket: /var/run/ceph/rbd-mirror.$cluster-$name.asok + pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid + client.mirror.2: + admin socket: /var/run/ceph/rbd-mirror.$cluster-$name.asok + pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid + client.mirror.3: + admin socket: /var/run/ceph/rbd-mirror.$cluster-$name.asok + pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid diff --git a/qa/suites/rbd/mirror-thrash/workloads/.qa b/qa/suites/rbd/mirror-thrash/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/mirror-thrash/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-fsx-workunit.yaml b/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-fsx-workunit.yaml new file mode 100644 index 00000000..d2db0f52 --- /dev/null +++ b/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-fsx-workunit.yaml @@ -0,0 +1,33 @@ +meta: +- desc: run multiple FSX workloads to simulate cluster load and then verify + that the images were replicated +tasks: +- workunit: + clients: + cluster1.client.mirror: [rbd/rbd_mirror_fsx_prepare.sh] + env: + # override workunit setting of CEPH_ARGS='--cluster' + CEPH_ARGS: '' + RBD_MIRROR_NOCLEANUP: '1' + RBD_MIRROR_USE_EXISTING_CLUSTER: '1' + RBD_MIRROR_USE_RBD_MIRROR: '1' +- rbd_fsx: + clients: + - cluster1.client.mirror.0 + - cluster1.client.mirror.1 + - cluster1.client.mirror.2 + - cluster1.client.mirror.3 + - cluster1.client.mirror.4 + - cluster1.client.mirror.5 + ops: 6000 + keep_images: true + pool_name: mirror +- workunit: + clients: + cluster1.client.mirror: [rbd/rbd_mirror_fsx_compare.sh] + env: + # override workunit setting of CEPH_ARGS='--cluster' + CEPH_ARGS: '' + RBD_MIRROR_USE_EXISTING_CLUSTER: '1' + RBD_MIRROR_USE_RBD_MIRROR: '1' + timeout: 6h diff --git a/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-stress-workunit.yaml b/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-stress-workunit.yaml new file mode 100644 index 00000000..62bda881 --- /dev/null +++ b/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-stress-workunit.yaml @@ -0,0 +1,13 @@ +meta: +- desc: run the rbd_mirror_stress.sh workunit to test the rbd-mirror daemon +tasks: +- workunit: + clients: + cluster1.client.mirror: [rbd/rbd_mirror_stress.sh] + env: + # override workunit setting of CEPH_ARGS='--cluster' + CEPH_ARGS: '' + RBD_MIRROR_INSTANCES: '4' + RBD_MIRROR_USE_EXISTING_CLUSTER: '1' + RBD_MIRROR_USE_RBD_MIRROR: '1' + timeout: 6h diff --git a/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-workunit.yaml b/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-workunit.yaml new file mode 100644 index 00000000..349d3fc8 --- /dev/null +++ b/qa/suites/rbd/mirror-thrash/workloads/rbd-mirror-workunit.yaml @@ -0,0 +1,12 @@ +meta: +- desc: run the rbd_mirror.sh workunit to test the rbd-mirror daemon +tasks: +- workunit: + 
clients: + cluster1.client.mirror: [rbd/rbd_mirror.sh] + env: + # override workunit setting of CEPH_ARGS='--cluster' + CEPH_ARGS: '' + RBD_MIRROR_INSTANCES: '4' + RBD_MIRROR_USE_EXISTING_CLUSTER: '1' + RBD_MIRROR_USE_RBD_MIRROR: '1' diff --git a/qa/suites/rbd/mirror/% b/qa/suites/rbd/mirror/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/mirror/.qa b/qa/suites/rbd/mirror/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/mirror/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/mirror/base b/qa/suites/rbd/mirror/base new file mode 120000 index 00000000..8d9546e2 --- /dev/null +++ b/qa/suites/rbd/mirror/base @@ -0,0 +1 @@ +../mirror-thrash/base \ No newline at end of file diff --git a/qa/suites/rbd/mirror/cluster b/qa/suites/rbd/mirror/cluster new file mode 120000 index 00000000..3fc87a15 --- /dev/null +++ b/qa/suites/rbd/mirror/cluster @@ -0,0 +1 @@ +../mirror-thrash/cluster \ No newline at end of file diff --git a/qa/suites/rbd/mirror/msgr-failures b/qa/suites/rbd/mirror/msgr-failures new file mode 120000 index 00000000..728aeab3 --- /dev/null +++ b/qa/suites/rbd/mirror/msgr-failures @@ -0,0 +1 @@ +../mirror-thrash/msgr-failures \ No newline at end of file diff --git a/qa/suites/rbd/mirror/objectstore b/qa/suites/rbd/mirror/objectstore new file mode 120000 index 00000000..d751ff12 --- /dev/null +++ b/qa/suites/rbd/mirror/objectstore @@ -0,0 +1 @@ +../mirror-thrash/objectstore \ No newline at end of file diff --git a/qa/suites/rbd/mirror/supported-random-distro$ b/qa/suites/rbd/mirror/supported-random-distro$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/rbd/mirror/supported-random-distro$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rbd/mirror/users b/qa/suites/rbd/mirror/users new file mode 120000 index 00000000..8d9d0d2c --- /dev/null +++ b/qa/suites/rbd/mirror/users @@ -0,0 +1 @@ +../mirror-thrash/users \ No newline at end of file diff --git a/qa/suites/rbd/mirror/workloads/.qa b/qa/suites/rbd/mirror/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/mirror/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/mirror/workloads/rbd-mirror-bootstrap-workunit.yaml b/qa/suites/rbd/mirror/workloads/rbd-mirror-bootstrap-workunit.yaml new file mode 100644 index 00000000..585f5829 --- /dev/null +++ b/qa/suites/rbd/mirror/workloads/rbd-mirror-bootstrap-workunit.yaml @@ -0,0 +1,11 @@ +meta: +- desc: run the rbd_mirror_bootstrap.sh workunit to test the rbd-mirror daemon +tasks: +- workunit: + clients: + cluster1.client.mirror: [rbd/rbd_mirror_bootstrap.sh] + env: + # override workunit setting of CEPH_ARGS='--cluster' + CEPH_ARGS: '' + RBD_MIRROR_INSTANCES: '1' + RBD_MIRROR_USE_EXISTING_CLUSTER: '1' diff --git a/qa/suites/rbd/mirror/workloads/rbd-mirror-ha-workunit.yaml b/qa/suites/rbd/mirror/workloads/rbd-mirror-ha-workunit.yaml new file mode 100644 index 00000000..7aa8d548 --- /dev/null +++ b/qa/suites/rbd/mirror/workloads/rbd-mirror-ha-workunit.yaml @@ -0,0 +1,26 @@ +meta: +- desc: run the rbd_mirror_ha.sh workunit to test the rbd-mirror daemon +overrides: + ceph: + conf: + client: + rbd mirror image policy type: none + # override to make these names predictable + client.mirror.4: + admin socket: /var/run/ceph/rbd-mirror.$cluster-$name.asok + pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid + client.mirror.5: + admin socket: 
/var/run/ceph/rbd-mirror.$cluster-$name.asok + pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid + client.mirror.6: + admin socket: /var/run/ceph/rbd-mirror.$cluster-$name.asok + pid file: /var/run/ceph/rbd-mirror.$cluster-$name.pid +tasks: +- workunit: + clients: + cluster1.client.mirror: [rbd/rbd_mirror_ha.sh] + env: + # override workunit setting of CEPH_ARGS='--cluster' + CEPH_ARGS: '' + RBD_MIRROR_USE_EXISTING_CLUSTER: '1' + timeout: 6h diff --git a/qa/suites/rbd/mirror/workloads/rbd-mirror-workunit-config-key.yaml b/qa/suites/rbd/mirror/workloads/rbd-mirror-workunit-config-key.yaml new file mode 100644 index 00000000..0c8c2180 --- /dev/null +++ b/qa/suites/rbd/mirror/workloads/rbd-mirror-workunit-config-key.yaml @@ -0,0 +1,12 @@ +meta: +- desc: run the rbd_mirror.sh workunit to test the rbd-mirror daemon +tasks: +- workunit: + clients: + cluster1.client.mirror: [rbd/rbd_mirror.sh] + env: + # override workunit setting of CEPH_ARGS='--cluster' + CEPH_ARGS: '' + RBD_MIRROR_INSTANCES: '4' + RBD_MIRROR_USE_EXISTING_CLUSTER: '1' + RBD_MIRROR_CONFIG_KEY: '1' diff --git a/qa/suites/rbd/mirror/workloads/rbd-mirror-workunit-policy-none.yaml b/qa/suites/rbd/mirror/workloads/rbd-mirror-workunit-policy-none.yaml new file mode 100644 index 00000000..eff20a9e --- /dev/null +++ b/qa/suites/rbd/mirror/workloads/rbd-mirror-workunit-policy-none.yaml @@ -0,0 +1,16 @@ +meta: +- desc: run the rbd_mirror.sh workunit to test the rbd-mirror daemon +overrides: + ceph: + conf: + client: + rbd mirror image policy type: none +tasks: +- workunit: + clients: + cluster1.client.mirror: [rbd/rbd_mirror.sh] + env: + # override workunit setting of CEPH_ARGS='--cluster' + CEPH_ARGS: '' + RBD_MIRROR_INSTANCES: '4' + RBD_MIRROR_USE_EXISTING_CLUSTER: '1' diff --git a/qa/suites/rbd/mirror/workloads/rbd-mirror-workunit-policy-simple.yaml b/qa/suites/rbd/mirror/workloads/rbd-mirror-workunit-policy-simple.yaml new file mode 100644 index 00000000..85814217 --- /dev/null +++ b/qa/suites/rbd/mirror/workloads/rbd-mirror-workunit-policy-simple.yaml @@ -0,0 +1,16 @@ +meta: +- desc: run the rbd_mirror.sh workunit to test the rbd-mirror daemon +overrides: + ceph: + conf: + client: + rbd mirror image policy type: simple +tasks: +- workunit: + clients: + cluster1.client.mirror: [rbd/rbd_mirror.sh] + env: + # override workunit setting of CEPH_ARGS='--cluster' + CEPH_ARGS: '' + RBD_MIRROR_INSTANCES: '4' + RBD_MIRROR_USE_EXISTING_CLUSTER: '1' diff --git a/qa/suites/rbd/nbd/% b/qa/suites/rbd/nbd/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/nbd/.qa b/qa/suites/rbd/nbd/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/nbd/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/nbd/base b/qa/suites/rbd/nbd/base new file mode 120000 index 00000000..fd10a859 --- /dev/null +++ b/qa/suites/rbd/nbd/base @@ -0,0 +1 @@ +../thrash/base \ No newline at end of file diff --git a/qa/suites/rbd/nbd/cluster/+ b/qa/suites/rbd/nbd/cluster/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/nbd/cluster/.qa b/qa/suites/rbd/nbd/cluster/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/nbd/cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/nbd/cluster/fixed-3.yaml b/qa/suites/rbd/nbd/cluster/fixed-3.yaml new file mode 100644 index 00000000..18258915 --- /dev/null +++ b/qa/suites/rbd/nbd/cluster/fixed-3.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.c, osd.0, osd.1, osd.2] +- 
[mon.b, mgr.x, osd.3, osd.4, osd.5] +- [client.0] diff --git a/qa/suites/rbd/nbd/cluster/openstack.yaml b/qa/suites/rbd/nbd/cluster/openstack.yaml new file mode 120000 index 00000000..48becbb8 --- /dev/null +++ b/qa/suites/rbd/nbd/cluster/openstack.yaml @@ -0,0 +1 @@ +../../thrash/clusters/openstack.yaml \ No newline at end of file diff --git a/qa/suites/rbd/nbd/msgr-failures b/qa/suites/rbd/nbd/msgr-failures new file mode 120000 index 00000000..03689aa4 --- /dev/null +++ b/qa/suites/rbd/nbd/msgr-failures @@ -0,0 +1 @@ +../thrash/msgr-failures \ No newline at end of file diff --git a/qa/suites/rbd/nbd/objectstore b/qa/suites/rbd/nbd/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rbd/nbd/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rbd/nbd/thrashers b/qa/suites/rbd/nbd/thrashers new file mode 120000 index 00000000..f461dadc --- /dev/null +++ b/qa/suites/rbd/nbd/thrashers @@ -0,0 +1 @@ +../thrash/thrashers \ No newline at end of file diff --git a/qa/suites/rbd/nbd/thrashosds-health.yaml b/qa/suites/rbd/nbd/thrashosds-health.yaml new file mode 120000 index 00000000..9124eb1a --- /dev/null +++ b/qa/suites/rbd/nbd/thrashosds-health.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/suites/rbd/nbd/workloads/.qa b/qa/suites/rbd/nbd/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/nbd/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/nbd/workloads/rbd_fsx_nbd.yaml b/qa/suites/rbd/nbd/workloads/rbd_fsx_nbd.yaml new file mode 100644 index 00000000..b6e9d5b1 --- /dev/null +++ b/qa/suites/rbd/nbd/workloads/rbd_fsx_nbd.yaml @@ -0,0 +1,15 @@ +os_type: ubuntu +overrides: + install: + ceph: + extra_packages: [rbd-nbd] +tasks: +- rbd_fsx: + clients: [client.0] + ops: 6000 + nbd: True + holebdy: 512 + punch_holes: true + readbdy: 512 + truncbdy: 512 + writebdy: 512 diff --git a/qa/suites/rbd/nbd/workloads/rbd_nbd.yaml b/qa/suites/rbd/nbd/workloads/rbd_nbd.yaml new file mode 100644 index 00000000..897d07ce --- /dev/null +++ b/qa/suites/rbd/nbd/workloads/rbd_nbd.yaml @@ -0,0 +1,10 @@ +os_type: ubuntu +overrides: + install: + ceph: + extra_packages: [rbd-nbd] +tasks: +- workunit: + clients: + client.0: + - rbd/rbd-nbd.sh diff --git a/qa/suites/rbd/qemu/% b/qa/suites/rbd/qemu/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/qemu/.qa b/qa/suites/rbd/qemu/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/qemu/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/qemu/cache/.qa b/qa/suites/rbd/qemu/cache/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/qemu/cache/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/qemu/cache/none.yaml b/qa/suites/rbd/qemu/cache/none.yaml new file mode 100644 index 00000000..42fd9c95 --- /dev/null +++ b/qa/suites/rbd/qemu/cache/none.yaml @@ -0,0 +1,6 @@ +tasks: +- install: +- ceph: + conf: + client: + rbd cache: false diff --git a/qa/suites/rbd/qemu/cache/writeback.yaml b/qa/suites/rbd/qemu/cache/writeback.yaml new file mode 100644 index 00000000..86fe06af --- /dev/null +++ b/qa/suites/rbd/qemu/cache/writeback.yaml @@ -0,0 +1,6 @@ +tasks: +- install: +- ceph: + conf: + client: + rbd cache: true diff --git a/qa/suites/rbd/qemu/cache/writethrough.yaml b/qa/suites/rbd/qemu/cache/writethrough.yaml new file mode 
100644 index 00000000..6dc29e16 --- /dev/null +++ b/qa/suites/rbd/qemu/cache/writethrough.yaml @@ -0,0 +1,7 @@ +tasks: +- install: +- ceph: + conf: + client: + rbd cache: true + rbd cache max dirty: 0 diff --git a/qa/suites/rbd/qemu/clusters/+ b/qa/suites/rbd/qemu/clusters/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/qemu/clusters/.qa b/qa/suites/rbd/qemu/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/qemu/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/qemu/clusters/fixed-3.yaml b/qa/suites/rbd/qemu/clusters/fixed-3.yaml new file mode 120000 index 00000000..f75a848b --- /dev/null +++ b/qa/suites/rbd/qemu/clusters/fixed-3.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-3.yaml \ No newline at end of file diff --git a/qa/suites/rbd/qemu/clusters/openstack.yaml b/qa/suites/rbd/qemu/clusters/openstack.yaml new file mode 100644 index 00000000..9c39c7e5 --- /dev/null +++ b/qa/suites/rbd/qemu/clusters/openstack.yaml @@ -0,0 +1,8 @@ +openstack: + - machine: + disk: 40 # GB + ram: 30000 # MB + cpus: 1 + volumes: # attached to each instance + count: 4 + size: 30 # GB diff --git a/qa/suites/rbd/qemu/features/.qa b/qa/suites/rbd/qemu/features/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/qemu/features/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/qemu/features/defaults.yaml b/qa/suites/rbd/qemu/features/defaults.yaml new file mode 100644 index 00000000..75afd68d --- /dev/null +++ b/qa/suites/rbd/qemu/features/defaults.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default features: 61 diff --git a/qa/suites/rbd/qemu/features/journaling.yaml b/qa/suites/rbd/qemu/features/journaling.yaml new file mode 100644 index 00000000..6cea62a8 --- /dev/null +++ b/qa/suites/rbd/qemu/features/journaling.yaml @@ -0,0 +1,5 @@ +overrides: + ceph: + conf: + client: + rbd default features: 125 diff --git a/qa/suites/rbd/qemu/msgr-failures/.qa b/qa/suites/rbd/qemu/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/qemu/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/qemu/msgr-failures/few.yaml b/qa/suites/rbd/qemu/msgr-failures/few.yaml new file mode 100644 index 00000000..9349b4f9 --- /dev/null +++ b/qa/suites/rbd/qemu/msgr-failures/few.yaml @@ -0,0 +1,8 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + log-whitelist: + - but it is still running + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rbd/qemu/objectstore b/qa/suites/rbd/qemu/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rbd/qemu/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rbd/qemu/pool/.qa b/qa/suites/rbd/qemu/pool/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/qemu/pool/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml b/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml new file mode 100644 index 00000000..c75e6fd4 --- /dev/null +++ b/qa/suites/rbd/qemu/pool/ec-cache-pool.yaml @@ -0,0 +1,21 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NEAR_FULL\) + - \(CACHE_POOL_NO_HIT_SET\) +tasks: +- exec: + client.0: + - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2 + - sudo ceph osd pool delete rbd 
rbd --yes-i-really-really-mean-it + - sudo ceph osd pool create rbd 4 4 erasure teuthologyprofile + - sudo ceph osd pool create cache 4 + - sudo ceph osd tier add rbd cache + - sudo ceph osd tier cache-mode cache writeback + - sudo ceph osd tier set-overlay rbd cache + - sudo ceph osd pool set cache hit_set_type bloom + - sudo ceph osd pool set cache hit_set_count 8 + - sudo ceph osd pool set cache hit_set_period 60 + - sudo ceph osd pool set cache target_max_objects 250 + - rbd pool init rbd diff --git a/qa/suites/rbd/qemu/pool/ec-data-pool.yaml b/qa/suites/rbd/qemu/pool/ec-data-pool.yaml new file mode 100644 index 00000000..f39a5bb4 --- /dev/null +++ b/qa/suites/rbd/qemu/pool/ec-data-pool.yaml @@ -0,0 +1,24 @@ +tasks: +- exec: + client.0: + - sudo ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2 + - sudo ceph osd pool create datapool 4 4 erasure teuthologyprofile + - sudo ceph osd pool set datapool allow_ec_overwrites true + - rbd pool init datapool + +overrides: + thrashosds: + bdev_inject_crash: 2 + bdev_inject_crash_probability: .5 + ceph: + fs: xfs + conf: + client: + rbd default data pool: datapool + osd: # force bluestore since it's required for ec overwrites + osd objectstore: bluestore + bluestore block size: 96636764160 + enable experimental unrecoverable data corrupting features: "*" + osd debug randomize hobject sort order: false +# this doesn't work with failures bc the log writes are not atomic across the two backends +# bluestore bluefs env mirror: true diff --git a/qa/suites/rbd/qemu/pool/none.yaml b/qa/suites/rbd/qemu/pool/none.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/qemu/pool/replicated-data-pool.yaml b/qa/suites/rbd/qemu/pool/replicated-data-pool.yaml new file mode 100644 index 00000000..c5647dba --- /dev/null +++ b/qa/suites/rbd/qemu/pool/replicated-data-pool.yaml @@ -0,0 +1,11 @@ +tasks: +- exec: + client.0: + - sudo ceph osd pool create datapool 4 + - rbd pool init datapool + +overrides: + ceph: + conf: + client: + rbd default data pool: datapool diff --git a/qa/suites/rbd/qemu/pool/small-cache-pool.yaml b/qa/suites/rbd/qemu/pool/small-cache-pool.yaml new file mode 100644 index 00000000..1b505657 --- /dev/null +++ b/qa/suites/rbd/qemu/pool/small-cache-pool.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NEAR_FULL\) + - \(CACHE_POOL_NO_HIT_SET\) +tasks: +- exec: + client.0: + - sudo ceph osd pool create cache 4 + - sudo ceph osd tier add rbd cache + - sudo ceph osd tier cache-mode cache writeback + - sudo ceph osd tier set-overlay rbd cache + - sudo ceph osd pool set cache hit_set_type bloom + - sudo ceph osd pool set cache hit_set_count 8 + - sudo ceph osd pool set cache hit_set_period 60 + - sudo ceph osd pool set cache target_max_objects 250 diff --git a/qa/suites/rbd/qemu/supported-random-distro$ b/qa/suites/rbd/qemu/supported-random-distro$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/rbd/qemu/supported-random-distro$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rbd/qemu/workloads/.qa b/qa/suites/rbd/qemu/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/qemu/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/qemu/workloads/qemu_bonnie.yaml b/qa/suites/rbd/qemu/workloads/qemu_bonnie.yaml new file mode 100644 index 00000000..0ef9ebb6 --- /dev/null +++ 
b/qa/suites/rbd/qemu/workloads/qemu_bonnie.yaml @@ -0,0 +1,6 @@ +tasks: +- qemu: + all: + clone: true + test: qa/workunits/suites/bonnie.sh +exclude_arch: armv7l diff --git a/qa/suites/rbd/qemu/workloads/qemu_fsstress.yaml b/qa/suites/rbd/qemu/workloads/qemu_fsstress.yaml new file mode 100644 index 00000000..95f51480 --- /dev/null +++ b/qa/suites/rbd/qemu/workloads/qemu_fsstress.yaml @@ -0,0 +1,6 @@ +tasks: +- qemu: + all: + clone: true + test: qa/workunits/suites/fsstress.sh +exclude_arch: armv7l diff --git a/qa/suites/rbd/qemu/workloads/qemu_iozone.yaml.disabled b/qa/suites/rbd/qemu/workloads/qemu_iozone.yaml.disabled new file mode 100644 index 00000000..e159e208 --- /dev/null +++ b/qa/suites/rbd/qemu/workloads/qemu_iozone.yaml.disabled @@ -0,0 +1,6 @@ +tasks: +- qemu: + all: + test: qa/workunits/suites/iozone.sh + image_size: 20480 +exclude_arch: armv7l diff --git a/qa/suites/rbd/qemu/workloads/qemu_xfstests.yaml b/qa/suites/rbd/qemu/workloads/qemu_xfstests.yaml new file mode 100644 index 00000000..198f798d --- /dev/null +++ b/qa/suites/rbd/qemu/workloads/qemu_xfstests.yaml @@ -0,0 +1,8 @@ +tasks: +- qemu: + all: + clone: true + type: block + disks: 3 + test: qa/run_xfstests_qemu.sh +exclude_arch: armv7l diff --git a/qa/suites/rbd/singleton-bluestore/% b/qa/suites/rbd/singleton-bluestore/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/singleton-bluestore/.qa b/qa/suites/rbd/singleton-bluestore/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/singleton-bluestore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/singleton-bluestore/all/.qa b/qa/suites/rbd/singleton-bluestore/all/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/singleton-bluestore/all/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/singleton-bluestore/all/issue-20295.yaml b/qa/suites/rbd/singleton-bluestore/all/issue-20295.yaml new file mode 100644 index 00000000..9af52e0e --- /dev/null +++ b/qa/suites/rbd/singleton-bluestore/all/issue-20295.yaml @@ -0,0 +1,14 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0] +- [mon.b, mgr.y, osd.3, osd.4, osd.5] +- [mon.c, mgr.z, osd.6, osd.7, osd.8] +- [osd.9, osd.10, osd.11] +tasks: +- install: +- ceph: + log-whitelist: + - 'application not enabled' +- workunit: + timeout: 30m + clients: + all: [rbd/issue-20295.sh] diff --git a/qa/suites/rbd/singleton-bluestore/objectstore/.qa b/qa/suites/rbd/singleton-bluestore/objectstore/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/singleton-bluestore/objectstore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/singleton-bluestore/objectstore/bluestore-bitmap.yaml b/qa/suites/rbd/singleton-bluestore/objectstore/bluestore-bitmap.yaml new file mode 120000 index 00000000..a59cf517 --- /dev/null +++ b/qa/suites/rbd/singleton-bluestore/objectstore/bluestore-bitmap.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-bitmap.yaml \ No newline at end of file diff --git a/qa/suites/rbd/singleton-bluestore/objectstore/bluestore-comp-snappy.yaml b/qa/suites/rbd/singleton-bluestore/objectstore/bluestore-comp-snappy.yaml new file mode 120000 index 00000000..888caf55 --- /dev/null +++ b/qa/suites/rbd/singleton-bluestore/objectstore/bluestore-comp-snappy.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-comp-snappy.yaml \ No newline at end of file diff --git a/qa/suites/rbd/singleton-bluestore/openstack.yaml 
b/qa/suites/rbd/singleton-bluestore/openstack.yaml new file mode 100644 index 00000000..f4d1349b --- /dev/null +++ b/qa/suites/rbd/singleton-bluestore/openstack.yaml @@ -0,0 +1,4 @@ +openstack: + - volumes: # attached to each instance + count: 3 + size: 30 # GB diff --git a/qa/suites/rbd/singleton-bluestore/supported-random-distro$ b/qa/suites/rbd/singleton-bluestore/supported-random-distro$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/rbd/singleton-bluestore/supported-random-distro$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rbd/singleton/% b/qa/suites/rbd/singleton/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/singleton/.qa b/qa/suites/rbd/singleton/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/singleton/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/singleton/all/.qa b/qa/suites/rbd/singleton/all/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/singleton/all/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/singleton/all/admin_socket.yaml b/qa/suites/rbd/singleton/all/admin_socket.yaml new file mode 100644 index 00000000..22dbd8c0 --- /dev/null +++ b/qa/suites/rbd/singleton/all/admin_socket.yaml @@ -0,0 +1,9 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + fs: xfs +- workunit: + clients: + all: [rbd/test_admin_socket.sh] diff --git a/qa/suites/rbd/singleton/all/formatted-output.yaml b/qa/suites/rbd/singleton/all/formatted-output.yaml new file mode 100644 index 00000000..7be94ef2 --- /dev/null +++ b/qa/suites/rbd/singleton/all/formatted-output.yaml @@ -0,0 +1,10 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + fs: xfs +- cram: + clients: + client.0: + - src/test/cli-integration/rbd/formatted-output.t diff --git a/qa/suites/rbd/singleton/all/merge_diff.yaml b/qa/suites/rbd/singleton/all/merge_diff.yaml new file mode 100644 index 00000000..31b269d6 --- /dev/null +++ b/qa/suites/rbd/singleton/all/merge_diff.yaml @@ -0,0 +1,9 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + fs: xfs +- workunit: + clients: + all: [rbd/merge_diff.sh] diff --git a/qa/suites/rbd/singleton/all/permissions.yaml b/qa/suites/rbd/singleton/all/permissions.yaml new file mode 100644 index 00000000..c00a5c9b --- /dev/null +++ b/qa/suites/rbd/singleton/all/permissions.yaml @@ -0,0 +1,9 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + fs: xfs +- workunit: + clients: + all: [rbd/permissions.sh] diff --git a/qa/suites/rbd/singleton/all/qemu-iotests-no-cache.yaml b/qa/suites/rbd/singleton/all/qemu-iotests-no-cache.yaml new file mode 100644 index 00000000..bfb20390 --- /dev/null +++ b/qa/suites/rbd/singleton/all/qemu-iotests-no-cache.yaml @@ -0,0 +1,13 @@ +exclude_arch: armv7l +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + fs: xfs + conf: + client: + rbd cache: false +- workunit: + clients: + all: [rbd/qemu-iotests.sh] diff --git a/qa/suites/rbd/singleton/all/qemu-iotests-writeback.yaml b/qa/suites/rbd/singleton/all/qemu-iotests-writeback.yaml new file mode 100644 index 00000000..bf1b4bef --- /dev/null +++ b/qa/suites/rbd/singleton/all/qemu-iotests-writeback.yaml @@ -0,0 +1,13 @@ +exclude_arch: armv7l +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + fs: xfs + conf: + 
client: + rbd cache: true +- workunit: + clients: + all: [rbd/qemu-iotests.sh] diff --git a/qa/suites/rbd/singleton/all/qemu-iotests-writethrough.yaml b/qa/suites/rbd/singleton/all/qemu-iotests-writethrough.yaml new file mode 100644 index 00000000..908a6780 --- /dev/null +++ b/qa/suites/rbd/singleton/all/qemu-iotests-writethrough.yaml @@ -0,0 +1,14 @@ +exclude_arch: armv7l +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + fs: xfs + conf: + client: + rbd cache: true + rbd cache max dirty: 0 +- workunit: + clients: + all: [rbd/qemu-iotests.sh] diff --git a/qa/suites/rbd/singleton/all/rbd-vs-unmanaged-snaps.yaml b/qa/suites/rbd/singleton/all/rbd-vs-unmanaged-snaps.yaml new file mode 100644 index 00000000..f14bd743 --- /dev/null +++ b/qa/suites/rbd/singleton/all/rbd-vs-unmanaged-snaps.yaml @@ -0,0 +1,14 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + fs: xfs + conf: + client: + rbd validate pool: false +- workunit: + clients: + all: + - mon/rbd_snaps_ops.sh + diff --git a/qa/suites/rbd/singleton/all/rbd_mirror.yaml b/qa/suites/rbd/singleton/all/rbd_mirror.yaml new file mode 100644 index 00000000..0800cbfc --- /dev/null +++ b/qa/suites/rbd/singleton/all/rbd_mirror.yaml @@ -0,0 +1,13 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + fs: xfs + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NO_HIT_SET\) + - \(POOL_APP_NOT_ENABLED\) +- workunit: + clients: + all: [rbd/test_rbd_mirror.sh] diff --git a/qa/suites/rbd/singleton/all/rbd_tasks.yaml b/qa/suites/rbd/singleton/all/rbd_tasks.yaml new file mode 100644 index 00000000..b920cfc7 --- /dev/null +++ b/qa/suites/rbd/singleton/all/rbd_tasks.yaml @@ -0,0 +1,13 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + fs: xfs + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NO_HIT_SET\) + - \(POOL_APP_NOT_ENABLED\) +- workunit: + clients: + all: [rbd/test_rbd_tasks.sh] diff --git a/qa/suites/rbd/singleton/all/rbdmap_RBDMAPFILE.yaml b/qa/suites/rbd/singleton/all/rbdmap_RBDMAPFILE.yaml new file mode 100644 index 00000000..0053e66b --- /dev/null +++ b/qa/suites/rbd/singleton/all/rbdmap_RBDMAPFILE.yaml @@ -0,0 +1,7 @@ +roles: +- [client.0] +tasks: +- install: +- workunit: + clients: + all: [rbd/test_rbdmap_RBDMAPFILE.sh] diff --git a/qa/suites/rbd/singleton/all/read-flags-no-cache.yaml b/qa/suites/rbd/singleton/all/read-flags-no-cache.yaml new file mode 100644 index 00000000..cf602cbb --- /dev/null +++ b/qa/suites/rbd/singleton/all/read-flags-no-cache.yaml @@ -0,0 +1,12 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + fs: xfs + conf: + client: + rbd cache: false +- workunit: + clients: + all: [rbd/read-flags.sh] diff --git a/qa/suites/rbd/singleton/all/read-flags-writeback.yaml b/qa/suites/rbd/singleton/all/read-flags-writeback.yaml new file mode 100644 index 00000000..e763bcc3 --- /dev/null +++ b/qa/suites/rbd/singleton/all/read-flags-writeback.yaml @@ -0,0 +1,12 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + fs: xfs + conf: + client: + rbd cache: true +- workunit: + clients: + all: [rbd/read-flags.sh] diff --git a/qa/suites/rbd/singleton/all/read-flags-writethrough.yaml b/qa/suites/rbd/singleton/all/read-flags-writethrough.yaml new file mode 100644 index 00000000..fc499d49 --- /dev/null +++ b/qa/suites/rbd/singleton/all/read-flags-writethrough.yaml @@ -0,0 +1,13 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + fs: 
xfs + conf: + client: + rbd cache: true + rbd cache max dirty: 0 +- workunit: + clients: + all: [rbd/read-flags.sh] diff --git a/qa/suites/rbd/singleton/all/snap-diff.yaml b/qa/suites/rbd/singleton/all/snap-diff.yaml new file mode 100644 index 00000000..be7e6858 --- /dev/null +++ b/qa/suites/rbd/singleton/all/snap-diff.yaml @@ -0,0 +1,10 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + fs: xfs +- cram: + clients: + client.0: + - src/test/cli-integration/rbd/snap-diff.t diff --git a/qa/suites/rbd/singleton/all/verify_pool.yaml b/qa/suites/rbd/singleton/all/verify_pool.yaml new file mode 100644 index 00000000..5ab06f74 --- /dev/null +++ b/qa/suites/rbd/singleton/all/verify_pool.yaml @@ -0,0 +1,9 @@ +roles: +- [mon.a, mgr.x, osd.0, osd.1, client.0] +tasks: +- install: +- ceph: + fs: xfs +- workunit: + clients: + all: [rbd/verify_pool.sh] diff --git a/qa/suites/rbd/singleton/objectstore b/qa/suites/rbd/singleton/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rbd/singleton/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rbd/singleton/openstack.yaml b/qa/suites/rbd/singleton/openstack.yaml new file mode 100644 index 00000000..21eca2bb --- /dev/null +++ b/qa/suites/rbd/singleton/openstack.yaml @@ -0,0 +1,4 @@ +openstack: + - volumes: # attached to each instance + count: 2 + size: 30 # GB diff --git a/qa/suites/rbd/singleton/supported-random-distro$ b/qa/suites/rbd/singleton/supported-random-distro$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/rbd/singleton/supported-random-distro$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rbd/thrash/% b/qa/suites/rbd/thrash/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/thrash/.qa b/qa/suites/rbd/thrash/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/thrash/base/.qa b/qa/suites/rbd/thrash/base/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/thrash/base/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/thrash/base/install.yaml b/qa/suites/rbd/thrash/base/install.yaml new file mode 100644 index 00000000..2030acb9 --- /dev/null +++ b/qa/suites/rbd/thrash/base/install.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/rbd/thrash/clusters/+ b/qa/suites/rbd/thrash/clusters/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/thrash/clusters/.qa b/qa/suites/rbd/thrash/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/thrash/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/thrash/clusters/fixed-2.yaml b/qa/suites/rbd/thrash/clusters/fixed-2.yaml new file mode 120000 index 00000000..230ff0fd --- /dev/null +++ b/qa/suites/rbd/thrash/clusters/fixed-2.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-2.yaml \ No newline at end of file diff --git a/qa/suites/rbd/thrash/clusters/openstack.yaml b/qa/suites/rbd/thrash/clusters/openstack.yaml new file mode 100644 index 00000000..40fef477 --- /dev/null +++ b/qa/suites/rbd/thrash/clusters/openstack.yaml @@ -0,0 +1,8 @@ +openstack: + - machine: + disk: 40 # GB + ram: 8000 # MB + cpus: 1 + volumes: # attached to each instance + count: 4 + size: 30 # GB diff --git 
a/qa/suites/rbd/thrash/msgr-failures/.qa b/qa/suites/rbd/thrash/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/thrash/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/thrash/msgr-failures/few.yaml b/qa/suites/rbd/thrash/msgr-failures/few.yaml new file mode 100644 index 00000000..4326fe23 --- /dev/null +++ b/qa/suites/rbd/thrash/msgr-failures/few.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rbd/thrash/objectstore b/qa/suites/rbd/thrash/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rbd/thrash/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rbd/thrash/supported-random-distro$ b/qa/suites/rbd/thrash/supported-random-distro$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/rbd/thrash/supported-random-distro$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rbd/thrash/thrashers/.qa b/qa/suites/rbd/thrash/thrashers/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/thrash/thrashers/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/thrash/thrashers/cache.yaml b/qa/suites/rbd/thrash/thrashers/cache.yaml new file mode 100644 index 00000000..24956484 --- /dev/null +++ b/qa/suites/rbd/thrash/thrashers/cache.yaml @@ -0,0 +1,21 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost + - overall HEALTH_ + - \(CACHE_POOL_NEAR_FULL\) + - \(CACHE_POOL_NO_HIT_SET\) +tasks: +- exec: + client.0: + - sudo ceph osd pool create cache 4 + - sudo ceph osd tier add rbd cache + - sudo ceph osd tier cache-mode cache writeback + - sudo ceph osd tier set-overlay rbd cache + - sudo ceph osd pool set cache hit_set_type bloom + - sudo ceph osd pool set cache hit_set_count 8 + - sudo ceph osd pool set cache hit_set_period 60 + - sudo ceph osd pool set cache target_max_objects 250 +- thrashosds: + timeout: 1200 diff --git a/qa/suites/rbd/thrash/thrashers/default.yaml b/qa/suites/rbd/thrash/thrashers/default.yaml new file mode 100644 index 00000000..3f1615c8 --- /dev/null +++ b/qa/suites/rbd/thrash/thrashers/default.yaml @@ -0,0 +1,8 @@ +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost +tasks: +- thrashosds: + timeout: 1200 diff --git a/qa/suites/rbd/thrash/thrashosds-health.yaml b/qa/suites/rbd/thrash/thrashosds-health.yaml new file mode 120000 index 00000000..9124eb1a --- /dev/null +++ b/qa/suites/rbd/thrash/thrashosds-health.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/suites/rbd/thrash/workloads/.qa b/qa/suites/rbd/thrash/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/thrash/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/thrash/workloads/journal.yaml b/qa/suites/rbd/thrash/workloads/journal.yaml new file mode 100644 index 00000000..4dae1063 --- /dev/null +++ b/qa/suites/rbd/thrash/workloads/journal.yaml @@ -0,0 +1,5 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/journal.sh diff --git a/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml b/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml new file mode 100644 index 00000000..6ae7f462 
--- /dev/null +++ b/qa/suites/rbd/thrash/workloads/rbd_api_tests.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NO_HIT_SET\) + - \(POOL_APP_NOT_ENABLED\) +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd.sh + env: + RBD_FEATURES: "61" diff --git a/qa/suites/rbd/thrash/workloads/rbd_api_tests_copy_on_read.yaml b/qa/suites/rbd/thrash/workloads/rbd_api_tests_copy_on_read.yaml new file mode 100644 index 00000000..a9021548 --- /dev/null +++ b/qa/suites/rbd/thrash/workloads/rbd_api_tests_copy_on_read.yaml @@ -0,0 +1,16 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd.sh + env: + RBD_FEATURES: "61" +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NO_HIT_SET\) + - \(POOL_APP_NOT_ENABLED\) + conf: + client: + rbd clone copy on read: true diff --git a/qa/suites/rbd/thrash/workloads/rbd_api_tests_journaling.yaml b/qa/suites/rbd/thrash/workloads/rbd_api_tests_journaling.yaml new file mode 100644 index 00000000..578115ee --- /dev/null +++ b/qa/suites/rbd/thrash/workloads/rbd_api_tests_journaling.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NO_HIT_SET\) + - \(POOL_APP_NOT_ENABLED\) +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd.sh + env: + RBD_FEATURES: "125" diff --git a/qa/suites/rbd/thrash/workloads/rbd_api_tests_no_locking.yaml b/qa/suites/rbd/thrash/workloads/rbd_api_tests_no_locking.yaml new file mode 100644 index 00000000..04af9c85 --- /dev/null +++ b/qa/suites/rbd/thrash/workloads/rbd_api_tests_no_locking.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NO_HIT_SET\) + - \(POOL_APP_NOT_ENABLED\) +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd.sh + env: + RBD_FEATURES: "1" diff --git a/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writeback.yaml b/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writeback.yaml new file mode 100644 index 00000000..98e0b392 --- /dev/null +++ b/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writeback.yaml @@ -0,0 +1,9 @@ +tasks: +- rbd_fsx: + clients: [client.0] + ops: 6000 +overrides: + ceph: + conf: + client: + rbd cache: true diff --git a/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writethrough.yaml b/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writethrough.yaml new file mode 100644 index 00000000..463ba996 --- /dev/null +++ b/qa/suites/rbd/thrash/workloads/rbd_fsx_cache_writethrough.yaml @@ -0,0 +1,10 @@ +tasks: +- rbd_fsx: + clients: [client.0] + ops: 6000 +overrides: + ceph: + conf: + client: + rbd cache: true + rbd cache max dirty: 0 diff --git a/qa/suites/rbd/thrash/workloads/rbd_fsx_copy_on_read.yaml b/qa/suites/rbd/thrash/workloads/rbd_fsx_copy_on_read.yaml new file mode 100644 index 00000000..0c284ca8 --- /dev/null +++ b/qa/suites/rbd/thrash/workloads/rbd_fsx_copy_on_read.yaml @@ -0,0 +1,10 @@ +tasks: +- rbd_fsx: + clients: [client.0] + ops: 6000 +overrides: + ceph: + conf: + client: + rbd cache: true + rbd clone copy on read: true diff --git a/qa/suites/rbd/thrash/workloads/rbd_fsx_deep_copy.yaml b/qa/suites/rbd/thrash/workloads/rbd_fsx_deep_copy.yaml new file mode 100644 index 00000000..79749149 --- /dev/null +++ b/qa/suites/rbd/thrash/workloads/rbd_fsx_deep_copy.yaml @@ -0,0 +1,5 @@ +tasks: +- rbd_fsx: + clients: [client.0] + ops: 6000 + deep_copy: True diff --git a/qa/suites/rbd/thrash/workloads/rbd_fsx_journal.yaml b/qa/suites/rbd/thrash/workloads/rbd_fsx_journal.yaml new file mode 100644 index 00000000..13e9a783 --- 
/dev/null +++ b/qa/suites/rbd/thrash/workloads/rbd_fsx_journal.yaml @@ -0,0 +1,5 @@ +tasks: +- rbd_fsx: + clients: [client.0] + ops: 6000 + journal_replay: True diff --git a/qa/suites/rbd/thrash/workloads/rbd_fsx_nocache.yaml b/qa/suites/rbd/thrash/workloads/rbd_fsx_nocache.yaml new file mode 100644 index 00000000..968665e1 --- /dev/null +++ b/qa/suites/rbd/thrash/workloads/rbd_fsx_nocache.yaml @@ -0,0 +1,9 @@ +tasks: +- rbd_fsx: + clients: [client.0] + ops: 6000 +overrides: + ceph: + conf: + client: + rbd cache: false diff --git a/qa/suites/rbd/thrash/workloads/rbd_fsx_rate_limit.yaml b/qa/suites/rbd/thrash/workloads/rbd_fsx_rate_limit.yaml new file mode 100644 index 00000000..611320bc --- /dev/null +++ b/qa/suites/rbd/thrash/workloads/rbd_fsx_rate_limit.yaml @@ -0,0 +1,11 @@ +tasks: +- rbd_fsx: + clients: [client.0] + ops: 6000 +overrides: + ceph: + conf: + client: + rbd qos iops limit: 50 + rbd qos iops burst: 100 + rbd qos schedule tick min: 100 diff --git a/qa/suites/rbd/valgrind/% b/qa/suites/rbd/valgrind/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rbd/valgrind/.qa b/qa/suites/rbd/valgrind/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/valgrind/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/valgrind/base/.qa b/qa/suites/rbd/valgrind/base/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/valgrind/base/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/valgrind/base/install.yaml b/qa/suites/rbd/valgrind/base/install.yaml new file mode 100644 index 00000000..2030acb9 --- /dev/null +++ b/qa/suites/rbd/valgrind/base/install.yaml @@ -0,0 +1,3 @@ +tasks: +- install: +- ceph: diff --git a/qa/suites/rbd/valgrind/centos_latest.yaml b/qa/suites/rbd/valgrind/centos_latest.yaml new file mode 120000 index 00000000..bd9854e7 --- /dev/null +++ b/qa/suites/rbd/valgrind/centos_latest.yaml @@ -0,0 +1 @@ +.qa/distros/supported/centos_latest.yaml \ No newline at end of file diff --git a/qa/suites/rbd/valgrind/clusters b/qa/suites/rbd/valgrind/clusters new file mode 120000 index 00000000..ae92569e --- /dev/null +++ b/qa/suites/rbd/valgrind/clusters @@ -0,0 +1 @@ +../basic/clusters \ No newline at end of file diff --git a/qa/suites/rbd/valgrind/objectstore b/qa/suites/rbd/valgrind/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rbd/valgrind/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rbd/valgrind/validator/.qa b/qa/suites/rbd/valgrind/validator/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/valgrind/validator/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rbd/valgrind/validator/memcheck.yaml b/qa/suites/rbd/valgrind/validator/memcheck.yaml new file mode 100644 index 00000000..fcea1b88 --- /dev/null +++ b/qa/suites/rbd/valgrind/validator/memcheck.yaml @@ -0,0 +1,12 @@ +# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126 +os_type: centos + +overrides: + install: + ceph: + debuginfo: true + rbd_fsx: + valgrind: ["--tool=memcheck"] + workunit: + env: + VALGRIND: "--tool=memcheck --leak-check=full" diff --git a/qa/suites/rbd/valgrind/workloads/.qa b/qa/suites/rbd/valgrind/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rbd/valgrind/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git 
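The rbd_fsx_* workload fragments above all run the same fsx exerciser for 6000 ops and differ only in the librbd client options they override. As a quick reference, here is a sketch of the cache configurations these facets toggle, assuming the usual librbd semantics in which a zero dirty limit turns the cache into write-through:

    # sketch: client-side knobs exercised by the fsx cache variants
    overrides:
      ceph:
        conf:
          client:
            rbd cache: true            # writeback (rbd_fsx_cache_writeback.yaml)
            # rbd cache max dirty: 0   # keep the cache but never buffer dirty data,
            #                          # i.e. write-through (rbd_fsx_cache_writethrough.yaml)
            # rbd cache: false         # bypass the cache entirely (rbd_fsx_nocache.yaml)

The copy-on-read, journal replay, deep-copy and QoS variants follow the same pattern with their respective client options.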
a/qa/suites/rbd/valgrind/workloads/c_api_tests.yaml b/qa/suites/rbd/valgrind/workloads/c_api_tests.yaml new file mode 100644 index 00000000..04af9c85 --- /dev/null +++ b/qa/suites/rbd/valgrind/workloads/c_api_tests.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NO_HIT_SET\) + - \(POOL_APP_NOT_ENABLED\) +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd.sh + env: + RBD_FEATURES: "1" diff --git a/qa/suites/rbd/valgrind/workloads/c_api_tests_with_defaults.yaml b/qa/suites/rbd/valgrind/workloads/c_api_tests_with_defaults.yaml new file mode 100644 index 00000000..6ae7f462 --- /dev/null +++ b/qa/suites/rbd/valgrind/workloads/c_api_tests_with_defaults.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NO_HIT_SET\) + - \(POOL_APP_NOT_ENABLED\) +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd.sh + env: + RBD_FEATURES: "61" diff --git a/qa/suites/rbd/valgrind/workloads/c_api_tests_with_journaling.yaml b/qa/suites/rbd/valgrind/workloads/c_api_tests_with_journaling.yaml new file mode 100644 index 00000000..578115ee --- /dev/null +++ b/qa/suites/rbd/valgrind/workloads/c_api_tests_with_journaling.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NO_HIT_SET\) + - \(POOL_APP_NOT_ENABLED\) +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd.sh + env: + RBD_FEATURES: "125" diff --git a/qa/suites/rbd/valgrind/workloads/fsx.yaml b/qa/suites/rbd/valgrind/workloads/fsx.yaml new file mode 100644 index 00000000..5c745a2c --- /dev/null +++ b/qa/suites/rbd/valgrind/workloads/fsx.yaml @@ -0,0 +1,4 @@ +tasks: +- rbd_fsx: + clients: [client.0] + size: 134217728 diff --git a/qa/suites/rbd/valgrind/workloads/python_api_tests.yaml b/qa/suites/rbd/valgrind/workloads/python_api_tests.yaml new file mode 100644 index 00000000..a7b3ce7d --- /dev/null +++ b/qa/suites/rbd/valgrind/workloads/python_api_tests.yaml @@ -0,0 +1,7 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd_python.sh + env: + RBD_FEATURES: "1" diff --git a/qa/suites/rbd/valgrind/workloads/python_api_tests_with_defaults.yaml b/qa/suites/rbd/valgrind/workloads/python_api_tests_with_defaults.yaml new file mode 100644 index 00000000..40b2312f --- /dev/null +++ b/qa/suites/rbd/valgrind/workloads/python_api_tests_with_defaults.yaml @@ -0,0 +1,7 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd_python.sh + env: + RBD_FEATURES: "61" diff --git a/qa/suites/rbd/valgrind/workloads/python_api_tests_with_journaling.yaml b/qa/suites/rbd/valgrind/workloads/python_api_tests_with_journaling.yaml new file mode 100644 index 00000000..d0e905ff --- /dev/null +++ b/qa/suites/rbd/valgrind/workloads/python_api_tests_with_journaling.yaml @@ -0,0 +1,7 @@ +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd_python.sh + env: + RBD_FEATURES: "125" diff --git a/qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml b/qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml new file mode 100644 index 00000000..e0943439 --- /dev/null +++ b/qa/suites/rbd/valgrind/workloads/rbd_mirror.yaml @@ -0,0 +1,11 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(CACHE_POOL_NO_HIT_SET\) + - \(POOL_APP_NOT_ENABLED\) +tasks: +- workunit: + clients: + client.0: + - rbd/test_rbd_mirror.sh diff --git a/qa/suites/rgw/.qa b/qa/suites/rgw/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git 
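A recurring detail in the librbd workloads above and below: RBD_FEATURES is a bitmask of image features enabled for the test images. Assuming the standard librbd feature bits (layering=1, striping=2, exclusive-lock=4, object-map=8, fast-diff=16, deep-flatten=32, journaling=64), the three values used by these facets decode as sketched here:

    # sketch: feature masks used by the c_api_tests / rbd_api_tests variants
    env:
      RBD_FEATURES: "1"      # layering only (the plain / "no locking" variants)
      # RBD_FEATURES: "61"   # 1+4+8+16+32: layering, exclusive-lock, object-map,
      #                      # fast-diff, deep-flatten (the "with defaults" variants)
      # RBD_FEATURES: "125"  # 61+64: the defaults plus journaling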
a/qa/suites/rgw/hadoop-s3a/% b/qa/suites/rgw/hadoop-s3a/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rgw/hadoop-s3a/.qa b/qa/suites/rgw/hadoop-s3a/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/hadoop-s3a/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/hadoop-s3a/clusters/.qa b/qa/suites/rgw/hadoop-s3a/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/hadoop-s3a/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/hadoop-s3a/clusters/fixed-2.yaml b/qa/suites/rgw/hadoop-s3a/clusters/fixed-2.yaml new file mode 120000 index 00000000..230ff0fd --- /dev/null +++ b/qa/suites/rgw/hadoop-s3a/clusters/fixed-2.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-2.yaml \ No newline at end of file diff --git a/qa/suites/rgw/hadoop-s3a/hadoop/.qa b/qa/suites/rgw/hadoop-s3a/hadoop/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/hadoop-s3a/hadoop/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/hadoop-s3a/hadoop/default.yaml b/qa/suites/rgw/hadoop-s3a/hadoop/default.yaml new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/qa/suites/rgw/hadoop-s3a/hadoop/default.yaml @@ -0,0 +1 @@ + diff --git a/qa/suites/rgw/hadoop-s3a/hadoop/v32.yaml b/qa/suites/rgw/hadoop-s3a/hadoop/v32.yaml new file mode 100644 index 00000000..d017b756 --- /dev/null +++ b/qa/suites/rgw/hadoop-s3a/hadoop/v32.yaml @@ -0,0 +1,3 @@ +overrides: + s3a-hadoop: + hadoop-version: '3.2.0' diff --git a/qa/suites/rgw/hadoop-s3a/s3a-hadoop.yaml b/qa/suites/rgw/hadoop-s3a/s3a-hadoop.yaml new file mode 100644 index 00000000..ed077a89 --- /dev/null +++ b/qa/suites/rgw/hadoop-s3a/s3a-hadoop.yaml @@ -0,0 +1,11 @@ +tasks: +- install: +- ceph: +- ssh-keys: +- dnsmasq: + client.0: [s3.] +- rgw: + client.0: + dns-name: s3. 
+- s3a-hadoop: + role: client.0 diff --git a/qa/suites/rgw/hadoop-s3a/supported-random-distro$ b/qa/suites/rgw/hadoop-s3a/supported-random-distro$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/rgw/hadoop-s3a/supported-random-distro$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rgw/multifs/% b/qa/suites/rgw/multifs/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rgw/multifs/.qa b/qa/suites/rgw/multifs/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/multifs/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/multifs/clusters/.qa b/qa/suites/rgw/multifs/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/multifs/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/multifs/clusters/fixed-2.yaml b/qa/suites/rgw/multifs/clusters/fixed-2.yaml new file mode 120000 index 00000000..230ff0fd --- /dev/null +++ b/qa/suites/rgw/multifs/clusters/fixed-2.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-2.yaml \ No newline at end of file diff --git a/qa/suites/rgw/multifs/frontend/.qa b/qa/suites/rgw/multifs/frontend/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/multifs/frontend/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/multifs/frontend/civetweb.yaml b/qa/suites/rgw/multifs/frontend/civetweb.yaml new file mode 120000 index 00000000..f9115ff4 --- /dev/null +++ b/qa/suites/rgw/multifs/frontend/civetweb.yaml @@ -0,0 +1 @@ +.qa/rgw_frontend/civetweb.yaml \ No newline at end of file diff --git a/qa/suites/rgw/multifs/objectstore b/qa/suites/rgw/multifs/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rgw/multifs/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rgw/multifs/overrides.yaml b/qa/suites/rgw/multifs/overrides.yaml new file mode 100644 index 00000000..339784d3 --- /dev/null +++ b/qa/suites/rgw/multifs/overrides.yaml @@ -0,0 +1,10 @@ +overrides: + ceph: + wait-for-scrub: false + conf: + client: + debug rgw: 20 + rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo= + rgw crypt require ssl: false + rgw: + storage classes: LUKEWARM, FROZEN diff --git a/qa/suites/rgw/multifs/rgw_pool_type b/qa/suites/rgw/multifs/rgw_pool_type new file mode 120000 index 00000000..3bbd28e9 --- /dev/null +++ b/qa/suites/rgw/multifs/rgw_pool_type @@ -0,0 +1 @@ +.qa/rgw_pool_type \ No newline at end of file diff --git a/qa/suites/rgw/multifs/tasks/.qa b/qa/suites/rgw/multifs/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/multifs/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/multifs/tasks/rgw_bucket_quota.yaml b/qa/suites/rgw/multifs/tasks/rgw_bucket_quota.yaml new file mode 100644 index 00000000..c518d0e1 --- /dev/null +++ b/qa/suites/rgw/multifs/tasks/rgw_bucket_quota.yaml @@ -0,0 +1,10 @@ +# Amazon/S3.pm (cpan) not available as an rpm +os_type: ubuntu +tasks: +- install: +- ceph: +- rgw: [client.0] +- workunit: + clients: + client.0: + - rgw/s3_bucket_quota.pl diff --git a/qa/suites/rgw/multifs/tasks/rgw_multipart_upload.yaml b/qa/suites/rgw/multifs/tasks/rgw_multipart_upload.yaml new file mode 100644 index 00000000..b042aa80 --- /dev/null +++ 
b/qa/suites/rgw/multifs/tasks/rgw_multipart_upload.yaml @@ -0,0 +1,10 @@ +# Amazon::S3 is not available on el7 +os_type: ubuntu +tasks: +- install: +- ceph: +- rgw: [client.0] +- workunit: + clients: + client.0: + - rgw/s3_multipart_upload.pl diff --git a/qa/suites/rgw/multifs/tasks/rgw_ragweed.yaml b/qa/suites/rgw/multifs/tasks/rgw_ragweed.yaml new file mode 100644 index 00000000..32b0d5f2 --- /dev/null +++ b/qa/suites/rgw/multifs/tasks/rgw_ragweed.yaml @@ -0,0 +1,19 @@ +tasks: +- install: +- ceph: +- rgw: [client.0] +- ragweed: + client.0: + default-branch: ceph-nautilus + rgw_server: client.0 + stages: prepare +- ragweed: + client.0: + default-branch: ceph-nautilus + rgw_server: client.0 + stages: check +overrides: + ceph: + conf: + client: + rgw lc debug interval: 10 diff --git a/qa/suites/rgw/multifs/tasks/rgw_readwrite.yaml b/qa/suites/rgw/multifs/tasks/rgw_readwrite.yaml new file mode 100644 index 00000000..40a91599 --- /dev/null +++ b/qa/suites/rgw/multifs/tasks/rgw_readwrite.yaml @@ -0,0 +1,17 @@ +tasks: +- install: +- ceph: +- rgw: [client.0] +- s3readwrite: + client.0: + force-branch: ceph-nautilus + rgw_server: client.0 + readwrite: + bucket: rwtest + readers: 10 + writers: 3 + duration: 300 + files: + num: 10 + size: 2000 + stddev: 500 diff --git a/qa/suites/rgw/multifs/tasks/rgw_roundtrip.yaml b/qa/suites/rgw/multifs/tasks/rgw_roundtrip.yaml new file mode 100644 index 00000000..8f888313 --- /dev/null +++ b/qa/suites/rgw/multifs/tasks/rgw_roundtrip.yaml @@ -0,0 +1,17 @@ +tasks: +- install: +- ceph: +- rgw: [client.0] +- s3roundtrip: + client.0: + force-branch: ceph-nautilus + rgw_server: client.0 + roundtrip: + bucket: rttest + readers: 10 + writers: 3 + duration: 300 + files: + num: 10 + size: 2000 + stddev: 500 diff --git a/qa/suites/rgw/multifs/tasks/rgw_s3tests.yaml b/qa/suites/rgw/multifs/tasks/rgw_s3tests.yaml new file mode 100644 index 00000000..72bae1ac --- /dev/null +++ b/qa/suites/rgw/multifs/tasks/rgw_s3tests.yaml @@ -0,0 +1,13 @@ +tasks: +- install: +- ceph: +- rgw: [client.0] +- s3tests: + client.0: + force-branch: ceph-nautilus + rgw_server: client.0 +overrides: + ceph: + conf: + client: + rgw lc debug interval: 10 diff --git a/qa/suites/rgw/multifs/tasks/rgw_swift.yaml b/qa/suites/rgw/multifs/tasks/rgw_swift.yaml new file mode 100644 index 00000000..e959e0ac --- /dev/null +++ b/qa/suites/rgw/multifs/tasks/rgw_swift.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- rgw: [client.0] +- swift: + client.0: + force-branch: ceph-nautilus + rgw_server: client.0 diff --git a/qa/suites/rgw/multifs/tasks/rgw_user_quota.yaml b/qa/suites/rgw/multifs/tasks/rgw_user_quota.yaml new file mode 100644 index 00000000..ef9d6df1 --- /dev/null +++ b/qa/suites/rgw/multifs/tasks/rgw_user_quota.yaml @@ -0,0 +1,10 @@ +# Amazon/S3.pm (cpan) not available as an rpm +os_type: ubuntu +tasks: +- install: +- ceph: +- rgw: [client.0] +- workunit: + clients: + client.0: + - rgw/s3_user_quota.pl diff --git a/qa/suites/rgw/multisite/% b/qa/suites/rgw/multisite/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rgw/multisite/.qa b/qa/suites/rgw/multisite/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/multisite/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/multisite/clusters.yaml b/qa/suites/rgw/multisite/clusters.yaml new file mode 100644 index 00000000..536ef7ca --- /dev/null +++ b/qa/suites/rgw/multisite/clusters.yaml @@ -0,0 +1,3 @@ +roles: +- [c1.mon.a, c1.mgr.x, c1.osd.0, c1.osd.1, c1.osd.2, 
c1.client.0, c1.client.1]
+- [c2.mon.a, c2.mgr.x, c2.osd.0, c2.osd.1, c2.osd.2, c2.client.0, c2.client.1]
diff --git a/qa/suites/rgw/multisite/frontend b/qa/suites/rgw/multisite/frontend
new file mode 120000
index 00000000..926a53e8
--- /dev/null
+++ b/qa/suites/rgw/multisite/frontend
@@ -0,0 +1 @@
+.qa/rgw_frontend
\ No newline at end of file
diff --git a/qa/suites/rgw/multisite/omap_limits.yaml b/qa/suites/rgw/multisite/omap_limits.yaml
new file mode 100644
index 00000000..cd02c212
--- /dev/null
+++ b/qa/suites/rgw/multisite/omap_limits.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    conf:
+      # instead of expanding the matrix, run each osd with a different omap limit
+      osd.0:
+        osd_max_omap_entries_per_request: 10
+      osd.1:
+        osd_max_omap_entries_per_request: 1000
+      osd.2:
+        osd_max_omap_entries_per_request: 10000
diff --git a/qa/suites/rgw/multisite/overrides.yaml b/qa/suites/rgw/multisite/overrides.yaml
new file mode 100644
index 00000000..a04bae9a
--- /dev/null
+++ b/qa/suites/rgw/multisite/overrides.yaml
@@ -0,0 +1,15 @@
+overrides:
+  ceph:
+    wait-for-scrub: false
+    conf:
+      client:
+        debug rgw: 20
+        rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo=
+        rgw crypt require ssl: false
+        rgw sync log trim interval: 0
+        rgw curl low speed time: 300
+        rgw md log max shards: 4
+        rgw data log num shards: 4
+        rgw sync obj etag verify: true
+  rgw:
+    compression type: random
diff --git a/qa/suites/rgw/multisite/realms/.qa b/qa/suites/rgw/multisite/realms/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/rgw/multisite/realms/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rgw/multisite/realms/three-zone-plus-pubsub.yaml b/qa/suites/rgw/multisite/realms/three-zone-plus-pubsub.yaml
new file mode 100644
index 00000000..e77e5ade
--- /dev/null
+++ b/qa/suites/rgw/multisite/realms/three-zone-plus-pubsub.yaml
@@ -0,0 +1,23 @@
+overrides:
+  rgw-multisite:
+    realm:
+      name: test-realm
+      is default: true
+    zonegroups:
+      - name: test-zonegroup
+        is_master: true
+        is_default: true
+        endpoints: [c1.client.0]
+        zones:
+          - name: test-zone1
+            is_master: true
+            is_default: true
+            endpoints: [c1.client.0]
+          - name: test-zone2
+            is_default: true
+            endpoints: [c2.client.0]
+          - name: test-zone3
+            endpoints: [c1.client.1]
+          - name: test-zone4
+            endpoints: [c2.client.1]
+            is_pubsub: true
diff --git a/qa/suites/rgw/multisite/realms/three-zone.yaml b/qa/suites/rgw/multisite/realms/three-zone.yaml
new file mode 100644
index 00000000..a8a7ca1d
--- /dev/null
+++ b/qa/suites/rgw/multisite/realms/three-zone.yaml
@@ -0,0 +1,20 @@
+overrides:
+  rgw-multisite:
+    realm:
+      name: test-realm
+      is default: true
+    zonegroups:
+      - name: test-zonegroup
+        is_master: true
+        is_default: true
+        endpoints: [c1.client.0]
+        zones:
+          - name: test-zone1
+            is_master: true
+            is_default: true
+            endpoints: [c1.client.0]
+          - name: test-zone2
+            is_default: true
+            endpoints: [c2.client.0]
+          - name: test-zone3
+            endpoints: [c1.client.1]
diff --git a/qa/suites/rgw/multisite/realms/two-zonegroup.yaml b/qa/suites/rgw/multisite/realms/two-zonegroup.yaml
new file mode 100644
index 00000000..dc5a786c
--- /dev/null
+++ b/qa/suites/rgw/multisite/realms/two-zonegroup.yaml
@@ -0,0 +1,27 @@
+overrides:
+  rgw-multisite:
+    realm:
+      name: test-realm
+      is default: true
+    zonegroups:
+      - name: a
+        is_master: true
+        is_default: true
+        endpoints: [c1.client.0]
+        zones:
+          - name: a1
+            is_master: true
+            is_default: true
+            endpoints: [c1.client.0]
+          - name: a2
+            endpoints:
[c1.client.1] + - name: b + is_default: true + endpoints: [c2.client.0] + zones: + - name: b1 + is_master: true + is_default: true + endpoints: [c2.client.0] + - name: b2 + endpoints: [c2.client.1] diff --git a/qa/suites/rgw/multisite/tasks/.qa b/qa/suites/rgw/multisite/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/multisite/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/multisite/tasks/test_multi.yaml b/qa/suites/rgw/multisite/tasks/test_multi.yaml new file mode 100644 index 00000000..3247f241 --- /dev/null +++ b/qa/suites/rgw/multisite/tasks/test_multi.yaml @@ -0,0 +1,26 @@ +# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126 +#os_type: centos +# ubuntu and no valgrind until we migrate test to py3 +os_type: ubuntu + +tasks: +- install: +- ceph: {cluster: c1} +- ceph: {cluster: c2} +- rgw: + c1.client.0: + port: 8000 +# valgrind: [--tool=memcheck, --max-threads=1024] # http://tracker.ceph.com/issues/25214 + c1.client.1: + port: 8001 +# valgrind: [--tool=memcheck, --max-threads=1024] + c2.client.0: + port: 8000 +# valgrind: [--tool=memcheck, --max-threads=1024] + c2.client.1: + port: 8001 +# valgrind: [--tool=memcheck, --max-threads=1024] +- rgw-multisite: +- rgw-multisite-tests: + config: + reconfigure_delay: 60 diff --git a/qa/suites/rgw/multisite/valgrind.yaml b/qa/suites/rgw/multisite/valgrind.yaml new file mode 100644 index 00000000..d18d0fc9 --- /dev/null +++ b/qa/suites/rgw/multisite/valgrind.yaml @@ -0,0 +1,19 @@ +# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126 +#os_type: centos +# ubuntu and no valgrind until we migrate test to py3 +os_type: ubuntu + +overrides: + install: + ceph: + conf: + global: + osd heartbeat grace: 40 + mon: + mon osd crush smoke test: false + osd: + osd fast shutdown: false +# valgrind: +# mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes] +# osd: [--tool=memcheck] +# mds: [--tool=memcheck] diff --git a/qa/suites/rgw/singleton/% b/qa/suites/rgw/singleton/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rgw/singleton/.qa b/qa/suites/rgw/singleton/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/singleton/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/singleton/all/.qa b/qa/suites/rgw/singleton/all/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/singleton/all/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/singleton/all/radosgw-admin.yaml b/qa/suites/rgw/singleton/all/radosgw-admin.yaml new file mode 100644 index 00000000..010a0647 --- /dev/null +++ b/qa/suites/rgw/singleton/all/radosgw-admin.yaml @@ -0,0 +1,21 @@ +roles: +- [mon.a, osd.0] +- [mgr.x, client.0, osd.1, osd.2, osd.3] +openstack: +- volumes: # attached to each instance + count: 3 + size: 10 # GB +tasks: +- install: +- ceph: + conf: + client: + debug ms: 1 + rgw gc obj min wait: 15 + osd: + debug ms: 1 + debug objclass : 20 +- rgw: + client.0: +- radosgw-admin: +- radosgw-admin-rest: diff --git a/qa/suites/rgw/singleton/frontend/.qa b/qa/suites/rgw/singleton/frontend/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/singleton/frontend/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/singleton/frontend/civetweb.yaml b/qa/suites/rgw/singleton/frontend/civetweb.yaml new file mode 120000 index 00000000..f9115ff4 
--- /dev/null +++ b/qa/suites/rgw/singleton/frontend/civetweb.yaml @@ -0,0 +1 @@ +.qa/rgw_frontend/civetweb.yaml \ No newline at end of file diff --git a/qa/suites/rgw/singleton/objectstore b/qa/suites/rgw/singleton/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rgw/singleton/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rgw/singleton/overrides.yaml b/qa/suites/rgw/singleton/overrides.yaml new file mode 100644 index 00000000..ed4ad591 --- /dev/null +++ b/qa/suites/rgw/singleton/overrides.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + wait-for-scrub: false + conf: + client: + debug rgw: 20 diff --git a/qa/suites/rgw/singleton/rgw_pool_type b/qa/suites/rgw/singleton/rgw_pool_type new file mode 120000 index 00000000..3bbd28e9 --- /dev/null +++ b/qa/suites/rgw/singleton/rgw_pool_type @@ -0,0 +1 @@ +.qa/rgw_pool_type \ No newline at end of file diff --git a/qa/suites/rgw/singleton/supported-random-distro$ b/qa/suites/rgw/singleton/supported-random-distro$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/rgw/singleton/supported-random-distro$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/rgw/tempest/% b/qa/suites/rgw/tempest/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rgw/tempest/.qa b/qa/suites/rgw/tempest/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/tempest/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/tempest/clusters/.qa b/qa/suites/rgw/tempest/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/tempest/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/tempest/clusters/fixed-1.yaml b/qa/suites/rgw/tempest/clusters/fixed-1.yaml new file mode 120000 index 00000000..02df5dd0 --- /dev/null +++ b/qa/suites/rgw/tempest/clusters/fixed-1.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-1.yaml \ No newline at end of file diff --git a/qa/suites/rgw/tempest/frontend b/qa/suites/rgw/tempest/frontend new file mode 120000 index 00000000..926a53e8 --- /dev/null +++ b/qa/suites/rgw/tempest/frontend @@ -0,0 +1 @@ +.qa/rgw_frontend \ No newline at end of file diff --git a/qa/suites/rgw/tempest/tasks/.qa b/qa/suites/rgw/tempest/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/tempest/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/tempest/tasks/rgw_tempest.yaml b/qa/suites/rgw/tempest/tasks/rgw_tempest.yaml new file mode 100644 index 00000000..bfbb0cb4 --- /dev/null +++ b/qa/suites/rgw/tempest/tasks/rgw_tempest.yaml @@ -0,0 +1,78 @@ +# ubuntu for py2 until we move to py3 +os_type: ubuntu + +# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126 +tasks: +- install: +- ceph: +- tox: [ client.0 ] +- keystone: + client.0: + sha1: 17.0.0.0rc2 + force-branch: master + tenants: + - name: admin + description: Admin Tenant + users: + - name: admin + password: ADMIN + project: admin + roles: [ name: admin, name: Member ] + role-mappings: + - name: admin + user: admin + project: admin + services: + - name: keystone + type: identity + description: Keystone Identity Service + - name: swift + type: object-store + description: Swift Service +- rgw: + client.0: + frontend_prefix: /swift + use-keystone-role: client.0 +- tempest: + client.0: + sha1: 
d3fa46495a78160989120ba39793f7ba2e22d81c + force-branch: master + use-keystone-role: client.0 + auth: + admin_username: admin + admin_project_name: admin + admin_password: ADMIN + admin_domain_name: Default + identity: + uri: http://{keystone_public_host}:{keystone_public_port}/v2.0/ + uri_v3: http://{keystone_public_host}:{keystone_public_port}/v3/ + admin_role: admin + object-storage: + reseller_admin_role: admin + object-storage-feature-enabled: + container_sync: false + discoverability: false + blacklist: + # TODO(rzarzynski): we really need to update the list after + # merging PRs #15369 and #12704. Additionally, we would be + # able to enable the discoverability API testing above. + - .*test_list_containers_reverse_order.* + - .*test_list_container_contents_with_end_marker.* + - .*test_delete_non_empty_container.* + - .*test_container_synchronization.* + - .*test_get_object_after_expiration_time.* + - .*test_create_object_with_transfer_encoding.* +overrides: + ceph: + conf: + global: + osd_min_pg_log_entries: 10 + osd_max_pg_log_entries: 10 + client: + rgw keystone admin token: ADMIN + rgw keystone accepted roles: admin,Member + rgw keystone implicit tenants: true + rgw keystone accepted admin roles: admin + rgw swift enforce content length: true + rgw swift account in url: true + rgw swift versioning enabled: true diff --git a/qa/suites/rgw/tempest/ubuntu_latest.yaml b/qa/suites/rgw/tempest/ubuntu_latest.yaml new file mode 120000 index 00000000..3a09f9ab --- /dev/null +++ b/qa/suites/rgw/tempest/ubuntu_latest.yaml @@ -0,0 +1 @@ +.qa/distros/supported/ubuntu_latest.yaml \ No newline at end of file diff --git a/qa/suites/rgw/thrash/% b/qa/suites/rgw/thrash/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rgw/thrash/.qa b/qa/suites/rgw/thrash/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/thrash/civetweb.yaml b/qa/suites/rgw/thrash/civetweb.yaml new file mode 100644 index 00000000..5845a0e6 --- /dev/null +++ b/qa/suites/rgw/thrash/civetweb.yaml @@ -0,0 +1,3 @@ +overrides: + rgw: + frontend: civetweb diff --git a/qa/suites/rgw/thrash/clusters/.qa b/qa/suites/rgw/thrash/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/thrash/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/thrash/clusters/fixed-2.yaml b/qa/suites/rgw/thrash/clusters/fixed-2.yaml new file mode 120000 index 00000000..230ff0fd --- /dev/null +++ b/qa/suites/rgw/thrash/clusters/fixed-2.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-2.yaml \ No newline at end of file diff --git a/qa/suites/rgw/thrash/install.yaml b/qa/suites/rgw/thrash/install.yaml new file mode 100644 index 00000000..84a1d70c --- /dev/null +++ b/qa/suites/rgw/thrash/install.yaml @@ -0,0 +1,5 @@ +tasks: +- install: +- ceph: +- rgw: [client.0] + diff --git a/qa/suites/rgw/thrash/objectstore b/qa/suites/rgw/thrash/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/rgw/thrash/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rgw/thrash/thrasher/.qa b/qa/suites/rgw/thrash/thrasher/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/thrash/thrasher/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/thrash/thrasher/default.yaml b/qa/suites/rgw/thrash/thrasher/default.yaml new file mode 100644 index 
00000000..1f35f1bc --- /dev/null +++ b/qa/suites/rgw/thrash/thrasher/default.yaml @@ -0,0 +1,9 @@ +tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgnum_shrink: 1 + chance_pgpnum_fix: 1 + op_delay: 30 + chance_test_min_size: 0 + ceph_objectstore_tool: false diff --git a/qa/suites/rgw/thrash/thrashosds-health.yaml b/qa/suites/rgw/thrash/thrashosds-health.yaml new file mode 120000 index 00000000..9124eb1a --- /dev/null +++ b/qa/suites/rgw/thrash/thrashosds-health.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/suites/rgw/thrash/workload/.qa b/qa/suites/rgw/thrash/workload/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/thrash/workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/thrash/workload/rgw_bucket_quota.yaml b/qa/suites/rgw/thrash/workload/rgw_bucket_quota.yaml new file mode 100644 index 00000000..32e6af59 --- /dev/null +++ b/qa/suites/rgw/thrash/workload/rgw_bucket_quota.yaml @@ -0,0 +1,7 @@ +# Amazon/S3.pm (cpan) not available as an rpm +os_type: ubuntu +tasks: +- workunit: + clients: + client.0: + - rgw/s3_bucket_quota.pl diff --git a/qa/suites/rgw/thrash/workload/rgw_multipart_upload.yaml b/qa/suites/rgw/thrash/workload/rgw_multipart_upload.yaml new file mode 100644 index 00000000..b792336d --- /dev/null +++ b/qa/suites/rgw/thrash/workload/rgw_multipart_upload.yaml @@ -0,0 +1,7 @@ +# Amazon::S3 is not available on el7 +os_type: ubuntu +tasks: +- workunit: + clients: + client.0: + - rgw/s3_multipart_upload.pl diff --git a/qa/suites/rgw/thrash/workload/rgw_readwrite.yaml b/qa/suites/rgw/thrash/workload/rgw_readwrite.yaml new file mode 100644 index 00000000..d67d5263 --- /dev/null +++ b/qa/suites/rgw/thrash/workload/rgw_readwrite.yaml @@ -0,0 +1,14 @@ +tasks: +- s3readwrite: + client.0: + force-branch: ceph-nautilus + rgw_server: client.0 + readwrite: + bucket: rwtest + readers: 10 + writers: 3 + duration: 300 + files: + num: 10 + size: 2000 + stddev: 500 diff --git a/qa/suites/rgw/thrash/workload/rgw_roundtrip.yaml b/qa/suites/rgw/thrash/workload/rgw_roundtrip.yaml new file mode 100644 index 00000000..b4b9806d --- /dev/null +++ b/qa/suites/rgw/thrash/workload/rgw_roundtrip.yaml @@ -0,0 +1,14 @@ +tasks: +- s3roundtrip: + client.0: + force-branch: ceph-nautilus + rgw_server: client.0 + roundtrip: + bucket: rttest + readers: 10 + writers: 3 + duration: 300 + files: + num: 10 + size: 2000 + stddev: 500 diff --git a/qa/suites/rgw/thrash/workload/rgw_s3tests.yaml b/qa/suites/rgw/thrash/workload/rgw_s3tests.yaml new file mode 100644 index 00000000..91987ed0 --- /dev/null +++ b/qa/suites/rgw/thrash/workload/rgw_s3tests.yaml @@ -0,0 +1,12 @@ +tasks: +- s3tests: + client.0: + force-branch: ceph-nautilus + rgw_server: client.0 +overrides: + ceph: + conf: + client: + rgw lc debug interval: 10 + rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo= + rgw crypt require ssl: false diff --git a/qa/suites/rgw/thrash/workload/rgw_swift.yaml b/qa/suites/rgw/thrash/workload/rgw_swift.yaml new file mode 100644 index 00000000..dcb1c1ac --- /dev/null +++ b/qa/suites/rgw/thrash/workload/rgw_swift.yaml @@ -0,0 +1,5 @@ +tasks: +- swift: + client.0: + force-branch: ceph-nautilus + rgw_server: client.0 diff --git a/qa/suites/rgw/thrash/workload/rgw_user_quota.yaml b/qa/suites/rgw/thrash/workload/rgw_user_quota.yaml new file mode 100644 index 00000000..0a988827 --- /dev/null +++ 
b/qa/suites/rgw/thrash/workload/rgw_user_quota.yaml @@ -0,0 +1,7 @@ +# Amazon/S3.pm (cpan) not available as an rpm +os_type: ubuntu +tasks: +- workunit: + clients: + client.0: + - rgw/s3_user_quota.pl diff --git a/qa/suites/rgw/tools/+ b/qa/suites/rgw/tools/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rgw/tools/.qa b/qa/suites/rgw/tools/.qa new file mode 120000 index 00000000..fea2489f --- /dev/null +++ b/qa/suites/rgw/tools/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/rgw/tools/centos_latest.yaml b/qa/suites/rgw/tools/centos_latest.yaml new file mode 120000 index 00000000..bd9854e7 --- /dev/null +++ b/qa/suites/rgw/tools/centos_latest.yaml @@ -0,0 +1 @@ +.qa/distros/supported/centos_latest.yaml \ No newline at end of file diff --git a/qa/suites/rgw/tools/cluster.yaml b/qa/suites/rgw/tools/cluster.yaml new file mode 100644 index 00000000..0eab7eba --- /dev/null +++ b/qa/suites/rgw/tools/cluster.yaml @@ -0,0 +1,9 @@ +roles: +- [mon.a, osd.0, osd.1, osd.2, mgr.0, client.0] +openstack: +- volumes: # attached to each instance + count: 1 + size: 10 # GB +overrides: + rgw: + frontend: beast \ No newline at end of file diff --git a/qa/suites/rgw/tools/tasks.yaml b/qa/suites/rgw/tools/tasks.yaml new file mode 100644 index 00000000..acceb21c --- /dev/null +++ b/qa/suites/rgw/tools/tasks.yaml @@ -0,0 +1,19 @@ +tasks: +- install: +- ceph: +- rgw: + client.0: + # force rgw_dns_name to be set with the fully qualified host name; + # it will be appended to the empty string + dns-name: '' +- workunit: + clients: + client.0: + - rgw/test_rgw_orphan_list.sh +overrides: + ceph: + conf: + client: + debug rgw: 20 + debug ms: 1 + rgw enable static website: false diff --git a/qa/suites/rgw/verify/% b/qa/suites/rgw/verify/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rgw/verify/.qa b/qa/suites/rgw/verify/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/verify/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/verify/clusters/.qa b/qa/suites/rgw/verify/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/verify/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/verify/clusters/fixed-2.yaml b/qa/suites/rgw/verify/clusters/fixed-2.yaml new file mode 120000 index 00000000..230ff0fd --- /dev/null +++ b/qa/suites/rgw/verify/clusters/fixed-2.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-2.yaml \ No newline at end of file diff --git a/qa/suites/rgw/verify/frontend b/qa/suites/rgw/verify/frontend new file mode 120000 index 00000000..926a53e8 --- /dev/null +++ b/qa/suites/rgw/verify/frontend @@ -0,0 +1 @@ +.qa/rgw_frontend \ No newline at end of file diff --git a/qa/suites/rgw/verify/msgr-failures/.qa b/qa/suites/rgw/verify/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/verify/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/verify/msgr-failures/few.yaml b/qa/suites/rgw/verify/msgr-failures/few.yaml new file mode 100644 index 00000000..4326fe23 --- /dev/null +++ b/qa/suites/rgw/verify/msgr-failures/few.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/rgw/verify/objectstore b/qa/suites/rgw/verify/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ 
b/qa/suites/rgw/verify/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/rgw/verify/overrides.yaml b/qa/suites/rgw/verify/overrides.yaml new file mode 100644 index 00000000..8b0b5cff --- /dev/null +++ b/qa/suites/rgw/verify/overrides.yaml @@ -0,0 +1,10 @@ +overrides: + ceph: + conf: + client: + debug rgw: 20 + rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo= + rgw crypt require ssl: false + rgw: + compression type: random + storage classes: LUKEWARM, FROZEN diff --git a/qa/suites/rgw/verify/proto/.qa b/qa/suites/rgw/verify/proto/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/verify/proto/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/verify/proto/http.yaml b/qa/suites/rgw/verify/proto/http.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rgw/verify/proto/https.yaml b/qa/suites/rgw/verify/proto/https.yaml new file mode 100644 index 00000000..fb6b6520 --- /dev/null +++ b/qa/suites/rgw/verify/proto/https.yaml @@ -0,0 +1,20 @@ +overrides: + openssl_keys: + root: + client: client.0 + key-type: rsa:4096 + cn: teuthology + install: [client.0, client.1] + rgw.client.0: + client: client.0 + ca: root + embed-key: true + rgw.client.1: + client: client.1 + ca: root + embed-key: true + rgw: + client.0: + ssl certificate: rgw.client.0 + client.1: + ssl certificate: rgw.client.1 diff --git a/qa/suites/rgw/verify/rgw_pool_type b/qa/suites/rgw/verify/rgw_pool_type new file mode 120000 index 00000000..3bbd28e9 --- /dev/null +++ b/qa/suites/rgw/verify/rgw_pool_type @@ -0,0 +1 @@ +.qa/rgw_pool_type \ No newline at end of file diff --git a/qa/suites/rgw/verify/striping$/stripe-equals-chunk.yaml b/qa/suites/rgw/verify/striping$/stripe-equals-chunk.yaml new file mode 100644 index 00000000..9b3e20a8 --- /dev/null +++ b/qa/suites/rgw/verify/striping$/stripe-equals-chunk.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + client: + # use default values where chunk-size=stripe-size + #rgw max chunk size: 4194304 + #rgw obj stripe size: 4194304 diff --git a/qa/suites/rgw/verify/striping$/stripe-greater-than-chunk.yaml b/qa/suites/rgw/verify/striping$/stripe-greater-than-chunk.yaml new file mode 100644 index 00000000..3bf40d6d --- /dev/null +++ b/qa/suites/rgw/verify/striping$/stripe-greater-than-chunk.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + client: + rgw max chunk size: 4194304 + # stripe size greater than (and not a multiple of) chunk size + rgw obj stripe size: 6291456 diff --git a/qa/suites/rgw/verify/tasks/+ b/qa/suites/rgw/verify/tasks/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/rgw/verify/tasks/.qa b/qa/suites/rgw/verify/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/verify/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/verify/tasks/0-install.yaml b/qa/suites/rgw/verify/tasks/0-install.yaml new file mode 100644 index 00000000..ce1d9f56 --- /dev/null +++ b/qa/suites/rgw/verify/tasks/0-install.yaml @@ -0,0 +1,21 @@ +# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126 +#os_type: centos +# ubuntu and no valgrind until we migrate test to py3 +os_type: ubuntu + +tasks: +- install: +- ceph: +- openssl_keys: +- rgw: + client.0: +# valgrind: [--tool=memcheck, --max-threads=1024] # http://tracker.ceph.com/issues/25214 + +overrides: + ceph: 
+ conf: + global: + osd_min_pg_log_entries: 10 + osd_max_pg_log_entries: 10 + client: + rgw lc debug interval: 10 diff --git a/qa/suites/rgw/verify/tasks/cls.yaml b/qa/suites/rgw/verify/tasks/cls.yaml new file mode 100644 index 00000000..88a8f9d2 --- /dev/null +++ b/qa/suites/rgw/verify/tasks/cls.yaml @@ -0,0 +1,8 @@ +tasks: +- workunit: + clients: + client.0: + - cls/test_cls_lock.sh + - cls/test_cls_log.sh + - cls/test_cls_refcount.sh + - cls/test_cls_rgw.sh diff --git a/qa/suites/rgw/verify/tasks/ragweed.yaml b/qa/suites/rgw/verify/tasks/ragweed.yaml new file mode 100644 index 00000000..3e91afad --- /dev/null +++ b/qa/suites/rgw/verify/tasks/ragweed.yaml @@ -0,0 +1,6 @@ +tasks: +- ragweed: + client.0: + default-branch: ceph-nautilus + rgw_server: client.0 + stages: prepare,check diff --git a/qa/suites/rgw/verify/tasks/s3tests.yaml b/qa/suites/rgw/verify/tasks/s3tests.yaml new file mode 100644 index 00000000..642aaf27 --- /dev/null +++ b/qa/suites/rgw/verify/tasks/s3tests.yaml @@ -0,0 +1,5 @@ +tasks: +- s3tests: + client.0: + force-branch: ceph-nautilus + rgw_server: client.0 diff --git a/qa/suites/rgw/verify/tasks/swift.yaml b/qa/suites/rgw/verify/tasks/swift.yaml new file mode 100644 index 00000000..4110d899 --- /dev/null +++ b/qa/suites/rgw/verify/tasks/swift.yaml @@ -0,0 +1,8 @@ +# py2 for swift until we move to py3 +os_type: ubuntu + +tasks: +- swift: + client.0: + force-branch: ceph-nautilus + rgw_server: client.0 diff --git a/qa/suites/rgw/verify/validater/.qa b/qa/suites/rgw/verify/validater/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/rgw/verify/validater/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/rgw/verify/validater/lockdep.yaml b/qa/suites/rgw/verify/validater/lockdep.yaml new file mode 100644 index 00000000..941fe12b --- /dev/null +++ b/qa/suites/rgw/verify/validater/lockdep.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + osd: + lockdep: true + mon: + lockdep: true diff --git a/qa/suites/rgw/verify/validater/valgrind.yaml b/qa/suites/rgw/verify/validater/valgrind.yaml new file mode 100644 index 00000000..e75dcc7b --- /dev/null +++ b/qa/suites/rgw/verify/validater/valgrind.yaml @@ -0,0 +1,21 @@ +# see http://tracker.ceph.com/issues/20360 and http://tracker.ceph.com/issues/18126 +#os_type: centos +# ubuntu and no valgrind until we migrate test to py3 +os_type: ubuntu + +overrides: + install: + ceph: + conf: + global: + osd heartbeat grace: 40 + mon: + mon osd crush smoke test: false + osd: + osd fast shutdown: false +# valgrind: +# mon: [--tool=memcheck, --leak-check=full, --show-reachable=yes] +# osd: [--tool=memcheck] +# mds: [--tool=memcheck] +## https://tracker.ceph.com/issues/38621 +## mgr: [--tool=memcheck] diff --git a/qa/suites/samba/% b/qa/suites/samba/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/samba/.qa b/qa/suites/samba/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/samba/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/samba/clusters/.qa b/qa/suites/samba/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/samba/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/samba/clusters/samba-basic.yaml b/qa/suites/samba/clusters/samba-basic.yaml new file mode 100644 index 00000000..af432f61 --- /dev/null +++ b/qa/suites/samba/clusters/samba-basic.yaml @@ -0,0 +1,7 @@ +roles: +- [mon.a, mon.b, mon.c, mgr.x, mds.a, osd.0, osd.1] +- [samba.0, 
client.0, client.1] +openstack: +- volumes: # attached to each instance + count: 2 + size: 10 # GB diff --git a/qa/suites/samba/install/.qa b/qa/suites/samba/install/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/samba/install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/samba/install/install.yaml b/qa/suites/samba/install/install.yaml new file mode 100644 index 00000000..c53f9c55 --- /dev/null +++ b/qa/suites/samba/install/install.yaml @@ -0,0 +1,9 @@ +# we currently can't install Samba on RHEL; need a gitbuilder and code updates +os_type: ubuntu + +tasks: +- install: +- install: + project: samba + extra_packages: ['samba'] +- ceph: diff --git a/qa/suites/samba/mount/.qa b/qa/suites/samba/mount/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/samba/mount/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/samba/mount/fuse.yaml b/qa/suites/samba/mount/fuse.yaml new file mode 100644 index 00000000..d00ffdb4 --- /dev/null +++ b/qa/suites/samba/mount/fuse.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: [client.0] +- samba: + samba.0: + ceph: "{testdir}/mnt.0" + diff --git a/qa/suites/samba/mount/kclient.yaml b/qa/suites/samba/mount/kclient.yaml new file mode 100644 index 00000000..8baa09f8 --- /dev/null +++ b/qa/suites/samba/mount/kclient.yaml @@ -0,0 +1,14 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false +kernel: + client: + branch: testing +tasks: +- kclient: [client.0] +- samba: + samba.0: + ceph: "{testdir}/mnt.0" + diff --git a/qa/suites/samba/mount/native.yaml b/qa/suites/samba/mount/native.yaml new file mode 100644 index 00000000..09b8c1c4 --- /dev/null +++ b/qa/suites/samba/mount/native.yaml @@ -0,0 +1,2 @@ +tasks: +- samba: diff --git a/qa/suites/samba/mount/noceph.yaml b/qa/suites/samba/mount/noceph.yaml new file mode 100644 index 00000000..3cad4740 --- /dev/null +++ b/qa/suites/samba/mount/noceph.yaml @@ -0,0 +1,5 @@ +tasks: +- localdir: [client.0] +- samba: + samba.0: + ceph: "{testdir}/mnt.0" diff --git a/qa/suites/samba/objectstore b/qa/suites/samba/objectstore new file mode 120000 index 00000000..c40bd326 --- /dev/null +++ b/qa/suites/samba/objectstore @@ -0,0 +1 @@ +.qa/objectstore \ No newline at end of file diff --git a/qa/suites/samba/workload/.qa b/qa/suites/samba/workload/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/samba/workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/samba/workload/cifs-dbench.yaml b/qa/suites/samba/workload/cifs-dbench.yaml new file mode 100644 index 00000000..c13c1c09 --- /dev/null +++ b/qa/suites/samba/workload/cifs-dbench.yaml @@ -0,0 +1,8 @@ +tasks: +- cifs-mount: + client.1: + share: ceph +- workunit: + clients: + client.1: + - suites/dbench.sh diff --git a/qa/suites/samba/workload/cifs-fsstress.yaml b/qa/suites/samba/workload/cifs-fsstress.yaml new file mode 100644 index 00000000..ff003af3 --- /dev/null +++ b/qa/suites/samba/workload/cifs-fsstress.yaml @@ -0,0 +1,8 @@ +tasks: +- cifs-mount: + client.1: + share: ceph +- workunit: + clients: + client.1: + - suites/fsstress.sh diff --git a/qa/suites/samba/workload/cifs-kernel-build.yaml.disabled b/qa/suites/samba/workload/cifs-kernel-build.yaml.disabled new file mode 100644 index 00000000..ab9ff8ac --- /dev/null +++ b/qa/suites/samba/workload/cifs-kernel-build.yaml.disabled @@ -0,0 +1,9 @@ +tasks: +- cifs-mount: + client.1: + share: ceph +- workunit: + clients: + client.1: + - 
kernel_untar_build.sh
+
diff --git a/qa/suites/samba/workload/smbtorture.yaml b/qa/suites/samba/workload/smbtorture.yaml
new file mode 100644
index 00000000..823489a2
--- /dev/null
+++ b/qa/suites/samba/workload/smbtorture.yaml
@@ -0,0 +1,39 @@
+tasks:
+- pexec:
+    client.1:
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.lock
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.fdpass
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.unlink
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.attr
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.trans2
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.negnowait
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.dir1
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny1
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny2
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.deny3
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.denydos
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny1
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.ntdeny2
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcon
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.tcondev
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.vuid
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rw1
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.open
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.defer_open
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.xcopy
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.rename
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.properties
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.mangle
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.openattr
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.chkpath
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.secleak
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.disconnect
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.samba3error
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.smb
+# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdcon
+# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-holdopen
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-readwrite
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.bench-torture
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-pipe_number
+      - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-ioctl
+# - /usr/local/samba/bin/smbtorture --password=ubuntu //localhost/ceph base.scan-maxfid
diff --git a/qa/suites/smoke/.qa b/qa/suites/smoke/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/smoke/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/smoke/basic/%
b/qa/suites/smoke/basic/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/smoke/basic/.qa b/qa/suites/smoke/basic/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/smoke/basic/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/smoke/basic/clusters/+ b/qa/suites/smoke/basic/clusters/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/smoke/basic/clusters/.qa b/qa/suites/smoke/basic/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/smoke/basic/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/smoke/basic/clusters/fixed-3-cephfs.yaml b/qa/suites/smoke/basic/clusters/fixed-3-cephfs.yaml new file mode 120000 index 00000000..24480dfc --- /dev/null +++ b/qa/suites/smoke/basic/clusters/fixed-3-cephfs.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-3-cephfs.yaml \ No newline at end of file diff --git a/qa/suites/smoke/basic/clusters/openstack.yaml b/qa/suites/smoke/basic/clusters/openstack.yaml new file mode 100644 index 00000000..7d652b49 --- /dev/null +++ b/qa/suites/smoke/basic/clusters/openstack.yaml @@ -0,0 +1,8 @@ +openstack: + - machine: + disk: 40 # GB + ram: 8000 # MB + cpus: 1 + volumes: # attached to each instance + count: 4 + size: 10 # GB diff --git a/qa/suites/smoke/basic/objectstore/.qa b/qa/suites/smoke/basic/objectstore/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/smoke/basic/objectstore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/smoke/basic/objectstore/bluestore-bitmap.yaml b/qa/suites/smoke/basic/objectstore/bluestore-bitmap.yaml new file mode 120000 index 00000000..a59cf517 --- /dev/null +++ b/qa/suites/smoke/basic/objectstore/bluestore-bitmap.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-bitmap.yaml \ No newline at end of file diff --git a/qa/suites/smoke/basic/tasks/.qa b/qa/suites/smoke/basic/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_blogbench.yaml b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_blogbench.yaml new file mode 100644 index 00000000..2ee41772 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_blogbench.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: + fs: xfs +- ceph-fuse: +- workunit: + clients: + all: + - suites/blogbench.sh diff --git a/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_fsstress.yaml b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_fsstress.yaml new file mode 100644 index 00000000..b58487c0 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_fsstress.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_iozone.yaml b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_iozone.yaml new file mode 100644 index 00000000..dc6df2f7 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_iozone.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: [client.0] +- workunit: + clients: + all: + - suites/iozone.sh diff --git a/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_pjd.yaml b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_pjd.yaml new file mode 100644 index 00000000..a76154d1 --- /dev/null +++ 
b/qa/suites/smoke/basic/tasks/cfuse_workunit_suites_pjd.yaml @@ -0,0 +1,18 @@ +tasks: +- install: +- ceph: + fs: xfs + conf: + mds: + debug mds: 20 + debug ms: 1 + client: + debug client: 20 + debug ms: 1 + fuse default permissions: false + fuse set user groups: true +- ceph-fuse: +- workunit: + clients: + all: + - suites/pjd.sh diff --git a/qa/suites/smoke/basic/tasks/kclient_workunit_direct_io.yaml b/qa/suites/smoke/basic/tasks/kclient_workunit_direct_io.yaml new file mode 100644 index 00000000..21820071 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/kclient_workunit_direct_io.yaml @@ -0,0 +1,13 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: + - direct_io diff --git a/qa/suites/smoke/basic/tasks/kclient_workunit_suites_dbench.yaml b/qa/suites/smoke/basic/tasks/kclient_workunit_suites_dbench.yaml new file mode 100644 index 00000000..01d7470a --- /dev/null +++ b/qa/suites/smoke/basic/tasks/kclient_workunit_suites_dbench.yaml @@ -0,0 +1,14 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false +tasks: +- install: +- ceph: + fs: xfs +- kclient: +- workunit: + clients: + all: + - suites/dbench.sh diff --git a/qa/suites/smoke/basic/tasks/kclient_workunit_suites_fsstress.yaml b/qa/suites/smoke/basic/tasks/kclient_workunit_suites_fsstress.yaml new file mode 100644 index 00000000..42d6b97c --- /dev/null +++ b/qa/suites/smoke/basic/tasks/kclient_workunit_suites_fsstress.yaml @@ -0,0 +1,14 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false +tasks: +- install: +- ceph: + fs: xfs +- kclient: +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/smoke/basic/tasks/kclient_workunit_suites_pjd.yaml b/qa/suites/smoke/basic/tasks/kclient_workunit_suites_pjd.yaml new file mode 100644 index 00000000..6818a2a6 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/kclient_workunit_suites_pjd.yaml @@ -0,0 +1,14 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false +tasks: +- install: +- ceph: + fs: xfs +- kclient: +- workunit: + clients: + all: + - suites/pjd.sh diff --git a/qa/suites/smoke/basic/tasks/libcephfs_interface_tests.yaml b/qa/suites/smoke/basic/tasks/libcephfs_interface_tests.yaml new file mode 100644 index 00000000..aa2e7679 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/libcephfs_interface_tests.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + conf: + client: + debug ms: 1 + debug client: 20 + mds: + debug ms: 1 + debug mds: 20 +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + client.0: + - libcephfs/test.sh diff --git a/qa/suites/smoke/basic/tasks/mon_thrash.yaml b/qa/suites/smoke/basic/tasks/mon_thrash.yaml new file mode 100644 index 00000000..edaa0dd2 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/mon_thrash.yaml @@ -0,0 +1,36 @@ +overrides: + ceph: + log-whitelist: + - reached quota + - mons down + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(POOL_ + - \(CACHE_POOL_ + - \(SMALLER_PGP_NUM\) + - \(OBJECT_ + - \(SLOW_OPS\) + - \(TOO_FEW_PGS\) + - \(OSD_SLOW_PING_TIME + - slow request + conf: + global: + ms inject delay max: 1 + ms inject delay probability: 0.005 + ms inject delay type: mon + ms inject internal delays: 0.002 + ms inject socket failures: 2500 +tasks: +- install: null +- ceph: + fs: xfs +- mon_thrash: + revive_delay: 90 + thrash_delay: 1 + thrash_many: true +- workunit: + clients: + client.0: + - rados/test.sh diff --git 
a/qa/suites/smoke/basic/tasks/rados_api_tests.yaml b/qa/suites/smoke/basic/tasks/rados_api_tests.yaml new file mode 100644 index 00000000..3c772481 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rados_api_tests.yaml @@ -0,0 +1,30 @@ +tasks: +- install: null +- ceph: + fs: ext4 + log-whitelist: + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(POOL_ + - \(CACHE_POOL_ + - \(SMALLER_PGP_NUM\) + - \(OBJECT_ + - \(SLOW_OPS\) + - \(TOO_FEW_PGS\) + - reached quota + - but it is still running + - slow request + conf: + mon: + mon warn on pool no app: false +- thrashosds: + chance_pgnum_grow: 2 + chance_pgnum_shrink: 2 + chance_pgpnum_fix: 1 + timeout: 1200 +- workunit: + clients: + client.0: + - rados/test.sh diff --git a/qa/suites/smoke/basic/tasks/rados_bench.yaml b/qa/suites/smoke/basic/tasks/rados_bench.yaml new file mode 100644 index 00000000..ff0e2bbd --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rados_bench.yaml @@ -0,0 +1,47 @@ +overrides: + ceph: + conf: + global: + ms inject delay max: 1 + ms inject delay probability: 0.005 + ms inject delay type: osd + ms inject internal delays: 0.002 + ms inject socket failures: 2500 +tasks: +- install: null +- ceph: + fs: xfs + log-whitelist: + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(POOL_ + - \(CACHE_POOL_ + - \(SMALLER_PGP_NUM\) + - \(OBJECT_ + - \(SLOW_OPS\) + - \(TOO_FEW_PGS\) + - \(OSD_SLOW_PING_TIME + - slow request +- thrashosds: + chance_pgnum_grow: 2 + chance_pgnum_shrink: 2 + chance_pgpnum_fix: 1 + timeout: 1200 +- full_sequential: + - radosbench: + clients: [client.0] + time: 150 + - radosbench: + clients: [client.0] + time: 150 + - radosbench: + clients: [client.0] + time: 150 + - radosbench: + clients: [client.0] + time: 150 + - radosbench: + clients: [client.0] + time: 150 diff --git a/qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml b/qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml new file mode 100644 index 00000000..f34d3b83 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rados_cache_snaps.yaml @@ -0,0 +1,51 @@ +tasks: +- install: null +- ceph: + log-whitelist: + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(POOL_ + - \(CACHE_POOL_ + - \(SMALLER_PGP_NUM\) + - \(OBJECT_ + - \(SLOW_OPS\) + - \(TOO_FEW_PGS\) + - slow request +- thrashosds: + chance_pgnum_grow: 2 + chance_pgnum_shrink: 2 + chance_pgpnum_fix: 1 + timeout: 1200 +- exec: + client.0: + - sudo ceph osd pool create base 4 + - sudo ceph osd pool application enable base rados + - sudo ceph osd pool create cache 4 + - sudo ceph osd tier add base cache + - sudo ceph osd tier cache-mode cache writeback + - sudo ceph osd tier set-overlay base cache + - sudo ceph osd pool set cache hit_set_type bloom + - sudo ceph osd pool set cache hit_set_count 8 + - sudo ceph osd pool set cache hit_set_period 3600 + - sudo ceph osd pool set cache target_max_objects 250 +- rados: + clients: + - client.0 + objects: 500 + op_weights: + copy_from: 50 + delete: 50 + cache_evict: 50 + cache_flush: 50 + read: 100 + rollback: 50 + snap_create: 50 + snap_remove: 50 + cache_try_flush: 50 + write: 100 + ops: 4000 + pool_snaps: true + pools: + - base diff --git a/qa/suites/smoke/basic/tasks/rados_cls_all.yaml b/qa/suites/smoke/basic/tasks/rados_cls_all.yaml new file mode 100644 index 00000000..5f46a1ab --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rados_cls_all.yaml @@ -0,0 +1,16 @@ +overrides: + ceph: + conf: + osd: + osd_class_load_list: "cephfs hello journal lock log numops rbd refcount + rgw sdk timeindex user version" + 
osd_class_default_list: "cephfs hello journal lock log numops rbd refcount + rgw sdk timeindex user version" +tasks: +- install: +- ceph: + fs: xfs +- workunit: + clients: + client.0: + - cls diff --git a/qa/suites/smoke/basic/tasks/rados_ec_snaps.yaml b/qa/suites/smoke/basic/tasks/rados_ec_snaps.yaml new file mode 100644 index 00000000..3e87eefb --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rados_ec_snaps.yaml @@ -0,0 +1,41 @@ +tasks: +- install: null +- ceph: + fs: xfs + log-whitelist: + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(POOL_ + - \(CACHE_POOL_ + - \(SMALLER_PGP_NUM\) + - \(OBJECT_ + - \(SLOW_OPS\) + - \(TOO_FEW_PGS\) + - slow request +- thrashosds: + chance_pgnum_grow: 3 + chance_pgnum_shrink: 2 + chance_pgpnum_fix: 1 + timeout: 1200 +- rados: + clients: + - client.0 + ec_pool: true + max_in_flight: 64 + max_seconds: 600 + objects: 1024 + op_weights: + append: 100 + copy_from: 50 + delete: 50 + read: 100 + rmattr: 25 + rollback: 50 + setattr: 25 + snap_create: 50 + snap_remove: 50 + write: 0 + ops: 400000 + size: 16384 diff --git a/qa/suites/smoke/basic/tasks/rados_python.yaml b/qa/suites/smoke/basic/tasks/rados_python.yaml new file mode 100644 index 00000000..2a802cd6 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rados_python.yaml @@ -0,0 +1,16 @@ +tasks: +- install: +- ceph: + log-whitelist: + - but it is still running + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(PG_ + - \(OSD_ + - \(OBJECT_ + - \(POOL_APP_NOT_ENABLED\) +- ceph-fuse: +- workunit: + clients: + client.0: + - rados/test_python.sh diff --git a/qa/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml b/qa/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml new file mode 100644 index 00000000..5e82e984 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rados_workunit_loadgen_mix.yaml @@ -0,0 +1,13 @@ +tasks: +- install: +- ceph: + fs: ext4 + log-whitelist: + - but it is still running + - overall HEALTH_ + - \(POOL_APP_NOT_ENABLED\) +- ceph-fuse: +- workunit: + clients: + all: + - rados/load-gen-mix.sh diff --git a/qa/suites/smoke/basic/tasks/rbd_api_tests.yaml b/qa/suites/smoke/basic/tasks/rbd_api_tests.yaml new file mode 100644 index 00000000..f7245bab --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rbd_api_tests.yaml @@ -0,0 +1,18 @@ +tasks: +- install: +- ceph: + log-whitelist: + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(POOL_ + - \(CACHE_POOL_ + fs: xfs +- ceph-fuse: +- workunit: + clients: + client.0: + - rbd/test_librbd.sh + env: + RBD_FEATURES: "1" diff --git a/qa/suites/smoke/basic/tasks/rbd_cli_import_export.yaml b/qa/suites/smoke/basic/tasks/rbd_cli_import_export.yaml new file mode 100644 index 00000000..e9f38d3a --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rbd_cli_import_export.yaml @@ -0,0 +1,11 @@ +tasks: +- install: +- ceph: + fs: xfs +- ceph-fuse: +- workunit: + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format diff --git a/qa/suites/smoke/basic/tasks/rbd_fsx.yaml b/qa/suites/smoke/basic/tasks/rbd_fsx.yaml new file mode 100644 index 00000000..59345670 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rbd_fsx.yaml @@ -0,0 +1,30 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(POOL_ + - \(CACHE_POOL_ + - \(SMALLER_PGP_NUM\) + - \(OBJECT_ + - \(SLOW_OPS\) + - \(TOO_FEW_PGS\) + - \(OSD_SLOW_PING_TIME + - slow request + conf: + client: + rbd cache: true + global: + ms inject socket failures: 5000 +tasks: +- install: null +- ceph: + fs: xfs +- thrashosds: 
+ timeout: 1200 +- rbd_fsx: + clients: + - client.0 + ops: 2000 diff --git a/qa/suites/smoke/basic/tasks/rbd_python_api_tests.yaml b/qa/suites/smoke/basic/tasks/rbd_python_api_tests.yaml new file mode 100644 index 00000000..9714a6e4 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rbd_python_api_tests.yaml @@ -0,0 +1,10 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + client.0: + - rbd/test_librbd_python.sh + env: + RBD_FEATURES: "1" diff --git a/qa/suites/smoke/basic/tasks/rbd_workunit_suites_iozone.yaml b/qa/suites/smoke/basic/tasks/rbd_workunit_suites_iozone.yaml new file mode 100644 index 00000000..237aa4b3 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rbd_workunit_suites_iozone.yaml @@ -0,0 +1,17 @@ +overrides: + ceph: + conf: + global: + ms die on skipped message: false + client: + rbd default features: 5 +tasks: +- install: +- ceph: +- rbd: + all: + image_size: 20480 +- workunit: + clients: + all: + - suites/iozone.sh diff --git a/qa/suites/smoke/basic/tasks/rgw_ec_s3tests.yaml b/qa/suites/smoke/basic/tasks/rgw_ec_s3tests.yaml new file mode 100644 index 00000000..3871cf53 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rgw_ec_s3tests.yaml @@ -0,0 +1,20 @@ +overrides: + rgw: + ec-data-pool: true + cache-pools: true + frontend: civetweb +tasks: +- install: +- ceph: +- rgw: [client.0] +- s3tests: + client.0: + force-branch: ceph-nautilus + rgw_server: client.0 +overrides: + ceph: + conf: + client: + rgw lc debug interval: 10 + rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo= + rgw crypt require ssl: false diff --git a/qa/suites/smoke/basic/tasks/rgw_s3tests.yaml b/qa/suites/smoke/basic/tasks/rgw_s3tests.yaml new file mode 100644 index 00000000..011f34a4 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rgw_s3tests.yaml @@ -0,0 +1,16 @@ +tasks: +- install: +- ceph: + fs: xfs +- rgw: [client.0] +- s3tests: + client.0: + force-branch: ceph-nautilus + rgw_server: client.0 +overrides: + ceph: + conf: + client: + rgw lc debug interval: 10 + rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo= + rgw crypt require ssl: false diff --git a/qa/suites/smoke/basic/tasks/rgw_swift.yaml b/qa/suites/smoke/basic/tasks/rgw_swift.yaml new file mode 100644 index 00000000..2db995b0 --- /dev/null +++ b/qa/suites/smoke/basic/tasks/rgw_swift.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: + fs: ext4 +- rgw: [client.0] +- swift: + client.0: + force-branch: ceph-nautilus + rgw_server: client.0 diff --git a/qa/suites/stress/.qa b/qa/suites/stress/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/stress/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/stress/bench/% b/qa/suites/stress/bench/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/stress/bench/.qa b/qa/suites/stress/bench/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/stress/bench/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/stress/bench/clusters/.qa b/qa/suites/stress/bench/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/stress/bench/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/stress/bench/clusters/fixed-3-cephfs.yaml b/qa/suites/stress/bench/clusters/fixed-3-cephfs.yaml new file mode 120000 index 00000000..24480dfc --- 
/dev/null +++ b/qa/suites/stress/bench/clusters/fixed-3-cephfs.yaml @@ -0,0 +1 @@ +.qa/clusters/fixed-3-cephfs.yaml \ No newline at end of file diff --git a/qa/suites/stress/bench/tasks/.qa b/qa/suites/stress/bench/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/stress/bench/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/stress/bench/tasks/cfuse_workunit_snaps.yaml b/qa/suites/stress/bench/tasks/cfuse_workunit_snaps.yaml new file mode 100644 index 00000000..eafec39e --- /dev/null +++ b/qa/suites/stress/bench/tasks/cfuse_workunit_snaps.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- ceph-fuse: +- workunit: + clients: + all: + - snaps diff --git a/qa/suites/stress/bench/tasks/kclient_workunit_suites_fsx.yaml b/qa/suites/stress/bench/tasks/kclient_workunit_suites_fsx.yaml new file mode 100644 index 00000000..a0d2e765 --- /dev/null +++ b/qa/suites/stress/bench/tasks/kclient_workunit_suites_fsx.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: +- kclient: +- workunit: + clients: + all: + - suites/fsx.sh diff --git a/qa/suites/stress/thrash/% b/qa/suites/stress/thrash/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/stress/thrash/.qa b/qa/suites/stress/thrash/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/stress/thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/stress/thrash/clusters/.qa b/qa/suites/stress/thrash/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/stress/thrash/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/stress/thrash/clusters/16-osd.yaml b/qa/suites/stress/thrash/clusters/16-osd.yaml new file mode 100644 index 00000000..76232339 --- /dev/null +++ b/qa/suites/stress/thrash/clusters/16-osd.yaml @@ -0,0 +1,18 @@ +roles: +- [mon.a, mds.a, osd.0] +- [mon.b, mgr.x, osd.1] +- [mon.c, mgr.y, osd.2] +- [osd.3] +- [osd.4] +- [osd.5] +- [osd.6] +- [osd.7] +- [osd.8] +- [osd.9] +- [osd.10] +- [osd.11] +- [osd.12] +- [osd.13] +- [osd.14] +- [osd.15] +- [client.0] diff --git a/qa/suites/stress/thrash/clusters/3-osd-1-machine.yaml b/qa/suites/stress/thrash/clusters/3-osd-1-machine.yaml new file mode 100644 index 00000000..8c3556ae --- /dev/null +++ b/qa/suites/stress/thrash/clusters/3-osd-1-machine.yaml @@ -0,0 +1,3 @@ +roles: +- [mon.a, mgr.x, mds.a, osd.0, osd.1, osd.2] +- [mon.b, mon.c, client.0] diff --git a/qa/suites/stress/thrash/clusters/8-osd.yaml b/qa/suites/stress/thrash/clusters/8-osd.yaml new file mode 100644 index 00000000..9f51c6ba --- /dev/null +++ b/qa/suites/stress/thrash/clusters/8-osd.yaml @@ -0,0 +1,10 @@ +roles: +- [mon.a, mds.a, osd.0] +- [mon.b, mgr.x, osd.1] +- [mon.c, osd.2] +- [osd.3] +- [osd.4] +- [osd.5] +- [osd.6] +- [osd.7] +- [client.0] diff --git a/qa/suites/stress/thrash/thrashers/.qa b/qa/suites/stress/thrash/thrashers/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/stress/thrash/thrashers/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/stress/thrash/thrashers/default.yaml b/qa/suites/stress/thrash/thrashers/default.yaml new file mode 100644 index 00000000..e628ba6d --- /dev/null +++ b/qa/suites/stress/thrash/thrashers/default.yaml @@ -0,0 +1,7 @@ +tasks: +- install: +- ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost +- thrashosds: diff --git a/qa/suites/stress/thrash/thrashers/fast.yaml 
b/qa/suites/stress/thrash/thrashers/fast.yaml new file mode 100644 index 00000000..6bc9dff0 --- /dev/null +++ b/qa/suites/stress/thrash/thrashers/fast.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost +- thrashosds: + op_delay: 1 + chance_down: 10 diff --git a/qa/suites/stress/thrash/thrashers/more-down.yaml b/qa/suites/stress/thrash/thrashers/more-down.yaml new file mode 100644 index 00000000..6042bf6d --- /dev/null +++ b/qa/suites/stress/thrash/thrashers/more-down.yaml @@ -0,0 +1,8 @@ +tasks: +- install: +- ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost +- thrashosds: + chance_down: 50 diff --git a/qa/suites/stress/thrash/workloads/.qa b/qa/suites/stress/thrash/workloads/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/stress/thrash/workloads/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/stress/thrash/workloads/bonnie_cfuse.yaml b/qa/suites/stress/thrash/workloads/bonnie_cfuse.yaml new file mode 100644 index 00000000..912f12d6 --- /dev/null +++ b/qa/suites/stress/thrash/workloads/bonnie_cfuse.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/bonnie.sh diff --git a/qa/suites/stress/thrash/workloads/iozone_cfuse.yaml b/qa/suites/stress/thrash/workloads/iozone_cfuse.yaml new file mode 100644 index 00000000..18a6051b --- /dev/null +++ b/qa/suites/stress/thrash/workloads/iozone_cfuse.yaml @@ -0,0 +1,6 @@ +tasks: +- ceph-fuse: +- workunit: + clients: + all: + - suites/iozone.sh diff --git a/qa/suites/stress/thrash/workloads/radosbench.yaml b/qa/suites/stress/thrash/workloads/radosbench.yaml new file mode 100644 index 00000000..3940870f --- /dev/null +++ b/qa/suites/stress/thrash/workloads/radosbench.yaml @@ -0,0 +1,4 @@ +tasks: +- radosbench: + clients: [client.0] + time: 1800 diff --git a/qa/suites/stress/thrash/workloads/readwrite.yaml b/qa/suites/stress/thrash/workloads/readwrite.yaml new file mode 100644 index 00000000..c53e52b0 --- /dev/null +++ b/qa/suites/stress/thrash/workloads/readwrite.yaml @@ -0,0 +1,9 @@ +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + op_weights: + read: 45 + write: 45 + delete: 10 diff --git a/qa/suites/teuthology/.qa b/qa/suites/teuthology/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/teuthology/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/teuthology/buildpackages/% b/qa/suites/teuthology/buildpackages/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/teuthology/buildpackages/.qa b/qa/suites/teuthology/buildpackages/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/teuthology/buildpackages/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/teuthology/buildpackages/supported-all-distro b/qa/suites/teuthology/buildpackages/supported-all-distro new file mode 120000 index 00000000..ca82dde5 --- /dev/null +++ b/qa/suites/teuthology/buildpackages/supported-all-distro @@ -0,0 +1 @@ +.qa/distros/supported-all-distro \ No newline at end of file diff --git a/qa/suites/teuthology/buildpackages/tasks/.qa b/qa/suites/teuthology/buildpackages/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/teuthology/buildpackages/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/teuthology/buildpackages/tasks/branch.yaml 
b/qa/suites/teuthology/buildpackages/tasks/branch.yaml new file mode 100644 index 00000000..1dad96f3 --- /dev/null +++ b/qa/suites/teuthology/buildpackages/tasks/branch.yaml @@ -0,0 +1,10 @@ +roles: + - [mon.a, mgr.x, client.0] +tasks: + - install: + # branch has precedence over sha1 + branch: hammer + sha1: e5b6eea91cc37434f78a987d2dd1d3edd4a23f3f # dumpling + - exec: + client.0: + - ceph --version | grep 'version 0.94' diff --git a/qa/suites/teuthology/buildpackages/tasks/default.yaml b/qa/suites/teuthology/buildpackages/tasks/default.yaml new file mode 100644 index 00000000..cb583c76 --- /dev/null +++ b/qa/suites/teuthology/buildpackages/tasks/default.yaml @@ -0,0 +1,14 @@ +roles: + - [client.0] +tasks: + - install: + tag: v0.94.1 + - exec: + client.0: + - ceph --version | grep 'version 0.94.1' + - install.upgrade: + client.0: + tag: v0.94.3 + - exec: + client.0: + - ceph --version | grep 'version 0.94.3' diff --git a/qa/suites/teuthology/buildpackages/tasks/tag.yaml b/qa/suites/teuthology/buildpackages/tasks/tag.yaml new file mode 100644 index 00000000..2bfb8a99 --- /dev/null +++ b/qa/suites/teuthology/buildpackages/tasks/tag.yaml @@ -0,0 +1,11 @@ +roles: + - [mon.a, mgr.x, client.0] +tasks: + - install: + # tag has precedence over branch and sha1 + tag: v0.94.1 + branch: firefly + sha1: e5b6eea91cc37434f78a987d2dd1d3edd4a23f3f # dumpling + - exec: + client.0: + - ceph --version | grep 'version 0.94.1' diff --git a/qa/suites/teuthology/ceph/% b/qa/suites/teuthology/ceph/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/teuthology/ceph/.qa b/qa/suites/teuthology/ceph/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/teuthology/ceph/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/teuthology/ceph/clusters/.qa b/qa/suites/teuthology/ceph/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/teuthology/ceph/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/teuthology/ceph/clusters/single.yaml b/qa/suites/teuthology/ceph/clusters/single.yaml new file mode 100644 index 00000000..0c6a40d0 --- /dev/null +++ b/qa/suites/teuthology/ceph/clusters/single.yaml @@ -0,0 +1,2 @@ +roles: + - [mon.a, mgr.x, client.0] diff --git a/qa/suites/teuthology/ceph/distros b/qa/suites/teuthology/ceph/distros new file mode 120000 index 00000000..23d9e9be --- /dev/null +++ b/qa/suites/teuthology/ceph/distros @@ -0,0 +1 @@ +.qa/distros/supported \ No newline at end of file diff --git a/qa/suites/teuthology/ceph/tasks/.qa b/qa/suites/teuthology/ceph/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/teuthology/ceph/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/teuthology/ceph/tasks/teuthology.yaml b/qa/suites/teuthology/ceph/tasks/teuthology.yaml new file mode 100644 index 00000000..00081c8a --- /dev/null +++ b/qa/suites/teuthology/ceph/tasks/teuthology.yaml @@ -0,0 +1,3 @@ +tasks: + - install: + - tests: diff --git a/qa/suites/teuthology/integration.yaml b/qa/suites/teuthology/integration.yaml new file mode 100644 index 00000000..8a7f1c77 --- /dev/null +++ b/qa/suites/teuthology/integration.yaml @@ -0,0 +1,2 @@ +tasks: +- teuthology_integration: diff --git a/qa/suites/teuthology/multi-cluster/% b/qa/suites/teuthology/multi-cluster/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/teuthology/multi-cluster/.qa b/qa/suites/teuthology/multi-cluster/.qa new file mode 120000 index 
00000000..a602a035 --- /dev/null +++ b/qa/suites/teuthology/multi-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/teuthology/multi-cluster/all/.qa b/qa/suites/teuthology/multi-cluster/all/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/teuthology/multi-cluster/all/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/teuthology/multi-cluster/all/ceph.yaml b/qa/suites/teuthology/multi-cluster/all/ceph.yaml new file mode 100644 index 00000000..4659ef3d --- /dev/null +++ b/qa/suites/teuthology/multi-cluster/all/ceph.yaml @@ -0,0 +1,25 @@ +roles: +- - ceph.mon.a + - ceph.mon.b + - ceph.mgr.x + - backup.osd.0 + - backup.osd.1 + - backup.osd.2 + - backup.client.0 +- - backup.mon.a + - backup.mgr.x + - ceph.osd.0 + - ceph.osd.1 + - ceph.osd.2 + - ceph.client.0 + - client.1 + - osd.3 +tasks: +- install: +- ceph: + cluster: backup +- ceph: +- workunit: + clients: + ceph.client.0: [true.sh] + backup.client.0: [true.sh] diff --git a/qa/suites/teuthology/multi-cluster/all/thrashosds.yaml b/qa/suites/teuthology/multi-cluster/all/thrashosds.yaml new file mode 100644 index 00000000..52002f57 --- /dev/null +++ b/qa/suites/teuthology/multi-cluster/all/thrashosds.yaml @@ -0,0 +1,21 @@ +roles: +- - backup.mon.a + - backup.mon.b + - backup.mgr.x + - backup.osd.0 + - backup.osd.1 + - backup.osd.2 +- - backup.mon.c + - backup.osd.3 + - backup.osd.4 + - backup.osd.5 + - backup.client.0 +tasks: +- install: +- ceph: + cluster: backup +- thrashosds: + cluster: backup +- workunit: + clients: + all: [true.sh] diff --git a/qa/suites/teuthology/multi-cluster/all/upgrade.yaml b/qa/suites/teuthology/multi-cluster/all/upgrade.yaml new file mode 100644 index 00000000..42cd93b2 --- /dev/null +++ b/qa/suites/teuthology/multi-cluster/all/upgrade.yaml @@ -0,0 +1,51 @@ +overrides: + ceph: + log-whitelist: + - failed to encode map + conf: + mon: + mon warn on legacy crush tunables: false +roles: +- - ceph.mon.a + - ceph.mon.b + - ceph.mgr.x + - backup.osd.0 + - backup.osd.1 + - backup.osd.2 + - backup.client.0 +- - backup.mon.a + - backup.mgr.x + - ceph.osd.0 + - ceph.osd.1 + - ceph.osd.2 + - ceph.client.0 + - client.1 + - osd.3 +tasks: +- install: + branch: infernalis +- ceph: + cluster: backup +- ceph: +- workunit: + clients: + backup.client.0: [true.sh] + ceph.client.0: [true.sh] +- install.upgrade: + ceph.mon.a: + branch: jewel + backup.mon.a: + branch: jewel +- ceph.restart: [ceph.mon.a, ceph.mon.b, ceph.osd.0, ceph.osd.1, ceph.osd.2, osd.3] +- exec: + ceph.client.0: + - ceph --version | grep -F 'version 10.' + client.1: + - ceph --cluster backup --version | grep -F 'version 10.' + backup.client.0: + # cli upgraded + - ceph --cluster backup --id 0 --version | grep -F 'version 10.' + - ceph --version | grep -F 'version 10.' + # backup cluster mon not upgraded + - ceph --cluster backup --id 0 tell mon.a version | grep -F 'version 9.2.' + - ceph tell mon.a version | grep -F 'version 10.' 
diff --git a/qa/suites/teuthology/multi-cluster/all/workunit.yaml b/qa/suites/teuthology/multi-cluster/all/workunit.yaml new file mode 100644 index 00000000..b1288e38 --- /dev/null +++ b/qa/suites/teuthology/multi-cluster/all/workunit.yaml @@ -0,0 +1,23 @@ +roles: +- - backup.mon.a + - backup.mgr.x + - osd.0 + - osd.1 + - osd.2 + - client.0 + - backup.client.0 +- - mon.a + - mgr.x + - backup.osd.0 + - backup.osd.1 + - backup.osd.2 + - client.1 + - backup.client.1 +tasks: +- install: +- workunit: + clients: + all: [true.sh] +- workunit: + clients: + backup.client.1: [true.sh] diff --git a/qa/suites/teuthology/no-ceph/% b/qa/suites/teuthology/no-ceph/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/teuthology/no-ceph/.qa b/qa/suites/teuthology/no-ceph/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/teuthology/no-ceph/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/teuthology/no-ceph/clusters/.qa b/qa/suites/teuthology/no-ceph/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/teuthology/no-ceph/clusters/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/teuthology/no-ceph/clusters/single.yaml b/qa/suites/teuthology/no-ceph/clusters/single.yaml new file mode 100644 index 00000000..0c6a40d0 --- /dev/null +++ b/qa/suites/teuthology/no-ceph/clusters/single.yaml @@ -0,0 +1,2 @@ +roles: + - [mon.a, mgr.x, client.0] diff --git a/qa/suites/teuthology/no-ceph/tasks/.qa b/qa/suites/teuthology/no-ceph/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/teuthology/no-ceph/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/teuthology/no-ceph/tasks/teuthology.yaml b/qa/suites/teuthology/no-ceph/tasks/teuthology.yaml new file mode 100644 index 00000000..1391458b --- /dev/null +++ b/qa/suites/teuthology/no-ceph/tasks/teuthology.yaml @@ -0,0 +1,2 @@ +tasks: + - tests: diff --git a/qa/suites/teuthology/nop/% b/qa/suites/teuthology/nop/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/teuthology/nop/.qa b/qa/suites/teuthology/nop/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/teuthology/nop/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/teuthology/nop/all/.qa b/qa/suites/teuthology/nop/all/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/teuthology/nop/all/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/teuthology/nop/all/nop.yaml b/qa/suites/teuthology/nop/all/nop.yaml new file mode 100644 index 00000000..4a5b227e --- /dev/null +++ b/qa/suites/teuthology/nop/all/nop.yaml @@ -0,0 +1,3 @@ +tasks: + - nop: + diff --git a/qa/suites/teuthology/rgw/% b/qa/suites/teuthology/rgw/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/teuthology/rgw/.qa b/qa/suites/teuthology/rgw/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/teuthology/rgw/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/teuthology/rgw/distros b/qa/suites/teuthology/rgw/distros new file mode 120000 index 00000000..23d9e9be --- /dev/null +++ b/qa/suites/teuthology/rgw/distros @@ -0,0 +1 @@ +.qa/distros/supported \ No newline at end of file diff --git a/qa/suites/teuthology/rgw/tasks/.qa b/qa/suites/teuthology/rgw/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ 
b/qa/suites/teuthology/rgw/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/teuthology/rgw/tasks/s3tests-civetweb.yaml b/qa/suites/teuthology/rgw/tasks/s3tests-civetweb.yaml new file mode 100644 index 00000000..00a82a70 --- /dev/null +++ b/qa/suites/teuthology/rgw/tasks/s3tests-civetweb.yaml @@ -0,0 +1,24 @@ +# this runs s3tests against rgw, using civetweb +roles: +- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0] +- [mon.b, mgr.x, osd.3, osd.4, osd.5, client.1] + +tasks: +- install: + branch: master +- ceph: +- rgw: [client.0] +- s3tests: + client.0: + rgw_server: client.0 + force-branch: ceph-nautilus +overrides: + ceph: + fs: xfs + conf: + client: + debug rgw: 20 + rgw lc debug interval: 10 + rgw: + ec-data-pool: false + frontend: civetweb diff --git a/qa/suites/teuthology/rgw/tasks/s3tests-fastcgi.yaml b/qa/suites/teuthology/rgw/tasks/s3tests-fastcgi.yaml new file mode 100644 index 00000000..8e923d00 --- /dev/null +++ b/qa/suites/teuthology/rgw/tasks/s3tests-fastcgi.yaml @@ -0,0 +1,24 @@ +# this runs s3tests against rgw, using mod_fastcgi +roles: +- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0] +- [mon.b, mgr.x, osd.3, osd.4, osd.5, client.1] + +tasks: +- install: + branch: master +- ceph: +- rgw: [client.0] +- s3tests: + client.0: + rgw_server: client.0 + force-branch: ceph-nautilus +overrides: + ceph: + fs: xfs + conf: + client: + debug rgw: 20 + rgw lc debug interval: 10 + rgw: + ec-data-pool: false + frontend: apache diff --git a/qa/suites/teuthology/rgw/tasks/s3tests-fcgi.yaml b/qa/suites/teuthology/rgw/tasks/s3tests-fcgi.yaml new file mode 100644 index 00000000..d03248c2 --- /dev/null +++ b/qa/suites/teuthology/rgw/tasks/s3tests-fcgi.yaml @@ -0,0 +1,26 @@ +# this runs s3tests against rgw, using mod_proxy_fcgi +# the choice between uds or tcp with mod_proxy_fcgi depends on the distro +roles: +- [mon.a, mon.c, osd.0, osd.1, osd.2, client.0] +- [mon.b, mgr.x, osd.3, osd.4, osd.5, client.1] + +tasks: +- install: + branch: master +- ceph: +- rgw: [client.0] +- s3tests: + client.0: + rgw_server: client.0 + force-branch: ceph-nautilus +overrides: + ceph: + fs: xfs + conf: + client: + debug rgw: 20 + rgw lc debug interval: 10 + rgw: + ec-data-pool: false + frontend: apache + use_fcgi: true diff --git a/qa/suites/teuthology/workunits/.qa b/qa/suites/teuthology/workunits/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/teuthology/workunits/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/teuthology/workunits/yes.yaml b/qa/suites/teuthology/workunits/yes.yaml new file mode 100644 index 00000000..45098dbb --- /dev/null +++ b/qa/suites/teuthology/workunits/yes.yaml @@ -0,0 +1,8 @@ +roles: + - [client.0] +tasks: +- install: +- workunit: + clients: + all: + - true.sh diff --git a/qa/suites/tgt/.qa b/qa/suites/tgt/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/tgt/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/tgt/basic/% b/qa/suites/tgt/basic/% new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/qa/suites/tgt/basic/% @@ -0,0 +1 @@ + diff --git a/qa/suites/tgt/basic/.qa b/qa/suites/tgt/basic/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/tgt/basic/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/tgt/basic/clusters/.qa b/qa/suites/tgt/basic/clusters/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/tgt/basic/clusters/.qa @@ -0,0 +1 @@ 
+../.qa/ \ No newline at end of file diff --git a/qa/suites/tgt/basic/clusters/fixed-3.yaml b/qa/suites/tgt/basic/clusters/fixed-3.yaml new file mode 100644 index 00000000..5e23c9e4 --- /dev/null +++ b/qa/suites/tgt/basic/clusters/fixed-3.yaml @@ -0,0 +1,4 @@ +roles: +- [mon.a, mon.c, osd.0, osd.1, osd.2] +- [mon.b, mgr.x, mds.a, osd.3, osd.4, osd.5] +- [client.0] diff --git a/qa/suites/tgt/basic/msgr-failures/.qa b/qa/suites/tgt/basic/msgr-failures/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/tgt/basic/msgr-failures/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/tgt/basic/msgr-failures/few.yaml b/qa/suites/tgt/basic/msgr-failures/few.yaml new file mode 100644 index 00000000..4326fe23 --- /dev/null +++ b/qa/suites/tgt/basic/msgr-failures/few.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 5000 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/tgt/basic/msgr-failures/many.yaml b/qa/suites/tgt/basic/msgr-failures/many.yaml new file mode 100644 index 00000000..4caedaeb --- /dev/null +++ b/qa/suites/tgt/basic/msgr-failures/many.yaml @@ -0,0 +1,7 @@ +overrides: + ceph: + conf: + global: + ms inject socket failures: 500 + log-whitelist: + - \(OSD_SLOW_PING_TIME diff --git a/qa/suites/tgt/basic/tasks/.qa b/qa/suites/tgt/basic/tasks/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/tgt/basic/tasks/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/tgt/basic/tasks/blogbench.yaml b/qa/suites/tgt/basic/tasks/blogbench.yaml new file mode 100644 index 00000000..f77a78b6 --- /dev/null +++ b/qa/suites/tgt/basic/tasks/blogbench.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/blogbench.sh diff --git a/qa/suites/tgt/basic/tasks/bonnie.yaml b/qa/suites/tgt/basic/tasks/bonnie.yaml new file mode 100644 index 00000000..2cbfcf88 --- /dev/null +++ b/qa/suites/tgt/basic/tasks/bonnie.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/bonnie.sh diff --git a/qa/suites/tgt/basic/tasks/dbench-short.yaml b/qa/suites/tgt/basic/tasks/dbench-short.yaml new file mode 100644 index 00000000..fcb721a4 --- /dev/null +++ b/qa/suites/tgt/basic/tasks/dbench-short.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/dbench-short.sh diff --git a/qa/suites/tgt/basic/tasks/dbench.yaml b/qa/suites/tgt/basic/tasks/dbench.yaml new file mode 100644 index 00000000..7f732175 --- /dev/null +++ b/qa/suites/tgt/basic/tasks/dbench.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/dbench.sh diff --git a/qa/suites/tgt/basic/tasks/ffsb.yaml b/qa/suites/tgt/basic/tasks/ffsb.yaml new file mode 100644 index 00000000..f50a3a19 --- /dev/null +++ b/qa/suites/tgt/basic/tasks/ffsb.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/ffsb.sh diff --git a/qa/suites/tgt/basic/tasks/fio.yaml b/qa/suites/tgt/basic/tasks/fio.yaml new file mode 100644 index 00000000..e7346ce5 --- /dev/null +++ b/qa/suites/tgt/basic/tasks/fio.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/fio.sh diff --git a/qa/suites/tgt/basic/tasks/fsstress.yaml b/qa/suites/tgt/basic/tasks/fsstress.yaml new file mode 100644 index 00000000..c77f511c --- /dev/null +++ 
b/qa/suites/tgt/basic/tasks/fsstress.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/fsstress.sh diff --git a/qa/suites/tgt/basic/tasks/fsx.yaml b/qa/suites/tgt/basic/tasks/fsx.yaml new file mode 100644 index 00000000..04732c84 --- /dev/null +++ b/qa/suites/tgt/basic/tasks/fsx.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/fsx.sh diff --git a/qa/suites/tgt/basic/tasks/fsync-tester.yaml b/qa/suites/tgt/basic/tasks/fsync-tester.yaml new file mode 100644 index 00000000..ea627b7d --- /dev/null +++ b/qa/suites/tgt/basic/tasks/fsync-tester.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/fsync-tester.sh diff --git a/qa/suites/tgt/basic/tasks/iogen.yaml b/qa/suites/tgt/basic/tasks/iogen.yaml new file mode 100644 index 00000000..1065c74d --- /dev/null +++ b/qa/suites/tgt/basic/tasks/iogen.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/iogen.sh diff --git a/qa/suites/tgt/basic/tasks/iozone-sync.yaml b/qa/suites/tgt/basic/tasks/iozone-sync.yaml new file mode 100644 index 00000000..ac241a41 --- /dev/null +++ b/qa/suites/tgt/basic/tasks/iozone-sync.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/iozone-sync.sh diff --git a/qa/suites/tgt/basic/tasks/iozone.yaml b/qa/suites/tgt/basic/tasks/iozone.yaml new file mode 100644 index 00000000..cf5604c2 --- /dev/null +++ b/qa/suites/tgt/basic/tasks/iozone.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/iozone.sh diff --git a/qa/suites/tgt/basic/tasks/pjd.yaml b/qa/suites/tgt/basic/tasks/pjd.yaml new file mode 100644 index 00000000..ba5c631f --- /dev/null +++ b/qa/suites/tgt/basic/tasks/pjd.yaml @@ -0,0 +1,9 @@ +tasks: +- install: +- ceph: +- tgt: +- iscsi: +- workunit: + clients: + all: + - suites/pjd.sh diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/.qa b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/% b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/.qa b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/0-cluster/+ b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/0-cluster/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/0-cluster/.qa b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/0-cluster/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ 
b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/0-cluster/openstack.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/0-cluster/openstack.yaml new file mode 100644 index 00000000..b0f3b9b4 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/0-cluster/openstack.yaml @@ -0,0 +1,4 @@ +openstack: + - volumes: # attached to each instance + count: 4 + size: 30 # GB diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/0-cluster/start.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/0-cluster/start.yaml new file mode 100644 index 00000000..93f3c471 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/0-cluster/start.yaml @@ -0,0 +1,21 @@ +meta: +- desc: | + Install and run ceph on one node, + with a separate client 1. + Upgrade client 1 to nautilus + Run tests against old cluster +roles: +- - mon.a + - mon.b + - mon.c + - osd.0 + - osd.1 + - osd.2 + - client.0 + - mgr.x +- - client.1 +overrides: + ceph: + log-whitelist: + - failed to encode map + fs: xfs diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/1-install/.qa b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/1-install/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/1-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/1-install/nautilus-client-x.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/1-install/nautilus-client-x.yaml new file mode 100644 index 00000000..36d6c19b --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/1-install/nautilus-client-x.yaml @@ -0,0 +1,11 @@ +tasks: +- install: + branch: nautilus + exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev','python34-cephfs','python34-rados'] +- print: "**** done install nautilus" +- install.upgrade: + exclude_packages: ['ceph-test', 'ceph-test-dbg','libcephfs1', 'python-ceph'] + client.1: +- print: "**** done install.upgrade to -x on client.0" +- ceph: +- print: "**** done ceph task" diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/2-features/.qa b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/2-features/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/2-features/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/2-features/defaults.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/2-features/defaults.yaml new file mode 100644 index 00000000..dff6623a --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/2-features/defaults.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + conf: + client: + rbd default features: 61 + diff --git
a/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/2-features/layering.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/2-features/layering.yaml new file mode 100644 index 00000000..5613d015 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/2-features/layering.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + conf: + client: + rbd default features: 1 + diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/3-workload/.qa b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/3-workload/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/3-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/3-workload/rbd_notification_tests.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/3-workload/rbd_notification_tests.yaml new file mode 100644 index 00000000..68b4f51d --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/3-workload/rbd_notification_tests.yaml @@ -0,0 +1,33 @@ +tasks: +- parallel: + - workunit: + branch: nautilus + clients: + client.0: + - rbd/notify_master.sh + env: + RBD_FEATURES: "61" + - workunit: + branch: octopus + clients: + client.1: + - rbd/notify_slave.sh + env: + RBD_FEATURES: "61" +- print: "**** done rbd: old librbd -> new librbd" +- parallel: + - workunit: + branch: nautilus + clients: + client.0: + - rbd/notify_slave.sh + env: + RBD_FEATURES: "61" + - workunit: + branch: octopus + clients: + client.1: + - rbd/notify_master.sh + env: + RBD_FEATURES: "61" +- print: "**** done rbd: new librbd -> old librbd" diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/supported/.qa b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/supported/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/supported/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/supported/ubuntu_18.04.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/supported/ubuntu_18.04.yaml new file mode 120000 index 00000000..886e87fa --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus-octopus/nautilus-client-x/rbd/supported/ubuntu_18.04.yaml @@ -0,0 +1 @@ +../../../../../../distros/all/ubuntu_18.04.yaml \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/.qa b/qa/suites/upgrade-clients/client-upgrade-nautilus/.qa new file mode 120000 index 00000000..a23f7e04 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/.qa @@ -0,0 +1 @@ +../../.qa \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/.qa b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/% 
b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/.qa b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/0-cluster/+ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/0-cluster/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/0-cluster/.qa b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/0-cluster/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/0-cluster/openstack.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/0-cluster/openstack.yaml new file mode 100644 index 00000000..b0f3b9b4 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/0-cluster/openstack.yaml @@ -0,0 +1,4 @@ +openstack: + - volumes: # attached to each instance + count: 4 + size: 30 # GB diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/0-cluster/start.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/0-cluster/start.yaml new file mode 100644 index 00000000..7774a2be --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/0-cluster/start.yaml @@ -0,0 +1,23 @@ +meta: +- desc: | + Install and run ceph on one node, + with a separate client 0.
+ Upgrade client 0 to nautilus + Run tests against old cluster +roles: +- - mon.a + - mon.b + - mon.c + - osd.0 + - osd.1 + - osd.2 + - mgr.x +- - client.0 +overrides: + ceph: + log-whitelist: + - failed to encode map + - CACHE_POOL_NO_HIT_SET + - POOL_APP_NOT_ENABLED + - application not enabled + fs: xfs diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/1-install/.qa b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/1-install/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/1-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/1-install/nautilus-client-x.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/1-install/nautilus-client-x.yaml new file mode 100644 index 00000000..27a33e45 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/1-install/nautilus-client-x.yaml @@ -0,0 +1,11 @@ +tasks: +- install: + branch: nautilus + exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev','python34-cephfs','python34-rados'] +- print: "**** done install nautilus" +upgrade_workload: + sequential: + - install.upgrade: + exclude_packages: ['ceph-test', 'ceph-test-dbg','libcephfs1'] + client.0: + - print: "**** done install.upgrade to -x on client.0" diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/2-workload/.qa b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/2-workload/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/2-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/2-workload/devstack-tempest-gate.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/2-workload/devstack-tempest-gate.yaml new file mode 100644 index 00000000..cb9ce29c --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/2-workload/devstack-tempest-gate.yaml @@ -0,0 +1,56 @@ +tasks: +- sequential: + - upgrade_workload +- ceph: +- print: "**** done ceph" +- qemu: + client.0: + type: filesystem + cpus: 4 + memory: 12288 + disks: + - image_size: 30720 + - image_size: 30720 + test: qa/workunits/rbd/run_devstack_tempest.sh + image_url: https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img + cloud_config_archive: + - type: text/cloud-config + content: | + users: + - name: stack + lock_passwd: False + shell: /bin/bash + sudo: ["ALL=(root) NOPASSWD:ALL\nDefaults:stack,tempest !requiretty"] + - name: tempest + lock_passwd: False + shell: /bin/bash + sudo: + - "ALL=(root) NOPASSWD:/sbin/ip" + - "ALL=(root) NOPASSWD:/sbin/iptables" + - "ALL=(root) NOPASSWD:/usr/bin/ovsdb-client" + - | + #!/bin/bash -ex + wget -q -O- "http://git.ceph.com/?p=ceph.git;a=blob_plain;f=keys/autobuild.asc" | apt-key add - + wget -q -O /etc/apt/sources.list.d/ceph.list "https://shaman.ceph.com/api/repos/ceph/{ceph_branch}/{ceph_sha1}/ubuntu/xenial/repo" + apt-get update + + mount --bind /mnt/test_b /opt + mkdir /opt/stack + chown -R stack:stack /home/stack + chown -R stack:stack /opt/stack + + mkdir /mnt/log/stack + chmod a+rwx /mnt/log/stack + chown -R 
stack:stack /mnt/log/stack + + apt-get install -y ceph-common librbd1 + + mkdir /mnt/log/stack/ceph + chown -R stack:stack /mnt/log/stack/ceph + chmod a+rwx /mnt/log/stack/ceph + + # sanity check that the cluster is reachable from the VM + echo '[client]' >> /etc/ceph/ceph.conf + echo 'log file = /mnt/log/stack/ceph/$name.$pid.log' >> /etc/ceph/ceph.conf + rbd --debug-ms=10 --debug-rbd=20 info client.0.1 +- print: "**** done qemu task!" diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/supported/.qa b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/supported/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/supported/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/supported/centos_7.6.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/supported/centos_7.6.yaml new file mode 120000 index 00000000..c336cfc6 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/supported/centos_7.6.yaml @@ -0,0 +1 @@ +../../../../../../distros/all/centos_7.6.yaml \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/supported/rhel_7.6.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/supported/rhel_7.6.yaml new file mode 120000 index 00000000..4fd43cc0 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/supported/rhel_7.6.yaml @@ -0,0 +1 @@ +../../../../../../distros/all/rhel_7.6.yaml \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/supported/ubuntu_16.04.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/supported/ubuntu_16.04.yaml new file mode 120000 index 00000000..9dc1ea99 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/supported/ubuntu_16.04.yaml @@ -0,0 +1 @@ +../../../../../../distros/all/ubuntu_16.04.yaml \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/supported/ubuntu_18.04.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/supported/ubuntu_18.04.yaml new file mode 120000 index 00000000..886e87fa --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/basic/supported/ubuntu_18.04.yaml @@ -0,0 +1 @@ +../../../../../../distros/all/ubuntu_18.04.yaml \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/% b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/.qa b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/0-cluster/+ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/0-cluster/+ new file mode 100644 index 00000000..e69de29b diff --git 
a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/0-cluster/.qa b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/0-cluster/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/0-cluster/openstack.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/0-cluster/openstack.yaml new file mode 100644 index 00000000..b0f3b9b4 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/0-cluster/openstack.yaml @@ -0,0 +1,4 @@ +openstack: + - volumes: # attached to each instance + count: 4 + size: 30 # GB diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/0-cluster/start.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/0-cluster/start.yaml new file mode 100644 index 00000000..93f3c471 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/0-cluster/start.yaml @@ -0,0 +1,21 @@ +meta: +- desc: | + Install and run ceph on one node, + with a separate client 1. + Upgrade client 1 to nautilus + Run tests against old cluster +roles: +- - mon.a + - mon.b + - mon.c + - osd.0 + - osd.1 + - osd.2 + - client.0 + - mgr.x +- - client.1 +overrides: + ceph: + log-whitelist: + - failed to encode map + fs: xfs diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/1-install/.qa b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/1-install/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/1-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/1-install/nautilus-client-x.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/1-install/nautilus-client-x.yaml new file mode 100644 index 00000000..1fa905e8 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/1-install/nautilus-client-x.yaml @@ -0,0 +1,11 @@ +tasks: +- install: + branch: nautilus + exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev','python34-cephfs','python34-rados'] +- print: "**** done install nautilus" +- install.upgrade: + exclude_packages: ['ceph-test', 'ceph-test-dbg','libcephfs1'] + client.1: +- print: "**** done install.upgrade to -x on client.1" +- ceph: +- print: "**** done ceph task" diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/2-features/.qa b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/2-features/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/2-features/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/2-features/defaults.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/2-features/defaults.yaml new file mode 100644 index 00000000..dff6623a --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/2-features/defaults.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + conf: + client: 
+ rbd default features: 61 + diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/2-features/layering.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/2-features/layering.yaml new file mode 100644 index 00000000..5613d015 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/2-features/layering.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + conf: + client: + rbd default features: 1 + diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/3-workload/.qa b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/3-workload/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/3-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/3-workload/rbd_notification_tests.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/3-workload/rbd_notification_tests.yaml new file mode 100644 index 00000000..10c89021 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/3-workload/rbd_notification_tests.yaml @@ -0,0 +1,21 @@ +tasks: +- workunit: + branch: nautilus + clients: + client.0: + - rbd/notify_master.sh + client.1: + - rbd/notify_slave.sh + env: + RBD_FEATURES: "61" +- print: "**** done rbd: old librbd -> new librbd" +- workunit: + branch: nautilus + clients: + client.0: + - rbd/notify_slave.sh + client.1: + - rbd/notify_master.sh + env: + RBD_FEATURES: "61" +- print: "**** done rbd: new librbd -> old librbd" diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/supported/.qa b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/supported/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/supported/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/supported/centos_7.6.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/supported/centos_7.6.yaml new file mode 120000 index 00000000..c336cfc6 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/supported/centos_7.6.yaml @@ -0,0 +1 @@ +../../../../../../distros/all/centos_7.6.yaml \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/supported/rhel_7.6.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/supported/rhel_7.6.yaml new file mode 120000 index 00000000..4fd43cc0 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/supported/rhel_7.6.yaml @@ -0,0 +1 @@ +../../../../../../distros/all/rhel_7.6.yaml \ No newline at end of file diff --git a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/supported/ubuntu_16.04.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/supported/ubuntu_16.04.yaml new file mode 120000 index 00000000..9dc1ea99 --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/supported/ubuntu_16.04.yaml @@ -0,0 +1 @@ +../../../../../../distros/all/ubuntu_16.04.yaml \ No newline at end of file diff --git 
a/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/supported/ubuntu_18.04.yaml b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/supported/ubuntu_18.04.yaml new file mode 120000 index 00000000..886e87fa --- /dev/null +++ b/qa/suites/upgrade-clients/client-upgrade-nautilus/nautilus-client-x/rbd/supported/ubuntu_18.04.yaml @@ -0,0 +1 @@ +../../../../../../distros/all/ubuntu_18.04.yaml \ No newline at end of file diff --git a/qa/suites/upgrade/.qa b/qa/suites/upgrade/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/.qa b/qa/suites/upgrade/luminous-x/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/parallel/% b/qa/suites/upgrade/luminous-x/parallel/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/luminous-x/parallel/.qa b/qa/suites/upgrade/luminous-x/parallel/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/parallel/0-cluster/+ b/qa/suites/upgrade/luminous-x/parallel/0-cluster/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/luminous-x/parallel/0-cluster/.qa b/qa/suites/upgrade/luminous-x/parallel/0-cluster/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/parallel/0-cluster/openstack.yaml b/qa/suites/upgrade/luminous-x/parallel/0-cluster/openstack.yaml new file mode 100644 index 00000000..f4d1349b --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/0-cluster/openstack.yaml @@ -0,0 +1,4 @@ +openstack: + - volumes: # attached to each instance + count: 3 + size: 30 # GB diff --git a/qa/suites/upgrade/luminous-x/parallel/0-cluster/start.yaml b/qa/suites/upgrade/luminous-x/parallel/0-cluster/start.yaml new file mode 100644 index 00000000..b86ddcdc --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/0-cluster/start.yaml @@ -0,0 +1,50 @@ +meta: +- desc: | + Run ceph on two nodes, + with a separate client 0,1,2 third node. + Use xfs beneath the osds. 
+ CephFS tests running on client 2,3 +roles: +- - mon.a + - mgr.x + - mds.a + - osd.0 + - osd.1 + - osd.2 + - osd.3 +- - mon.b + - osd.4 + - osd.5 + - osd.6 + - osd.7 +- - mon.c + - osd.8 + - osd.9 + - osd.10 + - osd.11 +- - client.0 + - client.1 + - client.2 + - client.3 +overrides: + ceph: + mon_bind_msgr2: false + mon_bind_addrvec: false + log-whitelist: + - scrub mismatch + - ScrubResult + - wrongly marked + - \(POOL_APP_NOT_ENABLED\) + - \(SLOW_OPS\) + - overall HEALTH_ + - slow request + - \(MON_MSGR2_NOT_ENABLED\) + conf: + global: + enable experimental unrecoverable data corrupting features: "*" + mon: + mon warn on osd down out interval zero: false + osd: + osd class load list: "*" + osd class default list: "*" + fs: xfs diff --git a/qa/suites/upgrade/luminous-x/parallel/1-ceph-install/.qa b/qa/suites/upgrade/luminous-x/parallel/1-ceph-install/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/1-ceph-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/parallel/1-ceph-install/luminous.yaml b/qa/suites/upgrade/luminous-x/parallel/1-ceph-install/luminous.yaml new file mode 100644 index 00000000..468d07c2 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/1-ceph-install/luminous.yaml @@ -0,0 +1,54 @@ +meta: +- desc: | + install ceph/luminous latest + run workload and upgrade-sequence in parallel + upgrade the client node +tasks: +- install: + branch: luminous + exclude_packages: + - librados3 + - ceph-mgr-dashboard + - ceph-mgr-diskprediction-local + - ceph-mgr-diskprediction-cloud + - ceph-mgr-rook + - ceph-mgr-ssh + extra_packages: ['librados2'] +- print: "**** done installing luminous" +- ceph: + log-whitelist: + - overall HEALTH_ + - \(FS_ + - \(MDS_ + - \(OSD_ + - \(MON_DOWN\) + - \(CACHE_POOL_ + - \(POOL_ + - \(MGR_DOWN\) + - \(PG_ + - \(SMALLER_PGP_NUM\) + - Monitor daemon marked osd + - Behind on trimming + - Manager daemon + conf: + global: + mon warn on pool no app: false + bluestore_warn_on_legacy_statfs: false + mon pg warn min per osd: 0 +- exec: + osd.0: + - ceph osd require-osd-release luminous + - ceph osd set-require-min-compat-client luminous +- print: "**** done ceph" +- install.upgrade: + mon.a: + mon.b: + mon.c: +- print: "**** done install.upgrade non-client hosts" +- parallel: + - workload + - upgrade-sequence +- print: "**** done parallel" +- install.upgrade: + client.0: +- print: "**** done install.upgrade on client.0" diff --git a/qa/suites/upgrade/luminous-x/parallel/1.1-pg-log-overrides/normal_pg_log.yaml b/qa/suites/upgrade/luminous-x/parallel/1.1-pg-log-overrides/normal_pg_log.yaml new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/1.1-pg-log-overrides/normal_pg_log.yaml @@ -0,0 +1 @@ + diff --git a/qa/suites/upgrade/luminous-x/parallel/1.1-pg-log-overrides/short_pg_log.yaml b/qa/suites/upgrade/luminous-x/parallel/1.1-pg-log-overrides/short_pg_log.yaml new file mode 100644 index 00000000..e31e37ba --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/1.1-pg-log-overrides/short_pg_log.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + conf: + osd: + osd min pg log entries: 1 + osd max pg log entries: 2 diff --git a/qa/suites/upgrade/luminous-x/parallel/2-workload/+ b/qa/suites/upgrade/luminous-x/parallel/2-workload/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/luminous-x/parallel/2-workload/.qa b/qa/suites/upgrade/luminous-x/parallel/2-workload/.qa new file 
mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/2-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/parallel/2-workload/blogbench.yaml b/qa/suites/upgrade/luminous-x/parallel/2-workload/blogbench.yaml new file mode 100644 index 00000000..021fcc68 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/2-workload/blogbench.yaml @@ -0,0 +1,14 @@ +meta: +- desc: | + run a cephfs stress test + mount ceph-fuse on client.2 before running workunit +workload: + full_sequential: + - sequential: + - ceph-fuse: + - print: "**** done ceph-fuse 2-workload" + - workunit: + clients: + client.2: + - suites/blogbench.sh + - print: "**** done suites/blogbench.sh 2-workload" diff --git a/qa/suites/upgrade/luminous-x/parallel/2-workload/ec-rados-default.yaml b/qa/suites/upgrade/luminous-x/parallel/2-workload/ec-rados-default.yaml new file mode 100644 index 00000000..5c5a9588 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/2-workload/ec-rados-default.yaml @@ -0,0 +1,24 @@ +meta: +- desc: | + run randomized correctness test for rados operations + on an erasure-coded pool +workload: + full_sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 + - print: "**** done rados ec task" diff --git a/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_api.yaml b/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_api.yaml new file mode 100644 index 00000000..e4cc9f96 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_api.yaml @@ -0,0 +1,11 @@ +meta: +- desc: | + object class functional tests +workload: + full_sequential: + - workunit: + branch: luminous + clients: + client.0: + - cls + - print: "**** done cls 2-workload" diff --git a/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_loadgenbig.yaml b/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_loadgenbig.yaml new file mode 100644 index 00000000..874a8c5e --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/2-workload/rados_loadgenbig.yaml @@ -0,0 +1,11 @@ +meta: +- desc: | + generate read/write load with rados objects ranging from 1MB to 25MB +workload: + full_sequential: + - workunit: + branch: luminous + clients: + client.0: + - rados/load-gen-big.sh + - print: "**** done rados/load-gen-big.sh 2-workload" diff --git a/qa/suites/upgrade/luminous-x/parallel/2-workload/rgw_ragweed_prepare.yaml b/qa/suites/upgrade/luminous-x/parallel/2-workload/rgw_ragweed_prepare.yaml new file mode 100644 index 00000000..b95711fe --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/2-workload/rgw_ragweed_prepare.yaml @@ -0,0 +1,14 @@ +meta: +- desc: | + rgw ragweed prepare +workload: + full_sequential: + - sequential: + - rgw: + - client.1 + - ragweed: + client.1: + default-branch: ceph-nautilus + rgw_server: client.1 + stages: prepare + - print: "**** done rgw ragweed prepare 2-workload" diff --git a/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_api.yaml b/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_api.yaml new file mode 100644 index 00000000..81563c90 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_api.yaml @@ -0,0 +1,11 @@ +meta: +- desc: | + librbd C and C++ api tests +workload: + full_sequential: + - workunit: + branch: luminous + clients: + 
client.0: + - rbd/test_librbd.sh + - print: "**** done rbd/test_librbd.sh 2-workload" diff --git a/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_python.yaml b/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_python.yaml new file mode 100644 index 00000000..e17207d2 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/2-workload/test_rbd_python.yaml @@ -0,0 +1,11 @@ +meta: +- desc: | + librbd python api tests +workload: + full_sequential: + - workunit: + branch: luminous + clients: + client.0: + - rbd/test_librbd_python.sh + - print: "**** done rbd/test_librbd_python.sh 2-workload" diff --git a/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/.qa b/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-all.yaml b/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-all.yaml new file mode 100644 index 00000000..7fb9829a --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-all.yaml @@ -0,0 +1,22 @@ +meta: +- desc: | + upgrade the ceph cluster +upgrade-sequence: + sequential: + - ceph.restart: + daemons: [mon.a, mon.b, mon.c, mgr.x] + mon-health-to-clog: false + wait-for-healthy: false + - exec: + mon.a: + - ceph config set global mon_warn_on_msgr2_not_enabled false + - ceph.healthy: + - ceph.restart: + daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7, osd.8, osd.9, osd.10, osd.11] + wait-for-healthy: false + wait-for-osds-up: true + - ceph.restart: + daemons: [mds.a] + wait-for-healthy: false + wait-for-osds-up: true + - print: "**** done ceph.restart all" diff --git a/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 00000000..4f9aac75 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,50 @@ +meta: +- desc: | + upgrade the ceph cluster, + upgrade in two steps + step one ordering: mon.a, osd.0, osd.1, mds.a + step two ordering: mon.b, mon.c, osd.2, osd.3 + ceph is expected to be in a healthy state after each step +upgrade-sequence: + sequential: + - ceph.restart: + daemons: [mon.a] + wait-for-healthy: true + - sleep: + duration: 60 + - ceph.restart: + daemons: [mon.b, mgr.x] + wait-for-healthy: true + mon-health-to-clog: false + - sleep: + duration: 60 + - ceph.restart: + daemons: [mon.c] + wait-for-healthy: false + mon-health-to-clog: false + - exec: + mon.a: + - ceph config set global mon_warn_on_msgr2_not_enabled false + - ceph.healthy: + - sleep: + duration: 60 + - ceph.restart: + daemons: [osd.0, osd.1, osd.2, osd.3] + wait-for-healthy: true + - sleep: + duration: 60 + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - sleep: + duration: 60 + - ceph.restart: + daemons: [osd.4, osd.5, osd.6, osd.7] + wait-for-healthy: true + - sleep: + duration: 60 + - ceph.restart: + daemons: [osd.8, osd.9, osd.10, osd.11] + wait-for-healthy: true + - sleep: + duration: 60 diff --git a/qa/suites/upgrade/luminous-x/parallel/4-msgr2.yaml b/qa/suites/upgrade/luminous-x/parallel/4-msgr2.yaml new file mode 100644 index 00000000..60e3e200 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/4-msgr2.yaml @@ -0,0 +1,5 @@ +tasks: +- exec: + mon.a: + - ceph mon 
enable-msgr2 + - ceph config rm global mon_warn_on_msgr2_not_enabled diff --git a/qa/suites/upgrade/luminous-x/parallel/4-nautilus.yaml b/qa/suites/upgrade/luminous-x/parallel/4-nautilus.yaml new file mode 120000 index 00000000..9e99b7d2 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/4-nautilus.yaml @@ -0,0 +1 @@ +.qa/releases/nautilus.yaml \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/parallel/5-final-workload/+ b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/luminous-x/parallel/5-final-workload/.qa b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/parallel/5-final-workload/blogbench.yaml b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/blogbench.yaml new file mode 100644 index 00000000..205f72e8 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/blogbench.yaml @@ -0,0 +1,13 @@ +meta: +- desc: | + run a cephfs stress test + mount ceph-fuse on client.3 before running workunit +tasks: +- sequential: + - ceph-fuse: + - print: "**** done ceph-fuse 4-final-workload" + - workunit: + clients: + client.3: + - suites/blogbench.sh + - print: "**** done suites/blogbench.sh 4-final-workload" diff --git a/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados-snaps-few-objects.yaml b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados-snaps-few-objects.yaml new file mode 100644 index 00000000..d8b3dcb3 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados-snaps-few-objects.yaml @@ -0,0 +1,17 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool with snapshots +tasks: + - rados: + clients: [client.1] + ops: 4000 + objects: 50 + write_append_excl: false + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + - print: "**** done rados 4-final-workload" diff --git a/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_loadgenmix.yaml b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_loadgenmix.yaml new file mode 100644 index 00000000..922a9da4 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_loadgenmix.yaml @@ -0,0 +1,9 @@ +meta: +- desc: | + generate read/write load with rados objects ranging from 1 byte to 1MB +tasks: + - workunit: + clients: + client.1: + - rados/load-gen-mix.sh + - print: "**** done rados/load-gen-mix.sh 4-final-workload" diff --git a/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_mon_thrash.yaml b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_mon_thrash.yaml new file mode 100644 index 00000000..129d1386 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rados_mon_thrash.yaml @@ -0,0 +1,18 @@ +meta: +- desc: | + librados C and C++ api tests +overrides: + ceph: + log-whitelist: + - reached quota + - \(REQUEST_SLOW\) +tasks: + - mon_thrash: + revive_delay: 20 + thrash_delay: 1 + - print: "**** done mon_thrash 4-final-workload" + - workunit: + clients: + client.1: + - rados/test.sh + - print: "**** done rados/test.sh 4-final-workload" diff --git a/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_cls.yaml 
b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_cls.yaml new file mode 100644 index 00000000..aaf0a377 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_cls.yaml @@ -0,0 +1,9 @@ +meta: +- desc: | + rbd object class functional tests +tasks: + - workunit: + clients: + client.1: + - cls/test_cls_rbd.sh + - print: "**** done cls/test_cls_rbd.sh 4-final-workload" diff --git a/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_import_export.yaml b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_import_export.yaml new file mode 100644 index 00000000..46e13550 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rbd_import_export.yaml @@ -0,0 +1,11 @@ +meta: +- desc: | + run basic import/export cli tests for rbd +tasks: + - workunit: + clients: + client.1: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format + - print: "**** done rbd/import_export.sh 4-final-workload" diff --git a/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rgw.yaml b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rgw.yaml new file mode 100644 index 00000000..00855dc5 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rgw.yaml @@ -0,0 +1,8 @@ +overrides: + rgw: + frontend: civetweb +tasks: + - sequential: + - rgw: [client.1] + - print: "**** done rgw 4-final-workload" + - rgw-final-workload diff --git a/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rgw_ragweed_check.yaml b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rgw_ragweed_check.yaml new file mode 100644 index 00000000..c91d91f6 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rgw_ragweed_check.yaml @@ -0,0 +1,11 @@ +meta: +- desc: | + ragweed check for rgw +rgw-final-workload: + full_sequential: + - ragweed: + client.1: + default-branch: ceph-nautilus + rgw_server: client.1 + stages: check + - print: "**** done ragweed check 4-final-workload" diff --git a/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rgw_swift.yaml b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rgw_swift.yaml new file mode 100644 index 00000000..e91ccc82 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/5-final-workload/rgw_swift.yaml @@ -0,0 +1,10 @@ +meta: +- desc: | + swift api tests for rgw +rgw-final-workload: + full_sequential: + - swift: + client.1: + force-branch: ceph-nautilus + rgw_server: client.1 + - print: "**** done swift 4-final-workload" diff --git a/qa/suites/upgrade/luminous-x/parallel/objectstore b/qa/suites/upgrade/luminous-x/parallel/objectstore new file mode 120000 index 00000000..016cbf96 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/objectstore @@ -0,0 +1 @@ +../stress-split/objectstore/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/parallel/supported-all-distro b/qa/suites/upgrade/luminous-x/parallel/supported-all-distro new file mode 120000 index 00000000..ca82dde5 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/parallel/supported-all-distro @@ -0,0 +1 @@ +.qa/distros/supported-all-distro \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/stress-split-erasure-code/% b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/luminous-x/stress-split-erasure-code/.qa b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/.qa 
@@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/stress-split-erasure-code/0-cluster b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/0-cluster new file mode 120000 index 00000000..35809372 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/0-cluster @@ -0,0 +1 @@ +../stress-split/0-cluster/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/stress-split-erasure-code/1-luminous-install b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/1-luminous-install new file mode 120000 index 00000000..0479ac54 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/1-luminous-install @@ -0,0 +1 @@ +../stress-split/1-ceph-install/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/stress-split-erasure-code/1.1-pg-log-overrides/normal_pg_log.yaml b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/1.1-pg-log-overrides/normal_pg_log.yaml new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/1.1-pg-log-overrides/normal_pg_log.yaml @@ -0,0 +1 @@ + diff --git a/qa/suites/upgrade/luminous-x/stress-split-erasure-code/1.1-pg-log-overrides/short_pg_log.yaml b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/1.1-pg-log-overrides/short_pg_log.yaml new file mode 100644 index 00000000..e31e37ba --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/1.1-pg-log-overrides/short_pg_log.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + conf: + osd: + osd min pg log entries: 1 + osd max pg log entries: 2 diff --git a/qa/suites/upgrade/luminous-x/stress-split-erasure-code/2-partial-upgrade b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/2-partial-upgrade new file mode 120000 index 00000000..ab35fc1a --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/2-partial-upgrade @@ -0,0 +1 @@ +../stress-split/2-partial-upgrade/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/stress-split-erasure-code/3-thrash/.qa b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/3-thrash/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/3-thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/stress-split-erasure-code/3-thrash/default.yaml b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/3-thrash/default.yaml new file mode 100644 index 00000000..85625459 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/3-thrash/default.yaml @@ -0,0 +1,27 @@ +meta: +- desc: | + randomly kill and revive osd + small chance to increase the number of pgs +overrides: + ceph: + log-whitelist: + - but it is still running + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- parallel: + - stress-tasks +stress-tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 + min_in: 4 + chance_thrash_cluster_full: 0 + chance_thrash_pg_upmap: 0 + chance_thrash_pg_upmap_items: 0 + chance_force_recovery: 0 + aggressive_pg_num_changes: false + disable_objectstore_tool_tests: true +- print: "**** done thrashosds 3-thrash" diff --git a/qa/suites/upgrade/luminous-x/stress-split-erasure-code/4-ec-workload.yaml b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/4-ec-workload.yaml new file mode 100644 index 00000000..c89551e6 --- /dev/null +++ 
b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/4-ec-workload.yaml @@ -0,0 +1,22 @@ +meta: +- desc: | + randomized correctness test for rados operations on an erasure coded pool +stress-tasks: + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 + - print: "**** done rados ec task" diff --git a/qa/suites/upgrade/luminous-x/stress-split-erasure-code/5-finish-upgrade.yaml b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/5-finish-upgrade.yaml new file mode 120000 index 00000000..a66a7dc1 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/5-finish-upgrade.yaml @@ -0,0 +1 @@ +../stress-split/5-finish-upgrade.yaml \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/stress-split-erasure-code/7-final-workload.yaml b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/7-final-workload.yaml new file mode 100644 index 00000000..50a14650 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/7-final-workload.yaml @@ -0,0 +1,35 @@ +# +# k=3 implies a stripe_width of 1376*3 = 4128 which is different from +# the default value of 4096. It is also not a multiple of 1024*1024 and +# creates situations where rounding rules during recovery become +# necessary. +# +meta: +- desc: | + randomized correctness test for rados operations on an erasure coded pool + using the jerasure plugin with k=3 and m=1 +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + erasure_code_profile: + name: jerasure31profile + plugin: jerasure + k: 3 + m: 1 + technique: reed_sol_van + crush-failure-domain: osd + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/qa/suites/upgrade/luminous-x/stress-split-erasure-code/objectstore b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/objectstore new file mode 120000 index 00000000..016cbf96 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/objectstore @@ -0,0 +1 @@ +../stress-split/objectstore/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/stress-split-erasure-code/supported-all-distro b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/supported-all-distro new file mode 120000 index 00000000..ca82dde5 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/supported-all-distro @@ -0,0 +1 @@ +.qa/distros/supported-all-distro \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/stress-split-erasure-code/thrashosds-health.yaml b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/thrashosds-health.yaml new file mode 120000 index 00000000..9124eb1a --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split-erasure-code/thrashosds-health.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/stress-split/% b/qa/suites/upgrade/luminous-x/stress-split/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/luminous-x/stress-split/.qa b/qa/suites/upgrade/luminous-x/stress-split/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git 
a/qa/suites/upgrade/luminous-x/stress-split/0-cluster/+ b/qa/suites/upgrade/luminous-x/stress-split/0-cluster/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/luminous-x/stress-split/0-cluster/.qa b/qa/suites/upgrade/luminous-x/stress-split/0-cluster/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/stress-split/0-cluster/openstack.yaml b/qa/suites/upgrade/luminous-x/stress-split/0-cluster/openstack.yaml new file mode 100644 index 00000000..5caffc35 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/0-cluster/openstack.yaml @@ -0,0 +1,6 @@ +openstack: + - machine: + disk: 100 # GB + - volumes: # attached to each instance + count: 4 + size: 30 # GB diff --git a/qa/suites/upgrade/luminous-x/stress-split/0-cluster/start.yaml b/qa/suites/upgrade/luminous-x/stress-split/0-cluster/start.yaml new file mode 100644 index 00000000..e2096deb --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/0-cluster/start.yaml @@ -0,0 +1,40 @@ +meta: +- desc: | + Run ceph on two nodes, + with a separate client-only node. + Use xfs beneath the osds. +overrides: + ceph: + mon_bind_msgr2: false + mon_bind_addrvec: false + fs: xfs + log-whitelist: + - overall HEALTH_ + - \(MON_DOWN\) + - \(MGR_DOWN\) + - slow request + - \(MON_MSGR2_NOT_ENABLED\) + conf: + global: + enable experimental unrecoverable data corrupting features: "*" + mon warn on msgr2 not enabled: false + mon: + mon warn on osd down out interval zero: false +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - osd.3 +- - mon.b + - osd.4 + - osd.5 + - osd.6 + - osd.7 +- - mon.c +- - osd.8 + - osd.9 + - osd.10 + - osd.11 +- - client.0 diff --git a/qa/suites/upgrade/luminous-x/stress-split/1-ceph-install/.qa b/qa/suites/upgrade/luminous-x/stress-split/1-ceph-install/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/1-ceph-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/stress-split/1-ceph-install/luminous.yaml b/qa/suites/upgrade/luminous-x/stress-split/1-ceph-install/luminous.yaml new file mode 100644 index 00000000..0d7eeb38 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/1-ceph-install/luminous.yaml @@ -0,0 +1,29 @@ +meta: +- desc: install ceph/luminous latest +tasks: +- install: + branch: luminous + exclude_packages: + - librados3 + - ceph-mgr-dashboard + - ceph-mgr-diskprediction-local + - ceph-mgr-diskprediction-cloud + - ceph-mgr-rook + - ceph-mgr-ssh + extra_packages: ['librados2'] +- print: "**** done install luminous" +- ceph: + conf: + global: + bluestore_warn_on_legacy_statfs: false + mon pg warn min per osd: 0 +- exec: + osd.0: + - ceph osd require-osd-release luminous + - ceph osd set-require-min-compat-client luminous +- print: "**** done ceph" +overrides: + ceph: + conf: + mon: + mon warn on osd down out interval zero: false diff --git a/qa/suites/upgrade/luminous-x/stress-split/1.1-pg-log-overrides/normal_pg_log.yaml b/qa/suites/upgrade/luminous-x/stress-split/1.1-pg-log-overrides/normal_pg_log.yaml new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/1.1-pg-log-overrides/normal_pg_log.yaml @@ -0,0 +1 @@ + diff --git a/qa/suites/upgrade/luminous-x/stress-split/1.1-pg-log-overrides/short_pg_log.yaml 
b/qa/suites/upgrade/luminous-x/stress-split/1.1-pg-log-overrides/short_pg_log.yaml new file mode 100644 index 00000000..e31e37ba --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/1.1-pg-log-overrides/short_pg_log.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + conf: + osd: + osd min pg log entries: 1 + osd max pg log entries: 2 diff --git a/qa/suites/upgrade/luminous-x/stress-split/2-partial-upgrade/.qa b/qa/suites/upgrade/luminous-x/stress-split/2-partial-upgrade/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/2-partial-upgrade/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/stress-split/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/luminous-x/stress-split/2-partial-upgrade/firsthalf.yaml new file mode 100644 index 00000000..58ff5ac6 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/2-partial-upgrade/firsthalf.yaml @@ -0,0 +1,14 @@ +meta: +- desc: | + install upgrade ceph/-x on 2/3 of cluster + restart : mons, osd.0-7 +tasks: +- install.upgrade: + mon.a: + mon.b: + mon.c: +- print: "**** done install.upgrade of first 3 nodes" +- ceph.restart: + daemons: [mon.a,mon.b,mgr.x,osd.0,osd.1,osd.2,osd.3,osd.4,osd.5,osd.6,osd.7] + mon-health-to-clog: false +- print: "**** done ceph.restart of all mons and 2/3 of osds" diff --git a/qa/suites/upgrade/luminous-x/stress-split/3-thrash/.qa b/qa/suites/upgrade/luminous-x/stress-split/3-thrash/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/3-thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/stress-split/3-thrash/default.yaml b/qa/suites/upgrade/luminous-x/stress-split/3-thrash/default.yaml new file mode 100644 index 00000000..e0f317b0 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/3-thrash/default.yaml @@ -0,0 +1,26 @@ +meta: +- desc: | + randomly kill and revive osd + small chance to increase the number of pgs +overrides: + ceph: + log-whitelist: + - but it is still running + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- parallel: + - stress-tasks +stress-tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 + chance_thrash_cluster_full: 0 + chance_thrash_pg_upmap: 0 + chance_thrash_pg_upmap_items: 0 + disable_objectstore_tool_tests: true + chance_force_recovery: 0 + aggressive_pg_num_changes: false +- print: "**** done thrashosds 3-thrash" diff --git a/qa/suites/upgrade/luminous-x/stress-split/4-workload/+ b/qa/suites/upgrade/luminous-x/stress-split/4-workload/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/luminous-x/stress-split/4-workload/.qa b/qa/suites/upgrade/luminous-x/stress-split/4-workload/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/4-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/stress-split/4-workload/radosbench.yaml b/qa/suites/upgrade/luminous-x/stress-split/4-workload/radosbench.yaml new file mode 100644 index 00000000..115939e6 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/4-workload/radosbench.yaml @@ -0,0 +1,52 @@ +meta: +- desc: | + run randomized correctness test for rados operations + generate write load with rados bench +stress-tasks: +- full_sequential: + - radosbench: + clients: [client.0] + time: 90 + - 
radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 +- print: "**** done radosbench 7-workload" diff --git a/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-cls.yaml b/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-cls.yaml new file mode 100644 index 00000000..f8cc4d8a --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-cls.yaml @@ -0,0 +1,10 @@ +meta: +- desc: | + run basic cls tests for rbd +stress-tasks: +- workunit: + branch: luminous + clients: + client.0: + - cls/test_cls_rbd.sh +- print: "**** done cls/test_cls_rbd.sh 5-workload" diff --git a/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-import-export.yaml b/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-import-export.yaml new file mode 100644 index 00000000..30a677af --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd-import-export.yaml @@ -0,0 +1,12 @@ +meta: +- desc: | + run basic import/export cli tests for rbd +stress-tasks: +- workunit: + branch: luminous + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format +- print: "**** done rbd/import_export.sh 5-workload" diff --git a/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd_api.yaml b/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd_api.yaml new file mode 100644 index 00000000..9079aa33 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/4-workload/rbd_api.yaml @@ -0,0 +1,10 @@ +meta: +- desc: | + librbd C and C++ api tests +stress-tasks: +- workunit: + branch: luminous + clients: + client.0: + - rbd/test_librbd.sh +- print: "**** done rbd/test_librbd.sh 7-workload" diff --git a/qa/suites/upgrade/luminous-x/stress-split/4-workload/readwrite.yaml b/qa/suites/upgrade/luminous-x/stress-split/4-workload/readwrite.yaml new file mode 100644 index 00000000..41e34d6d --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/4-workload/readwrite.yaml @@ -0,0 +1,16 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool, + using only reads, writes, and deletes +stress-tasks: +- full_sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 500 + write_append_excl: false + op_weights: + read: 45 + write: 45 + delete: 10 +- print: "**** done rados/readwrite 5-workload" diff --git a/qa/suites/upgrade/luminous-x/stress-split/4-workload/snaps-few-objects.yaml b/qa/suites/upgrade/luminous-x/stress-split/4-workload/snaps-few-objects.yaml new file mode 100644 index 00000000..f56d0de0 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/4-workload/snaps-few-objects.yaml @@ -0,0 +1,18 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool with snapshot operations +stress-tasks: +- full_sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + write_append_excl: false + op_weights: + read: 100 + write: 100 + delete: 50 + 
snap_create: 50 + snap_remove: 50 + rollback: 50 +- print: "**** done rados/snaps-few-objects 5-workload" diff --git a/qa/suites/upgrade/luminous-x/stress-split/5-finish-upgrade.yaml b/qa/suites/upgrade/luminous-x/stress-split/5-finish-upgrade.yaml new file mode 100644 index 00000000..306445c8 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/5-finish-upgrade.yaml @@ -0,0 +1,15 @@ +tasks: +- install.upgrade: + osd.8: + client.0: +- ceph.restart: + daemons: [mon.c, osd.8, osd.9, osd.10, osd.11] + wait-for-healthy: false + wait-for-osds-up: true +- exec: + osd.0: + - ceph osd set pglog_hardlimit + - ceph osd dump --format=json-pretty | grep "flags" + - ceph config set global mon_warn_on_msgr2_not_enabled false +- print: "**** try to set pglog_hardlimit again, should succeed" + diff --git a/qa/suites/upgrade/luminous-x/stress-split/6-msgr2.yaml b/qa/suites/upgrade/luminous-x/stress-split/6-msgr2.yaml new file mode 100644 index 00000000..f56c8be0 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/6-msgr2.yaml @@ -0,0 +1,6 @@ +tasks: +- exec: + mon.a: + - ceph mon enable-msgr2 + - ceph config rm global mon_warn_on_msgr2_not_enabled +- ceph.healthy: diff --git a/qa/suites/upgrade/luminous-x/stress-split/6-nautilus.yaml b/qa/suites/upgrade/luminous-x/stress-split/6-nautilus.yaml new file mode 120000 index 00000000..9e99b7d2 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/6-nautilus.yaml @@ -0,0 +1 @@ +.qa/releases/nautilus.yaml \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/+ b/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/.qa b/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/rgw-swift.yaml b/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/rgw-swift.yaml new file mode 100644 index 00000000..1b5710b1 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/rgw-swift.yaml @@ -0,0 +1,12 @@ +meta: +- desc: | + swift api tests for rgw +tasks: +- rgw: + client.0: +- print: "**** done rgw 9-workload" +- swift: + client.0: + force-branch: ceph-nautilus + rgw_server: client.0 +- print: "**** done swift 9-workload" diff --git a/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/snaps-many-objects.yaml b/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/snaps-many-objects.yaml new file mode 100644 index 00000000..805bf97c --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/7-final-workload/snaps-many-objects.yaml @@ -0,0 +1,16 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool with snapshot operations +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + write_append_excl: false + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/upgrade/luminous-x/stress-split/objectstore/.qa b/qa/suites/upgrade/luminous-x/stress-split/objectstore/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/objectstore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git 
a/qa/suites/upgrade/luminous-x/stress-split/objectstore/bluestore-bitmap.yaml b/qa/suites/upgrade/luminous-x/stress-split/objectstore/bluestore-bitmap.yaml new file mode 120000 index 00000000..a59cf517 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/objectstore/bluestore-bitmap.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-bitmap.yaml \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/stress-split/objectstore/filestore-xfs.yaml b/qa/suites/upgrade/luminous-x/stress-split/objectstore/filestore-xfs.yaml new file mode 120000 index 00000000..41f2a9d1 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/objectstore/filestore-xfs.yaml @@ -0,0 +1 @@ +.qa/objectstore/filestore-xfs.yaml \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/stress-split/supported-all-distro b/qa/suites/upgrade/luminous-x/stress-split/supported-all-distro new file mode 120000 index 00000000..ca82dde5 --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/supported-all-distro @@ -0,0 +1 @@ +.qa/distros/supported-all-distro \ No newline at end of file diff --git a/qa/suites/upgrade/luminous-x/stress-split/thrashosds-health.yaml b/qa/suites/upgrade/luminous-x/stress-split/thrashosds-health.yaml new file mode 120000 index 00000000..9124eb1a --- /dev/null +++ b/qa/suites/upgrade/luminous-x/stress-split/thrashosds-health.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x-singleton/% b/qa/suites/upgrade/mimic-x-singleton/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/mimic-x-singleton/.qa b/qa/suites/upgrade/mimic-x-singleton/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x-singleton/0-cluster/+ b/qa/suites/upgrade/mimic-x-singleton/0-cluster/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/mimic-x-singleton/0-cluster/.qa b/qa/suites/upgrade/mimic-x-singleton/0-cluster/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x-singleton/0-cluster/openstack.yaml b/qa/suites/upgrade/mimic-x-singleton/0-cluster/openstack.yaml new file mode 100644 index 00000000..a0d5c201 --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/0-cluster/openstack.yaml @@ -0,0 +1,6 @@ +openstack: + - machine: + disk: 100 # GB + - volumes: # attached to each instance + count: 3 + size: 30 # GB diff --git a/qa/suites/upgrade/mimic-x-singleton/0-cluster/start.yaml b/qa/suites/upgrade/mimic-x-singleton/0-cluster/start.yaml new file mode 100644 index 00000000..3c65ea9c --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/0-cluster/start.yaml @@ -0,0 +1,37 @@ +meta: +- desc: | + Run ceph on two nodes, + with a separate client-only node. + Use xfs beneath the osds. 
+overrides: + ceph: + mon_bind_addrvec: false + mon_bind_msgr2: false + fs: xfs + conf: + global: + ms dump corrupt message level: 0 + ms bind msgr2: false + mds: + debug ms: 1 + debug mds: 20 +roles: +- - mon.a + - mgr.x + - mds.a + - osd.0 + - osd.1 + - osd.2 + - osd.3 +- - mon.b + - osd.4 + - osd.5 + - osd.6 + - osd.7 +- - mon.c + - mgr.y + - osd.8 + - osd.9 + - osd.10 + - osd.11 +- - client.0 diff --git a/qa/suites/upgrade/mimic-x-singleton/1-install/.qa b/qa/suites/upgrade/mimic-x-singleton/1-install/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/1-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x-singleton/1-install/mimic.yaml b/qa/suites/upgrade/mimic-x-singleton/1-install/mimic.yaml new file mode 100644 index 00000000..c92dbc54 --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/1-install/mimic.yaml @@ -0,0 +1,25 @@ +overrides: + ceph: + log-whitelist: + - \(MON_DOWN\) + - \(MGR_DOWN\) + - slow request +meta: +- desc: install ceph/mimic latest +tasks: +- install: + branch: mimic + exclude_packages: + - librados3 + - ceph-mgr-dashboard + - ceph-mgr-diskprediction-local + - ceph-mgr-diskprediction-cloud + - ceph-mgr-rook + - ceph-mgr-ssh + extra_packages: ['librados2'] +- print: "**** done install mimic" +- ceph: + conf: + global: + mon pg warn min per osd: 0 +- print: "**** done ceph" diff --git a/qa/suites/upgrade/mimic-x-singleton/2-partial-upgrade/.qa b/qa/suites/upgrade/mimic-x-singleton/2-partial-upgrade/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/2-partial-upgrade/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x-singleton/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/mimic-x-singleton/2-partial-upgrade/firsthalf.yaml new file mode 100644 index 00000000..a6cf4a46 --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/2-partial-upgrade/firsthalf.yaml @@ -0,0 +1,18 @@ +meta: +- desc: | + install upgrade ceph/-x on one node only + 1st half + restart : osd.0,1,2,3,4,5 +tasks: +- install.upgrade: + mon.a: + mon.b: +- print: "**** done install.upgrade osd.0" +- ceph.restart: + daemons: [mon.a, mon.b] + wait-for-healthy: false + mon-health-to-clog: false +- ceph.restart: + daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5, osd.6, osd.7] + wait-for-healthy: false +- print: "**** done ceph.restart 1st 2/3s" diff --git a/qa/suites/upgrade/mimic-x-singleton/3-thrash/.qa b/qa/suites/upgrade/mimic-x-singleton/3-thrash/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/3-thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x-singleton/3-thrash/default.yaml b/qa/suites/upgrade/mimic-x-singleton/3-thrash/default.yaml new file mode 100644 index 00000000..e36882da --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/3-thrash/default.yaml @@ -0,0 +1,22 @@ +meta: +- desc: | + randomly kill and revive osd + small chance to increase the number of pgs +overrides: + ceph: + log-whitelist: + - but it is still running + - objects unfound and apparently lost + - log bound mismatch +tasks: +- parallel: + - split_tasks +split_tasks: + sequential: + - thrashosds: + disable_objectstore_tool_tests: true + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 + aggressive_pg_num_changes: false + - print: "**** done thrashosds 3-thrash" diff --git 
a/qa/suites/upgrade/mimic-x-singleton/4-workload/+ b/qa/suites/upgrade/mimic-x-singleton/4-workload/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/mimic-x-singleton/4-workload/.qa b/qa/suites/upgrade/mimic-x-singleton/4-workload/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/4-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x-singleton/4-workload/rbd-cls.yaml b/qa/suites/upgrade/mimic-x-singleton/4-workload/rbd-cls.yaml new file mode 100644 index 00000000..c0415554 --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/4-workload/rbd-cls.yaml @@ -0,0 +1,11 @@ +meta: +- desc: | + run basic cls tests for rbd +split_tasks: + sequential: + - workunit: + branch: mimic + clients: + client.0: + - cls/test_cls_rbd.sh + - print: "**** done cls/test_cls_rbd.sh 5-workload" diff --git a/qa/suites/upgrade/mimic-x-singleton/4-workload/rbd-import-export.yaml b/qa/suites/upgrade/mimic-x-singleton/4-workload/rbd-import-export.yaml new file mode 100644 index 00000000..997f452d --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/4-workload/rbd-import-export.yaml @@ -0,0 +1,13 @@ +meta: +- desc: | + run basic import/export cli tests for rbd +split_tasks: + sequential: + - workunit: + branch: mimic + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format + - print: "**** done rbd/import_export.sh 5-workload" diff --git a/qa/suites/upgrade/mimic-x-singleton/4-workload/readwrite.yaml b/qa/suites/upgrade/mimic-x-singleton/4-workload/readwrite.yaml new file mode 100644 index 00000000..8833d4d8 --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/4-workload/readwrite.yaml @@ -0,0 +1,17 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool, + using only reads, writes, and deletes +split_tasks: + sequential: + - full_sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 500 + write_append_excl: false + op_weights: + read: 45 + write: 45 + delete: 10 + - print: "**** done rados/readwrite 5-workload" diff --git a/qa/suites/upgrade/mimic-x-singleton/4-workload/snaps-few-objects.yaml b/qa/suites/upgrade/mimic-x-singleton/4-workload/snaps-few-objects.yaml new file mode 100644 index 00000000..c96cfbe3 --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/4-workload/snaps-few-objects.yaml @@ -0,0 +1,19 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool with snapshot operations +split_tasks: + sequential: + - full_sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + write_append_excl: false + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + - print: "**** done rados/snaps-few-objects 5-workload" diff --git a/qa/suites/upgrade/mimic-x-singleton/5-workload/+ b/qa/suites/upgrade/mimic-x-singleton/5-workload/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/mimic-x-singleton/5-workload/.qa b/qa/suites/upgrade/mimic-x-singleton/5-workload/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/5-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x-singleton/5-workload/radosbench.yaml b/qa/suites/upgrade/mimic-x-singleton/5-workload/radosbench.yaml new file mode 100644 index 00000000..2cfbf1dc --- /dev/null +++ 
b/qa/suites/upgrade/mimic-x-singleton/5-workload/radosbench.yaml @@ -0,0 +1,41 @@ +meta: +- desc: | + run randomized correctness test for rados operations + generate write load with rados bench +split_tasks: + sequential: + - full_sequential: + - radosbench: + clients: [client.0] + time: 150 + - radosbench: + clients: [client.0] + time: 150 + - radosbench: + clients: [client.0] + time: 150 + - radosbench: + clients: [client.0] + time: 150 + - radosbench: + clients: [client.0] + time: 150 + - radosbench: + clients: [client.0] + time: 150 + - radosbench: + clients: [client.0] + time: 150 + - radosbench: + clients: [client.0] + time: 150 + - radosbench: + clients: [client.0] + time: 150 + - radosbench: + clients: [client.0] + time: 150 + - radosbench: + clients: [client.0] + time: 150 + - print: "**** done radosbench 7-workload" diff --git a/qa/suites/upgrade/mimic-x-singleton/5-workload/rbd_api.yaml b/qa/suites/upgrade/mimic-x-singleton/5-workload/rbd_api.yaml new file mode 100644 index 00000000..ccfc8385 --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/5-workload/rbd_api.yaml @@ -0,0 +1,11 @@ +meta: +- desc: | + librbd C and C++ api tests +split_tasks: + sequential: + - workunit: + branch: mimic + clients: + client.0: + - rbd/test_librbd.sh + - print: "**** done rbd/test_librbd.sh 7-workload" diff --git a/qa/suites/upgrade/mimic-x-singleton/6-finish-upgrade.yaml b/qa/suites/upgrade/mimic-x-singleton/6-finish-upgrade.yaml new file mode 100644 index 00000000..e7fa4b2f --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/6-finish-upgrade.yaml @@ -0,0 +1,30 @@ +meta: +- desc: | + install upgrade on remaining node + restart remaining osds +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(FS_DEGRADED\) + - \(MDS_ +tasks: +- install.upgrade: + mon.c: +- ceph.restart: + daemons: [mon.c, mgr.x, mgr.y] + wait-for-up: true + wait-for-healthy: false +- ceph.restart: + daemons: [osd.8, osd.9, osd.10, osd.11] + wait-for-up: true + wait-for-healthy: false +- ceph.restart: + daemons: [mds.a] + wait-for-up: true + wait-for-healthy: false +- exec: + mon.a: + - ceph mon enable-msgr2 +- install.upgrade: + client.0: diff --git a/qa/suites/upgrade/mimic-x-singleton/7-nautilus.yaml b/qa/suites/upgrade/mimic-x-singleton/7-nautilus.yaml new file mode 120000 index 00000000..9e99b7d2 --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/7-nautilus.yaml @@ -0,0 +1 @@ +.qa/releases/nautilus.yaml \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x-singleton/8-workload/+ b/qa/suites/upgrade/mimic-x-singleton/8-workload/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/mimic-x-singleton/8-workload/.qa b/qa/suites/upgrade/mimic-x-singleton/8-workload/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/8-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x-singleton/8-workload/rbd-python.yaml b/qa/suites/upgrade/mimic-x-singleton/8-workload/rbd-python.yaml new file mode 100644 index 00000000..56ba21d7 --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/8-workload/rbd-python.yaml @@ -0,0 +1,9 @@ +meta: +- desc: | + librbd python api tests +tasks: +- workunit: + clients: + client.0: + - rbd/test_librbd_python.sh +- print: "**** done rbd/test_librbd_python.sh 9-workload" diff --git a/qa/suites/upgrade/mimic-x-singleton/8-workload/rgw-swift.yaml b/qa/suites/upgrade/mimic-x-singleton/8-workload/rgw-swift.yaml new file mode 100644 index 
00000000..1b5710b1 --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/8-workload/rgw-swift.yaml @@ -0,0 +1,12 @@ +meta: +- desc: | + swift api tests for rgw +tasks: +- rgw: + client.0: +- print: "**** done rgw 9-workload" +- swift: + client.0: + force-branch: ceph-nautilus + rgw_server: client.0 +- print: "**** done swift 9-workload" diff --git a/qa/suites/upgrade/mimic-x-singleton/8-workload/snaps-many-objects.yaml b/qa/suites/upgrade/mimic-x-singleton/8-workload/snaps-many-objects.yaml new file mode 100644 index 00000000..805bf97c --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/8-workload/snaps-many-objects.yaml @@ -0,0 +1,16 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool with snapshot operations +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + write_append_excl: false + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/upgrade/mimic-x-singleton/supported-random-distro$ b/qa/suites/upgrade/mimic-x-singleton/supported-random-distro$ new file mode 120000 index 00000000..0862b445 --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/supported-random-distro$ @@ -0,0 +1 @@ +.qa/distros/supported-random-distro$ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x-singleton/thrashosds-health.yaml b/qa/suites/upgrade/mimic-x-singleton/thrashosds-health.yaml new file mode 120000 index 00000000..9124eb1a --- /dev/null +++ b/qa/suites/upgrade/mimic-x-singleton/thrashosds-health.yaml @@ -0,0 +1 @@ +.qa/tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/.qa b/qa/suites/upgrade/mimic-x/.qa new file mode 120000 index 00000000..fea2489f --- /dev/null +++ b/qa/suites/upgrade/mimic-x/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/parallel/% b/qa/suites/upgrade/mimic-x/parallel/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/mimic-x/parallel/.qa b/qa/suites/upgrade/mimic-x/parallel/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/parallel/0-cluster/+ b/qa/suites/upgrade/mimic-x/parallel/0-cluster/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/mimic-x/parallel/0-cluster/.qa b/qa/suites/upgrade/mimic-x/parallel/0-cluster/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/parallel/0-cluster/openstack.yaml b/qa/suites/upgrade/mimic-x/parallel/0-cluster/openstack.yaml new file mode 100644 index 00000000..f4d1349b --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/0-cluster/openstack.yaml @@ -0,0 +1,4 @@ +openstack: + - volumes: # attached to each instance + count: 3 + size: 30 # GB diff --git a/qa/suites/upgrade/mimic-x/parallel/0-cluster/start.yaml b/qa/suites/upgrade/mimic-x/parallel/0-cluster/start.yaml new file mode 100644 index 00000000..099b70c3 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/0-cluster/start.yaml @@ -0,0 +1,49 @@ +meta: +- desc: | + Run ceph on two nodes, + with a separate client 0,1,2 third node. + Use xfs beneath the osds. 
+ CephFS tests running on client 2,3 + #Note-To enable RHEL runs on ovh nodes, add the following to overrides + #ansible.cephlab: + # skip_tags: entitlements,packages,repos +roles: +- - mon.a + - mgr.x + - mds.a + - osd.0 + - osd.1 +- - mon.b + - mgr.y + - osd.2 + - osd.3 +- - mon.c + - mds.b + - osd.4 + - osd.5 +- - client.0 + - client.1 + - client.2 + - client.3 +overrides: + ceph: + mon_bind_msgr2: false + mon_bind_addrvec: false + log-whitelist: + - scrub mismatch + - ScrubResult + - wrongly marked + - \(POOL_APP_NOT_ENABLED\) + - \(SLOW_OPS\) + - overall HEALTH_ + - \(MON_MSGR2_NOT_ENABLED\) + - slow request + conf: + global: + enable experimental unrecoverable data corrupting features: "*" + mon: + mon warn on osd down out interval zero: false + osd: + osd_class_load_list: "*" + osd_class_default_list: "*" + fs: xfs diff --git a/qa/suites/upgrade/mimic-x/parallel/1-ceph-install/.qa b/qa/suites/upgrade/mimic-x/parallel/1-ceph-install/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/1-ceph-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/parallel/1-ceph-install/mimic.yaml b/qa/suites/upgrade/mimic-x/parallel/1-ceph-install/mimic.yaml new file mode 100644 index 00000000..8d816dfe --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/1-ceph-install/mimic.yaml @@ -0,0 +1,55 @@ +meta: +- desc: | + install ceph/mimic latest + run workload and upgrade-sequence in parallel + upgrade the client node +tasks: +- install: + branch: mimic + exclude_packages: + - librados3 + - ceph-mgr-dashboard + - ceph-mgr-diskprediction-local + - ceph-mgr-diskprediction-cloud + - ceph-mgr-rook + - ceph-mgr-ssh + extra_packages: ['librados2'] +- print: "**** done installing mimic" +- ceph: + log-whitelist: + - overall HEALTH_ + - \(FS_ + - \(MDS_ + - \(OSD_ + - \(MON_DOWN\) + - \(CACHE_POOL_ + - \(POOL_ + - \(MGR_DOWN\) + - \(PG_ + - \(SMALLER_PGP_NUM\) + - Monitor daemon marked osd + - Behind on trimming + - Manager daemon + - evicting unresponsive client + conf: + global: + mon warn on pool no app: false + bluestore_warn_on_legacy_statfs: false + mon pg warn min per osd: 0 +- exec: + osd.0: + - ceph osd require-osd-release mimic + - ceph osd set-require-min-compat-client mimic +- print: "**** done ceph" +- install.upgrade: + mon.a: + mon.b: + mon.c: +- print: "**** done install.upgrade non-client hosts" +- parallel: + - workload + - upgrade-sequence +- print: "**** done parallel" +- install.upgrade: + client.0: +- print: "**** done install.upgrade on client.0" diff --git a/qa/suites/upgrade/mimic-x/parallel/1.1-pg-log-overrides/normal_pg_log.yaml b/qa/suites/upgrade/mimic-x/parallel/1.1-pg-log-overrides/normal_pg_log.yaml new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/1.1-pg-log-overrides/normal_pg_log.yaml @@ -0,0 +1 @@ + diff --git a/qa/suites/upgrade/mimic-x/parallel/1.1-pg-log-overrides/short_pg_log.yaml b/qa/suites/upgrade/mimic-x/parallel/1.1-pg-log-overrides/short_pg_log.yaml new file mode 100644 index 00000000..e31e37ba --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/1.1-pg-log-overrides/short_pg_log.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + conf: + osd: + osd min pg log entries: 1 + osd max pg log entries: 2 diff --git a/qa/suites/upgrade/mimic-x/parallel/2-workload/+ b/qa/suites/upgrade/mimic-x/parallel/2-workload/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/mimic-x/parallel/2-workload/.qa 
b/qa/suites/upgrade/mimic-x/parallel/2-workload/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/2-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/parallel/2-workload/blogbench.yaml b/qa/suites/upgrade/mimic-x/parallel/2-workload/blogbench.yaml new file mode 100644 index 00000000..021fcc68 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/2-workload/blogbench.yaml @@ -0,0 +1,14 @@ +meta: +- desc: | + run a cephfs stress test + mount ceph-fuse on client.2 before running workunit +workload: + full_sequential: + - sequential: + - ceph-fuse: + - print: "**** done ceph-fuse 2-workload" + - workunit: + clients: + client.2: + - suites/blogbench.sh + - print: "**** done suites/blogbench.sh 2-workload" diff --git a/qa/suites/upgrade/mimic-x/parallel/2-workload/ec-rados-default.yaml b/qa/suites/upgrade/mimic-x/parallel/2-workload/ec-rados-default.yaml new file mode 100644 index 00000000..5c5a9588 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/2-workload/ec-rados-default.yaml @@ -0,0 +1,24 @@ +meta: +- desc: | + run randomized correctness test for rados operations + on an erasure-coded pool +workload: + full_sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 + - print: "**** done rados ec task" diff --git a/qa/suites/upgrade/mimic-x/parallel/2-workload/rados_api.yaml b/qa/suites/upgrade/mimic-x/parallel/2-workload/rados_api.yaml new file mode 100644 index 00000000..fcbdf57d --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/2-workload/rados_api.yaml @@ -0,0 +1,11 @@ +meta: +- desc: | + object class functional tests +workload: + full_sequential: + - workunit: + branch: mimic + clients: + client.0: + - cls + - print: "**** done cls 2-workload" diff --git a/qa/suites/upgrade/mimic-x/parallel/2-workload/rados_loadgenbig.yaml b/qa/suites/upgrade/mimic-x/parallel/2-workload/rados_loadgenbig.yaml new file mode 100644 index 00000000..fe88cc5b --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/2-workload/rados_loadgenbig.yaml @@ -0,0 +1,11 @@ +meta: +- desc: | + generate read/write load with rados objects ranging from 1MB to 25MB +workload: + full_sequential: + - workunit: + branch: mimic + clients: + client.0: + - rados/load-gen-big.sh + - print: "**** done rados/load-gen-big.sh 2-workload" diff --git a/qa/suites/upgrade/mimic-x/parallel/2-workload/rgw_ragweed_prepare.yaml b/qa/suites/upgrade/mimic-x/parallel/2-workload/rgw_ragweed_prepare.yaml new file mode 100644 index 00000000..b95711fe --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/2-workload/rgw_ragweed_prepare.yaml @@ -0,0 +1,14 @@ +meta: +- desc: | + rgw ragweed prepare +workload: + full_sequential: + - sequential: + - rgw: + - client.1 + - ragweed: + client.1: + default-branch: ceph-nautilus + rgw_server: client.1 + stages: prepare + - print: "**** done rgw ragweed prepare 2-workload" diff --git a/qa/suites/upgrade/mimic-x/parallel/2-workload/test_rbd_api.yaml b/qa/suites/upgrade/mimic-x/parallel/2-workload/test_rbd_api.yaml new file mode 100644 index 00000000..b2f6766e --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/2-workload/test_rbd_api.yaml @@ -0,0 +1,11 @@ +meta: +- desc: | + librbd C and C++ api tests +workload: + full_sequential: + - workunit: + branch: mimic + clients: + 
client.0: + - rbd/test_librbd.sh + - print: "**** done rbd/test_librbd.sh 2-workload" diff --git a/qa/suites/upgrade/mimic-x/parallel/2-workload/test_rbd_python.yaml b/qa/suites/upgrade/mimic-x/parallel/2-workload/test_rbd_python.yaml new file mode 100644 index 00000000..c8b4fca7 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/2-workload/test_rbd_python.yaml @@ -0,0 +1,11 @@ +meta: +- desc: | + librbd python api tests +workload: + full_sequential: + - workunit: + branch: mimic + clients: + client.0: + - rbd/test_librbd_python.sh + - print: "**** done rbd/test_librbd_python.sh 2-workload" diff --git a/qa/suites/upgrade/mimic-x/parallel/3-upgrade-sequence/.qa b/qa/suites/upgrade/mimic-x/parallel/3-upgrade-sequence/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/3-upgrade-sequence/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/parallel/3-upgrade-sequence/upgrade-all.yaml b/qa/suites/upgrade/mimic-x/parallel/3-upgrade-sequence/upgrade-all.yaml new file mode 100644 index 00000000..15a3271f --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/3-upgrade-sequence/upgrade-all.yaml @@ -0,0 +1,22 @@ +meta: +- desc: | + upgrade the ceph cluster +upgrade-sequence: + sequential: + - ceph.restart: + daemons: [mon.a, mon.b, mon.c, mgr.x, mgr.y] + mon-health-to-clog: false + wait-for-healthy: false + - exec: + mon.a: + - ceph config set global mon_warn_on_msgr2_not_enabled false + - ceph.healthy: + - ceph.restart: + daemons: [osd.0, osd.1, osd.2, osd.3, osd.4, osd.5] + wait-for-healthy: false + wait-for-osds-up: true + - ceph.restart: + daemons: [mds.a, mds.b] + wait-for-healthy: false + wait-for-osds-up: true + - print: "**** done ceph.restart all" diff --git a/qa/suites/upgrade/mimic-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml b/qa/suites/upgrade/mimic-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml new file mode 100644 index 00000000..cf8b7164 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/3-upgrade-sequence/upgrade-mon-osd-mds.yaml @@ -0,0 +1,51 @@ +meta: +- desc: | + upgrade the ceph cluster, + upgrade in two steps + step one ordering: mon.a, osd.0, osd.1, mds.a + step two ordering: mon.b, mon.c, osd.2, osd.3 + ceph expected to be in healthy state after each step +upgrade-sequence: + sequential: + - ceph.restart: + daemons: [mon.a] + wait-for-healthy: true + - sleep: + duration: 60 + - ceph.restart: + daemons: [mon.b, mgr.x] + wait-for-healthy: true + mon-health-to-clog: false + - sleep: + duration: 60 + - ceph.restart: + daemons: [mon.c] + wait-for-healthy: false + mon-health-to-clog: false + - exec: + mon.a: + - ceph config set global mon_warn_on_msgr2_not_enabled false + - ceph.healthy: + - sleep: + duration: 60 + - ceph.restart: + daemons: [osd.0, osd.1] + wait-for-healthy: true + - sleep: + duration: 60 + - ceph.restart: [mds.a, mds.b] + - sleep: + duration: 60 + - ceph.restart: + daemons: [osd.4, osd.5] + wait-for-healthy: true + - sleep: + duration: 60 + - sleep: + duration: 60 + - ceph.restart: + daemons: [osd.2, osd.3] + wait-for-healthy: false + wait-for-osds-up: true + - sleep: + duration: 60 diff --git a/qa/suites/upgrade/mimic-x/parallel/4-msgr2.yaml b/qa/suites/upgrade/mimic-x/parallel/4-msgr2.yaml new file mode 100644 index 00000000..60e3e200 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/4-msgr2.yaml @@ -0,0 +1,5 @@ +tasks: +- exec: + mon.a: + - ceph mon enable-msgr2 + - ceph config rm global mon_warn_on_msgr2_not_enabled diff --git 
a/qa/suites/upgrade/mimic-x/parallel/4-nautilus.yaml b/qa/suites/upgrade/mimic-x/parallel/4-nautilus.yaml new file mode 120000 index 00000000..9e99b7d2 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/4-nautilus.yaml @@ -0,0 +1 @@ +.qa/releases/nautilus.yaml \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/parallel/5-final-workload/+ b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/mimic-x/parallel/5-final-workload/.qa b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/parallel/5-final-workload/blogbench.yaml b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/blogbench.yaml new file mode 100644 index 00000000..d2629c03 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/blogbench.yaml @@ -0,0 +1,13 @@ +meta: +- desc: | + run a cephfs stress test + mount ceph-fuse on client.3 before running workunit +tasks: +- sequential: + - ceph-fuse: + - print: "**** done ceph-fuse 5-final-workload" + - workunit: + clients: + client.3: + - suites/blogbench.sh + - print: "**** done suites/blogbench.sh 5-final-workload" diff --git a/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rados-snaps-few-objects.yaml b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rados-snaps-few-objects.yaml new file mode 100644 index 00000000..d8b3dcb3 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rados-snaps-few-objects.yaml @@ -0,0 +1,17 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool with snapshots +tasks: + - rados: + clients: [client.1] + ops: 4000 + objects: 50 + write_append_excl: false + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + - print: "**** done rados 4-final-workload" diff --git a/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rados_loadgenmix.yaml b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rados_loadgenmix.yaml new file mode 100644 index 00000000..922a9da4 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rados_loadgenmix.yaml @@ -0,0 +1,9 @@ +meta: +- desc: | + generate read/write load with rados objects ranging from 1 byte to 1MB +tasks: + - workunit: + clients: + client.1: + - rados/load-gen-mix.sh + - print: "**** done rados/load-gen-mix.sh 4-final-workload" diff --git a/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rados_mon_thrash.yaml b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rados_mon_thrash.yaml new file mode 100644 index 00000000..129d1386 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rados_mon_thrash.yaml @@ -0,0 +1,18 @@ +meta: +- desc: | + librados C and C++ api tests +overrides: + ceph: + log-whitelist: + - reached quota + - \(REQUEST_SLOW\) +tasks: + - mon_thrash: + revive_delay: 20 + thrash_delay: 1 + - print: "**** done mon_thrash 4-final-workload" + - workunit: + clients: + client.1: + - rados/test.sh + - print: "**** done rados/test.sh 4-final-workload" diff --git a/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rbd_cls.yaml b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rbd_cls.yaml new file mode 100644 index 00000000..aaf0a377 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rbd_cls.yaml @@ -0,0 
+1,9 @@ +meta: +- desc: | + rbd object class functional tests +tasks: + - workunit: + clients: + client.1: + - cls/test_cls_rbd.sh + - print: "**** done cls/test_cls_rbd.sh 4-final-workload" diff --git a/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rbd_import_export.yaml b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rbd_import_export.yaml new file mode 100644 index 00000000..46e13550 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rbd_import_export.yaml @@ -0,0 +1,11 @@ +meta: +- desc: | + run basic import/export cli tests for rbd +tasks: + - workunit: + clients: + client.1: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format + - print: "**** done rbd/import_export.sh 4-final-workload" diff --git a/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rgw.yaml b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rgw.yaml new file mode 100644 index 00000000..00855dc5 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rgw.yaml @@ -0,0 +1,8 @@ +overrides: + rgw: + frontend: civetweb +tasks: + - sequential: + - rgw: [client.1] + - print: "**** done rgw 4-final-workload" + - rgw-final-workload diff --git a/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rgw_ragweed_check.yaml b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rgw_ragweed_check.yaml new file mode 100644 index 00000000..c91d91f6 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rgw_ragweed_check.yaml @@ -0,0 +1,11 @@ +meta: +- desc: | + ragweed check for rgw +rgw-final-workload: + full_sequential: + - ragweed: + client.1: + default-branch: ceph-nautilus + rgw_server: client.1 + stages: check + - print: "**** done ragweed check 4-final-workload" diff --git a/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rgw_swift.yaml b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rgw_swift.yaml new file mode 100644 index 00000000..e91ccc82 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/5-final-workload/rgw_swift.yaml @@ -0,0 +1,10 @@ +meta: +- desc: | + swift api tests for rgw +rgw-final-workload: + full_sequential: + - swift: + client.1: + force-branch: ceph-nautilus + rgw_server: client.1 + - print: "**** done swift 4-final-workload" diff --git a/qa/suites/upgrade/mimic-x/parallel/objectstore b/qa/suites/upgrade/mimic-x/parallel/objectstore new file mode 120000 index 00000000..016cbf96 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/objectstore @@ -0,0 +1 @@ +../stress-split/objectstore/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/parallel/supported-all-distro b/qa/suites/upgrade/mimic-x/parallel/supported-all-distro new file mode 120000 index 00000000..44d01d18 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/parallel/supported-all-distro @@ -0,0 +1 @@ +../../../../../qa/distros/supported-all-distro/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split-erasure-code/% b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/mimic-x/stress-split-erasure-code/.qa b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split-erasure-code/0-cluster b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/0-cluster new file mode 120000 index 00000000..35809372 --- /dev/null +++ 
b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/0-cluster @@ -0,0 +1 @@ +../stress-split/0-cluster/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split-erasure-code/1-luminous-install b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/1-luminous-install new file mode 120000 index 00000000..0479ac54 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/1-luminous-install @@ -0,0 +1 @@ +../stress-split/1-ceph-install/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split-erasure-code/1.1-pg-log-overrides/normal_pg_log.yaml b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/1.1-pg-log-overrides/normal_pg_log.yaml new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/1.1-pg-log-overrides/normal_pg_log.yaml @@ -0,0 +1 @@ + diff --git a/qa/suites/upgrade/mimic-x/stress-split-erasure-code/1.1-pg-log-overrides/short_pg_log.yaml b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/1.1-pg-log-overrides/short_pg_log.yaml new file mode 100644 index 00000000..e31e37ba --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/1.1-pg-log-overrides/short_pg_log.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + conf: + osd: + osd min pg log entries: 1 + osd max pg log entries: 2 diff --git a/qa/suites/upgrade/mimic-x/stress-split-erasure-code/2-partial-upgrade b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/2-partial-upgrade new file mode 120000 index 00000000..ab35fc1a --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/2-partial-upgrade @@ -0,0 +1 @@ +../stress-split/2-partial-upgrade/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split-erasure-code/3-thrash/.qa b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/3-thrash/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/3-thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split-erasure-code/3-thrash/default.yaml b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/3-thrash/default.yaml new file mode 100644 index 00000000..b12d76e9 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/3-thrash/default.yaml @@ -0,0 +1,26 @@ +meta: +- desc: | + randomly kill and revive osd + small chance to increase the number of pgs +overrides: + ceph: + log-whitelist: + - but it is still running + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- parallel: + - stress-tasks +stress-tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 + min_in: 4 + chance_thrash_cluster_full: 0 + chance_thrash_pg_upmap: 0 + chance_thrash_pg_upmap_items: 0 + chance_force_recovery: 0 + aggressive_pg_num_changes: false +- print: "**** done thrashosds 3-thrash" diff --git a/qa/suites/upgrade/mimic-x/stress-split-erasure-code/4-ec-workload.yaml b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/4-ec-workload.yaml new file mode 100644 index 00000000..c89551e6 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/4-ec-workload.yaml @@ -0,0 +1,22 @@ +meta: +- desc: | + randomized correctness test for rados operations on an erasure coded pool +stress-tasks: + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + 
snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 + - print: "**** done rados ec task" diff --git a/qa/suites/upgrade/mimic-x/stress-split-erasure-code/5-finish-upgrade.yaml b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/5-finish-upgrade.yaml new file mode 120000 index 00000000..a66a7dc1 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/5-finish-upgrade.yaml @@ -0,0 +1 @@ +../stress-split/5-finish-upgrade.yaml \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split-erasure-code/7-final-workload.yaml b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/7-final-workload.yaml new file mode 100644 index 00000000..50a14650 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/7-final-workload.yaml @@ -0,0 +1,35 @@ +# +# k=3 implies a stripe_width of 1376*3 = 4128 which is different from +# the default value of 4096. It is also not a multiple of 1024*1024 and +# creates situations where rounding rules during recovery become +# necessary. +# +meta: +- desc: | + randomized correctness test for rados operations on an erasure coded pool + using the jerasure plugin with k=3 and m=1 +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 50 + ec_pool: true + write_append_excl: false + erasure_code_profile: + name: jerasure31profile + plugin: jerasure + k: 3 + m: 1 + technique: reed_sol_van + crush-failure-domain: osd + op_weights: + read: 100 + write: 0 + append: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 + copy_from: 50 + setattr: 25 + rmattr: 25 diff --git a/qa/suites/upgrade/mimic-x/stress-split-erasure-code/objectstore b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/objectstore new file mode 120000 index 00000000..016cbf96 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/objectstore @@ -0,0 +1 @@ +../stress-split/objectstore/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split-erasure-code/supported-all-distro b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/supported-all-distro new file mode 120000 index 00000000..44d01d18 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/supported-all-distro @@ -0,0 +1 @@ +../../../../../qa/distros/supported-all-distro/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split-erasure-code/thrashosds-health.yaml b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/thrashosds-health.yaml new file mode 120000 index 00000000..a18eda11 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split-erasure-code/thrashosds-health.yaml @@ -0,0 +1 @@ +../../../../../qa/tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split/% b/qa/suites/upgrade/mimic-x/stress-split/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/mimic-x/stress-split/.qa b/qa/suites/upgrade/mimic-x/stress-split/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split/0-cluster/+ b/qa/suites/upgrade/mimic-x/stress-split/0-cluster/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/mimic-x/stress-split/0-cluster/.qa b/qa/suites/upgrade/mimic-x/stress-split/0-cluster/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/0-cluster/.qa @@ -0,0 +1 @@ +../.qa/ \ No 
newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split/0-cluster/openstack.yaml b/qa/suites/upgrade/mimic-x/stress-split/0-cluster/openstack.yaml new file mode 100644 index 00000000..5caffc35 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/0-cluster/openstack.yaml @@ -0,0 +1,6 @@ +openstack: + - machine: + disk: 100 # GB + - volumes: # attached to each instance + count: 4 + size: 30 # GB diff --git a/qa/suites/upgrade/mimic-x/stress-split/0-cluster/start.yaml b/qa/suites/upgrade/mimic-x/stress-split/0-cluster/start.yaml new file mode 100644 index 00000000..433f3534 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/0-cluster/start.yaml @@ -0,0 +1,42 @@ +meta: +- desc: | + Run ceph on two nodes, + with a separate client-only node. + Use xfs beneath the osds. + #Note-To enable RHEL runs on ovh nodes, add the following to overrides + #ansible.cephlab: + # skip_tags: entitlements,packages,repos +overrides: + ceph: + mon_bind_msgr2: false + mon_bind_addrvec: false + fs: xfs + log-whitelist: + - overall HEALTH_ + - \(MON_DOWN\) + - \(MGR_DOWN\) + - \(MON_MSGR2_NOT_ENABLED\) + conf: + global: + enable experimental unrecoverable data corrupting features: "*" + mon warn on msgr2 not enabled: false + mon: + mon warn on osd down out interval zero: false +roles: +- - mon.a + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - osd.3 +- - mon.b + - osd.4 + - osd.5 + - osd.6 + - osd.7 +- - mon.c +- - osd.8 + - osd.9 + - osd.10 + - osd.11 +- - client.0 diff --git a/qa/suites/upgrade/mimic-x/stress-split/1-ceph-install/.qa b/qa/suites/upgrade/mimic-x/stress-split/1-ceph-install/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/1-ceph-install/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split/1-ceph-install/mimic.yaml b/qa/suites/upgrade/mimic-x/stress-split/1-ceph-install/mimic.yaml new file mode 100644 index 00000000..ae2fa2f5 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/1-ceph-install/mimic.yaml @@ -0,0 +1,29 @@ +meta: +- desc: install ceph/mimic latest +tasks: +- install: + branch: mimic + exclude_packages: + - librados3 + - ceph-mgr-dashboard + - ceph-mgr-diskprediction-local + - ceph-mgr-diskprediction-cloud + - ceph-mgr-rook + - ceph-mgr-ssh + extra_packages: ['librados2'] +- print: "**** done install mimic" +- ceph: + conf: + global: + bluestore_warn_on_legacy_statfs: false + mon pg warn min per osd: 0 +- exec: + osd.0: + - ceph osd require-osd-release mimic + - ceph osd set-require-min-compat-client mimic +- print: "**** done ceph" +overrides: + ceph: + conf: + mon: + mon warn on osd down out interval zero: false diff --git a/qa/suites/upgrade/mimic-x/stress-split/1.1-pg-log-overrides/normal_pg_log.yaml b/qa/suites/upgrade/mimic-x/stress-split/1.1-pg-log-overrides/normal_pg_log.yaml new file mode 100644 index 00000000..8b137891 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/1.1-pg-log-overrides/normal_pg_log.yaml @@ -0,0 +1 @@ + diff --git a/qa/suites/upgrade/mimic-x/stress-split/1.1-pg-log-overrides/short_pg_log.yaml b/qa/suites/upgrade/mimic-x/stress-split/1.1-pg-log-overrides/short_pg_log.yaml new file mode 100644 index 00000000..e31e37ba --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/1.1-pg-log-overrides/short_pg_log.yaml @@ -0,0 +1,6 @@ +overrides: + ceph: + conf: + osd: + osd min pg log entries: 1 + osd max pg log entries: 2 diff --git a/qa/suites/upgrade/mimic-x/stress-split/2-partial-upgrade/.qa 
b/qa/suites/upgrade/mimic-x/stress-split/2-partial-upgrade/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/2-partial-upgrade/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/mimic-x/stress-split/2-partial-upgrade/firsthalf.yaml new file mode 100644 index 00000000..c88a297a --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/2-partial-upgrade/firsthalf.yaml @@ -0,0 +1,14 @@ +meta: +- desc: | + install upgrade ceph/-x on 2/3 of cluster + restart : mons, osd 0-7 +tasks: +- install.upgrade: + mon.a: + mon.b: + mon.c: +- print: "**** done install.upgrade first 3 nodes" +- ceph.restart: + daemons: [mon.a,mon.b,mon.c,mgr.x,osd.0,osd.1,osd.2,osd.3,osd.4,osd.5,osd.6,osd.7] + mon-health-to-clog: false +- print: "**** done ceph.restart all mons and 2/3 of osds" diff --git a/qa/suites/upgrade/mimic-x/stress-split/3-thrash/.qa b/qa/suites/upgrade/mimic-x/stress-split/3-thrash/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/3-thrash/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split/3-thrash/default.yaml b/qa/suites/upgrade/mimic-x/stress-split/3-thrash/default.yaml new file mode 100644 index 00000000..e0f317b0 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/3-thrash/default.yaml @@ -0,0 +1,26 @@ +meta: +- desc: | + randomly kill and revive osd + small chance to increase the number of pgs +overrides: + ceph: + log-whitelist: + - but it is still running + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch +tasks: +- parallel: + - stress-tasks +stress-tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 + chance_thrash_cluster_full: 0 + chance_thrash_pg_upmap: 0 + chance_thrash_pg_upmap_items: 0 + disable_objectstore_tool_tests: true + chance_force_recovery: 0 + aggressive_pg_num_changes: false +- print: "**** done thrashosds 3-thrash" diff --git a/qa/suites/upgrade/mimic-x/stress-split/4-workload/+ b/qa/suites/upgrade/mimic-x/stress-split/4-workload/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/mimic-x/stress-split/4-workload/.qa b/qa/suites/upgrade/mimic-x/stress-split/4-workload/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/4-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split/4-workload/radosbench.yaml b/qa/suites/upgrade/mimic-x/stress-split/4-workload/radosbench.yaml new file mode 100644 index 00000000..115939e6 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/4-workload/radosbench.yaml @@ -0,0 +1,52 @@ +meta: +- desc: | + run randomized correctness test for rados operations + generate write load with rados bench +stress-tasks: +- full_sequential: + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] 
+ time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 +- print: "**** done radosbench 7-workload" diff --git a/qa/suites/upgrade/mimic-x/stress-split/4-workload/rbd-cls.yaml b/qa/suites/upgrade/mimic-x/stress-split/4-workload/rbd-cls.yaml new file mode 100644 index 00000000..52ccaec9 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/4-workload/rbd-cls.yaml @@ -0,0 +1,10 @@ +meta: +- desc: | + run basic cls tests for rbd +stress-tasks: +- workunit: + branch: mimic + clients: + client.0: + - cls/test_cls_rbd.sh +- print: "**** done cls/test_cls_rbd.sh 5-workload" diff --git a/qa/suites/upgrade/mimic-x/stress-split/4-workload/rbd-import-export.yaml b/qa/suites/upgrade/mimic-x/stress-split/4-workload/rbd-import-export.yaml new file mode 100644 index 00000000..1761f4f9 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/4-workload/rbd-import-export.yaml @@ -0,0 +1,12 @@ +meta: +- desc: | + run basic import/export cli tests for rbd +stress-tasks: +- workunit: + branch: mimic + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format +- print: "**** done rbd/import_export.sh 5-workload" diff --git a/qa/suites/upgrade/mimic-x/stress-split/4-workload/rbd_api.yaml b/qa/suites/upgrade/mimic-x/stress-split/4-workload/rbd_api.yaml new file mode 100644 index 00000000..8f010cb2 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/4-workload/rbd_api.yaml @@ -0,0 +1,10 @@ +meta: +- desc: | + librbd C and C++ api tests +stress-tasks: +- workunit: + branch: mimic + clients: + client.0: + - rbd/test_librbd.sh +- print: "**** done rbd/test_librbd.sh 7-workload" diff --git a/qa/suites/upgrade/mimic-x/stress-split/4-workload/readwrite.yaml b/qa/suites/upgrade/mimic-x/stress-split/4-workload/readwrite.yaml new file mode 100644 index 00000000..41e34d6d --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/4-workload/readwrite.yaml @@ -0,0 +1,16 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool, + using only reads, writes, and deletes +stress-tasks: +- full_sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 500 + write_append_excl: false + op_weights: + read: 45 + write: 45 + delete: 10 +- print: "**** done rados/readwrite 5-workload" diff --git a/qa/suites/upgrade/mimic-x/stress-split/4-workload/rgw_ragweed_prepare.yaml b/qa/suites/upgrade/mimic-x/stress-split/4-workload/rgw_ragweed_prepare.yaml new file mode 100644 index 00000000..b1f06bcc --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/4-workload/rgw_ragweed_prepare.yaml @@ -0,0 +1,14 @@ +meta: +- desc: | + rgw ragweed prepare before upgrade +stress-tasks: + - full_sequential: + - sequential: + - rgw: + - client.0 + - ragweed: + client.0: + default-branch: ceph-nautilus + rgw_server: client.0 + stages: prepare + - print: "**** done rgw ragweed prepare 4-workload" diff --git a/qa/suites/upgrade/mimic-x/stress-split/4-workload/snaps-few-objects.yaml b/qa/suites/upgrade/mimic-x/stress-split/4-workload/snaps-few-objects.yaml new file mode 100644 index 00000000..f56d0de0 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/4-workload/snaps-few-objects.yaml @@ -0,0 +1,18 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool with snapshot operations +stress-tasks: +- full_sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + 
write_append_excl: false + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 +- print: "**** done rados/snaps-few-objects 5-workload" diff --git a/qa/suites/upgrade/mimic-x/stress-split/5-finish-upgrade.yaml b/qa/suites/upgrade/mimic-x/stress-split/5-finish-upgrade.yaml new file mode 100644 index 00000000..a76cda76 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/5-finish-upgrade.yaml @@ -0,0 +1,14 @@ +tasks: +- install.upgrade: + osd.8: + client.0: +- ceph.restart: + daemons: [osd.8, osd.9, osd.10, osd.11] + wait-for-healthy: false + wait-for-osds-up: true +- exec: + osd.0: + - ceph osd set pglog_hardlimit + - ceph osd dump --format=json-pretty | grep "flags" +- print: "**** try to set pglog_hardlimit again, should succeed" + diff --git a/qa/suites/upgrade/mimic-x/stress-split/6-nautilus.yaml b/qa/suites/upgrade/mimic-x/stress-split/6-nautilus.yaml new file mode 120000 index 00000000..9e99b7d2 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/6-nautilus.yaml @@ -0,0 +1 @@ +.qa/releases/nautilus.yaml \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split/6.1-msgr2.yaml b/qa/suites/upgrade/mimic-x/stress-split/6.1-msgr2.yaml new file mode 100644 index 00000000..ad3858e9 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/6.1-msgr2.yaml @@ -0,0 +1,5 @@ +tasks: +- exec: + mon.a: + - ceph mon enable-msgr2 +- ceph.healthy: diff --git a/qa/suites/upgrade/mimic-x/stress-split/7-final-workload/+ b/qa/suites/upgrade/mimic-x/stress-split/7-final-workload/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/mimic-x/stress-split/7-final-workload/.qa b/qa/suites/upgrade/mimic-x/stress-split/7-final-workload/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/7-final-workload/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split/7-final-workload/rbd-python.yaml b/qa/suites/upgrade/mimic-x/stress-split/7-final-workload/rbd-python.yaml new file mode 100644 index 00000000..a1a3d9e9 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/7-final-workload/rbd-python.yaml @@ -0,0 +1,10 @@ +meta: +- desc: | + librbd python api tests +tasks: +- workunit: + branch: mimic + clients: + client.0: + - rbd/test_librbd_python.sh +- print: "**** done rbd/test_librbd_python.sh 9-workload" diff --git a/qa/suites/upgrade/mimic-x/stress-split/7-final-workload/rgw-swift-ragweed_check.yaml b/qa/suites/upgrade/mimic-x/stress-split/7-final-workload/rgw-swift-ragweed_check.yaml new file mode 100644 index 00000000..8867a409 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/7-final-workload/rgw-swift-ragweed_check.yaml @@ -0,0 +1,19 @@ +meta: +- desc: | + swift api tests for rgw + rgw ragweed check after upgrade +tasks: +- rgw: + client.0: +- print: "**** done rgw 7-workload" +- swift: + client.0: + force-branch: ceph-nautilus + rgw_server: client.0 +- print: "**** done swift 7-workload" +- ragweed: + client.0: + default-branch: ceph-nautilus + rgw_server: client.0 + stages: check +- print: "**** done rgw ragweed check 7-workload" diff --git a/qa/suites/upgrade/mimic-x/stress-split/7-final-workload/snaps-many-objects.yaml b/qa/suites/upgrade/mimic-x/stress-split/7-final-workload/snaps-many-objects.yaml new file mode 100644 index 00000000..805bf97c --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/7-final-workload/snaps-many-objects.yaml @@ -0,0 +1,16 @@ +meta: +- desc: | + 
randomized correctness test for rados operations on a replicated pool with snapshot operations +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + write_append_excl: false + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/upgrade/mimic-x/stress-split/objectstore/.qa b/qa/suites/upgrade/mimic-x/stress-split/objectstore/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/objectstore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split/objectstore/bluestore-bitmap.yaml b/qa/suites/upgrade/mimic-x/stress-split/objectstore/bluestore-bitmap.yaml new file mode 120000 index 00000000..a59cf517 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/objectstore/bluestore-bitmap.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-bitmap.yaml \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split/objectstore/filestore-xfs.yaml b/qa/suites/upgrade/mimic-x/stress-split/objectstore/filestore-xfs.yaml new file mode 120000 index 00000000..03750e5a --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/objectstore/filestore-xfs.yaml @@ -0,0 +1 @@ +../../../../../objectstore/filestore-xfs.yaml \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split/supported-all-distro b/qa/suites/upgrade/mimic-x/stress-split/supported-all-distro new file mode 120000 index 00000000..44d01d18 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/supported-all-distro @@ -0,0 +1 @@ +../../../../../qa/distros/supported-all-distro/ \ No newline at end of file diff --git a/qa/suites/upgrade/mimic-x/stress-split/thrashosds-health.yaml b/qa/suites/upgrade/mimic-x/stress-split/thrashosds-health.yaml new file mode 120000 index 00000000..a18eda11 --- /dev/null +++ b/qa/suites/upgrade/mimic-x/stress-split/thrashosds-health.yaml @@ -0,0 +1 @@ +../../../../../qa/tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-p2p/.qa b/qa/suites/upgrade/nautilus-p2p/.qa new file mode 120000 index 00000000..fea2489f --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-parallel/% b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-parallel/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-parallel/point-to-point-upgrade.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-parallel/point-to-point-upgrade.yaml new file mode 100644 index 00000000..b25b5176 --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-parallel/point-to-point-upgrade.yaml @@ -0,0 +1,182 @@ +meta: +- desc: | + Run ceph on two nodes, using one of them as a client, + with a separate client-only node. + Use xfs beneath the osds. 
+ install ceph/nautilus v14.2.2 point version + run workload and upgrade-sequence in parallel + (every point release should be tested) + run workload and upgrade-sequence in parallel + install ceph/nautilus latest version + run workload and upgrade-sequence in parallel +overrides: + ceph: + log-whitelist: + - evicting unresponsive client + - reached quota + - scrub + - osd_map_max_advance + - wrongly marked + - FS_DEGRADED + - POOL_APP_NOT_ENABLED + - CACHE_POOL_NO_HIT_SET + - POOL_FULL + - SMALLER_PG + - pool\(s\) full + - OSD_DOWN + - missing hit_sets + - CACHE_POOL_NEAR_FULL + - PG_AVAILABILITY + - PG_DEGRADED + - application not enabled + - cache pools at or near target size + - filesystem is degraded + - OBJECT_MISPLACED + ### ref: https://tracker.ceph.com/issues/40251 + #removed see ^ - failed to encode map + + fs: xfs + + conf: + global: + mon_warn_on_pool_no_app: false + mon pg warn min per osd: 0 + mon: + mon debug unsafe allow tier with nonempty snaps: true + osd: + osd map max advance: 1000 + osd_class_default_list: "*" + osd_class_load_list: "*" + client: + rgw_crypt_require_ssl: false + rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo= +roles: +- - mon.a + - mds.a + - osd.0 + - osd.1 + - osd.2 + - mgr.x +- - mon.b + - mon.c + - osd.3 + - osd.4 + - osd.5 + - client.0 +- - client.1 +openstack: +- volumes: # attached to each instance + count: 3 + size: 30 # GB +tasks: +# v14.2.0 removed per http://tracker.ceph.com/issues/40251 +- print: "**** done nautilus v14.2.2 about to install" +- install: + tag: v14.2.2 + # line below can be removed, it's from the jewel test + #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2'] +- print: "**** done v14.2.2 install" +- ceph: + fs: xfs + add_osds_to_crush: true +- print: "**** done ceph xfs" +- sequential: + - workload +- print: "**** done workload v14.2.2" + + +# v14.2.1 removed per http://tracker.ceph.com/issues/40251 +# v14.2.2 + +####### upgrade to v14.2.?? PLACEHOLDER +#- install.upgrade: +# #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev'] +# mon.a: +# tag: v14.2.?? +# mon.b: +# tag: v14.2.?? +# # Note that client.a IS NOT upgraded at this point +#- parallel: +# - workload_nautilus +# - upgrade-sequence_nautilus +#- print: "**** done parallel nautilus v14.2.??" 
+ +#### upgrade to latest nautilus +- install.upgrade: + mon.a: + mon.b: +- parallel: + - workload_nautilus + - upgrade-sequence_nautilus +- print: "**** done parallel nautilus branch" + +####################### +workload: + sequential: + - workunit: + clients: + client.0: + - suites/blogbench.sh +workload_nautilus: + full_sequential: + - workunit: + branch: nautilus + #tag: v14.2.0 + clients: + client.1: + - rados/test.sh + - cls + env: + CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.snapshots_namespaces' + - print: "**** done rados/test.sh & cls workload_nautilus" + - sequential: + - rgw: [client.0] + - print: "**** done rgw workload_nautilus" + - s3tests: + client.0: + force-branch: ceph-nautilus + rgw_server: client.0 + scan_for_encryption_keys: false + - print: "**** done s3tests workload_nautilus" + - rbd_fsx: + clients: [client.0] + size: 134217728 + - print: "**** done rbd_fsx workload_nautilus" + +upgrade-sequence_nautilus: + sequential: + - print: "**** done branch: nautilus install.upgrade" + - ceph.restart: [mds.a] + - sleep: + duration: 60 + - ceph.restart: [osd.0] + - sleep: + duration: 30 + - ceph.restart: [osd.1] + - sleep: + duration: 30 + - ceph.restart: [osd.2] + - sleep: + duration: 30 + - ceph.restart: [osd.3] + - sleep: + duration: 30 + - ceph.restart: [osd.4] + - sleep: + duration: 30 + - ceph.restart: [osd.5] + - sleep: + duration: 60 + - ceph.restart: [mgr.x] + - sleep: + duration: 60 + - ceph.restart: [mon.a] + - sleep: + duration: 60 + - ceph.restart: [mon.b] + - sleep: + duration: 60 + - ceph.restart: [mon.c] + - sleep: + duration: 60 + - print: "**** done ceph.restart all nautilus branch mds/osd/mon" diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-parallel/supported-all-distro b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-parallel/supported-all-distro new file mode 120000 index 00000000..0f102120 --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-parallel/supported-all-distro @@ -0,0 +1 @@ +../../../../distros/supported-all-distro/ \ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/% b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/% new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/.qa b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/0-cluster/+ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/0-cluster/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/0-cluster/.qa b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/0-cluster/.qa new file mode 120000 index 00000000..fea2489f --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/0-cluster/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/0-cluster/openstack.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/0-cluster/openstack.yaml new file mode 100644 index 00000000..5caffc35 --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/0-cluster/openstack.yaml @@ -0,0 +1,6 @@ +openstack: + - machine: + disk: 100 # GB + - volumes: # attached to each instance + count: 4 + size: 30 # GB diff --git 
a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/0-cluster/start.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/0-cluster/start.yaml new file mode 100644 index 00000000..7cec8127 --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/0-cluster/start.yaml @@ -0,0 +1,35 @@ +meta: +- desc: | + Run ceph on two nodes, + with a separate client-only node. + Use xfs beneath the osds. +overrides: + ceph: + fs: xfs + log-whitelist: + - evicting unresponsive client + - overall HEALTH_ + - \(MON_DOWN\) + - \(MGR_DOWN\) + ### ref: https://tracker.ceph.com/issues/40251 + #removed see ^ - failed to encode map + conf: + global: + enable experimental unrecoverable data corrupting features: "*" + mon pg warn min per osd: 0 + mon: + mon warn on osd down out interval zero: false +roles: +- - mon.a + - mon.b + - mon.c + - mgr.x + - osd.0 + - osd.1 + - osd.2 + - osd.3 +- - osd.4 + - osd.5 + - osd.6 + - osd.7 +- - client.0 diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/1-ceph-install/.qa b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/1-ceph-install/.qa new file mode 120000 index 00000000..fea2489f --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/1-ceph-install/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/1-ceph-install/nautilus.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/1-ceph-install/nautilus.yaml new file mode 100644 index 00000000..75dc71fe --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/1-ceph-install/nautilus.yaml @@ -0,0 +1,19 @@ +meta: +- desc: install ceph/nautilus v14.2.2 +tasks: +- install: + tag: v14.2.2 + exclude_packages: ['librados3'] + extra_packages: ['librados2'] +- print: "**** done install nautilus v14.2.2" +- ceph: +- exec: + osd.0: + - ceph osd require-osd-release nautilus + - ceph osd set-require-min-compat-client nautilus +- print: "**** done ceph" +overrides: + ceph: + conf: + mon: + mon warn on osd down out interval zero: false diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/1.1.short_pg_log.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/1.1.short_pg_log.yaml new file mode 120000 index 00000000..62010f4f --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/1.1.short_pg_log.yaml @@ -0,0 +1 @@ +../../../../overrides/short_pg_log.yaml \ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/2-partial-upgrade/.qa b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/2-partial-upgrade/.qa new file mode 120000 index 00000000..fea2489f --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/2-partial-upgrade/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/2-partial-upgrade/firsthalf.yaml new file mode 100644 index 00000000..02ba5c1b --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/2-partial-upgrade/firsthalf.yaml @@ -0,0 +1,13 @@ +meta: +- desc: | + install upgrade ceph/-x on one node only + 1st half + restart : osd.0,1,2,3 +tasks: +- install.upgrade: + osd.0: +- print: "**** done install.upgrade osd.0" +- ceph.restart: + daemons: [mon.a,mon.b,mon.c,mgr.x,osd.0,osd.1,osd.2,osd.3] + mon-health-to-clog: false +- print: "**** done 
ceph.restart 1st half" diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/3-thrash/.qa b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/3-thrash/.qa new file mode 120000 index 00000000..fea2489f --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/3-thrash/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/3-thrash/default.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/3-thrash/default.yaml new file mode 100644 index 00000000..49e6f84f --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/3-thrash/default.yaml @@ -0,0 +1,27 @@ +meta: +- desc: | + randomly kill and revive osd + small chance to increase the number of pgs +overrides: + ceph: + log-whitelist: + - but it is still running + - wrongly marked me down + - objects unfound and apparently lost + - log bound mismatch + ### ref: https://tracker.ceph.com/issues/40251 + - failed to encode map +tasks: +- parallel: + - stress-tasks +stress-tasks: +- thrashosds: + timeout: 1200 + chance_pgnum_grow: 1 + chance_pgpnum_fix: 1 + chance_thrash_cluster_full: 0 + chance_thrash_pg_upmap: 0 + chance_thrash_pg_upmap_items: 0 + disable_objectstore_tool_tests: true + chance_force_recovery: 0 +- print: "**** done thrashosds 3-thrash" diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/+ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/.qa b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/.qa new file mode 120000 index 00000000..fea2489f --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/fsx.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/fsx.yaml new file mode 100644 index 00000000..fd4081f2 --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/fsx.yaml @@ -0,0 +1,8 @@ +meta: +- desc: | + run basic fsx tests for rbd +stress-tasks: +- rbd_fsx: + clients: [client.0] + size: 134217728 +- print: "**** done rbd_fsx 4-workload" diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/radosbench.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/radosbench.yaml new file mode 100644 index 00000000..c545936c --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/radosbench.yaml @@ -0,0 +1,52 @@ +meta: +- desc: | + run randomized correctness test for rados operations + generate write load with rados bench +stress-tasks: +- full_sequential: + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 + - radosbench: + clients: 
[client.0] + time: 90 + - radosbench: + clients: [client.0] + time: 90 +- print: "**** done radosbench 4-workload" diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/rbd-cls.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/rbd-cls.yaml new file mode 100644 index 00000000..0c0f512a --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/rbd-cls.yaml @@ -0,0 +1,10 @@ +meta: +- desc: | + run basic cls tests for rbd +stress-tasks: +- workunit: + branch: nautilus + clients: + client.0: + - cls/test_cls_rbd.sh +- print: "**** done cls/test_cls_rbd.sh 4-workload" diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/rbd-import-export.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/rbd-import-export.yaml new file mode 100644 index 00000000..2f7cf3ba --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/rbd-import-export.yaml @@ -0,0 +1,12 @@ +meta: +- desc: | + run basic import/export cli tests for rbd +stress-tasks: +- workunit: + branch: nautilus + clients: + client.0: + - rbd/import_export.sh + env: + RBD_CREATE_ARGS: --new-format +- print: "**** done rbd/import_export.sh 4-workload" diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/rbd_api.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/rbd_api.yaml new file mode 100644 index 00000000..f25e4dd4 --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/rbd_api.yaml @@ -0,0 +1,10 @@ +meta: +- desc: | + librbd C and C++ api tests +stress-tasks: +- workunit: + branch: nautilus + clients: + client.0: + - rbd/test_librbd.sh +- print: "**** done rbd/test_librbd.sh 4-workload" diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/readwrite.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/readwrite.yaml new file mode 100644 index 00000000..45686899 --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/readwrite.yaml @@ -0,0 +1,16 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool, + using only reads, writes, and deletes +stress-tasks: +- full_sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 500 + write_append_excl: false + op_weights: + read: 45 + write: 45 + delete: 10 +- print: "**** done rados/readwrite 4-workload" diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/snaps-few-objects.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/snaps-few-objects.yaml new file mode 100644 index 00000000..ae232d86 --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/4-workload/snaps-few-objects.yaml @@ -0,0 +1,18 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool with snapshot operations +stress-tasks: +- full_sequential: + - rados: + clients: [client.0] + ops: 4000 + objects: 50 + write_append_excl: false + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 +- print: "**** done rados/snaps-few-objects 4-workload" diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/5-finish-upgrade.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/5-finish-upgrade.yaml new file mode 100644 index 00000000..803737c7 --- /dev/null +++ 
b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/5-finish-upgrade.yaml @@ -0,0 +1,8 @@ +tasks: +- install.upgrade: + osd.4: + client.0: +- ceph.restart: + daemons: [osd.4, osd.5, osd.6, osd.7] + wait-for-healthy: false + wait-for-osds-up: true diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/+ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/+ new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/.qa b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/.qa new file mode 120000 index 00000000..fea2489f --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/.qa @@ -0,0 +1 @@ +../.qa \ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/rbd-python.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/rbd-python.yaml new file mode 100644 index 00000000..dbfde7f7 --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/rbd-python.yaml @@ -0,0 +1,10 @@ +meta: +- desc: | + librbd python api tests +tasks: +- workunit: + tag: v14.2.10 + clients: + client.0: + - rbd/test_librbd_python.sh +- print: "**** done rbd/test_librbd_python.sh 7-workload" diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/rgw-swift.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/rgw-swift.yaml new file mode 100644 index 00000000..1b5710b1 --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/rgw-swift.yaml @@ -0,0 +1,12 @@ +meta: +- desc: | + swift api tests for rgw +tasks: +- rgw: + client.0: +- print: "**** done rgw 9-workload" +- swift: + client.0: + force-branch: ceph-nautilus + rgw_server: client.0 +- print: "**** done swift 9-workload" diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/snaps-many-objects.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/snaps-many-objects.yaml new file mode 100644 index 00000000..805bf97c --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/7-final-workload/snaps-many-objects.yaml @@ -0,0 +1,16 @@ +meta: +- desc: | + randomized correctness test for rados operations on a replicated pool with snapshot operations +tasks: +- rados: + clients: [client.0] + ops: 4000 + objects: 500 + write_append_excl: false + op_weights: + read: 100 + write: 100 + delete: 50 + snap_create: 50 + snap_remove: 50 + rollback: 50 diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/objectstore/.qa b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/objectstore/.qa new file mode 120000 index 00000000..a602a035 --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/objectstore/.qa @@ -0,0 +1 @@ +../.qa/ \ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/objectstore/bluestore-bitmap.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/objectstore/bluestore-bitmap.yaml new file mode 120000 index 00000000..a59cf517 --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/objectstore/bluestore-bitmap.yaml @@ -0,0 +1 @@ +.qa/objectstore/bluestore-bitmap.yaml \ No newline at end of file diff --git 
a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/objectstore/default.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/objectstore/default.yaml new file mode 100644 index 00000000..e69de29b diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/objectstore/filestore-xfs.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/objectstore/filestore-xfs.yaml new file mode 120000 index 00000000..41f2a9d1 --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/objectstore/filestore-xfs.yaml @@ -0,0 +1 @@ +.qa/objectstore/filestore-xfs.yaml \ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/supported-all-distro b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/supported-all-distro new file mode 120000 index 00000000..0f102120 --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/supported-all-distro @@ -0,0 +1 @@ +../../../../distros/supported-all-distro/ \ No newline at end of file diff --git a/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/thrashosds-health.yaml b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/thrashosds-health.yaml new file mode 120000 index 00000000..e0426dbe --- /dev/null +++ b/qa/suites/upgrade/nautilus-p2p/nautilus-p2p-stress-split/thrashosds-health.yaml @@ -0,0 +1 @@ +../../../../tasks/thrashosds-health.yaml \ No newline at end of file diff --git a/qa/tasks/__init__.py b/qa/tasks/__init__.py new file mode 100644 index 00000000..9a7949a0 --- /dev/null +++ b/qa/tasks/__init__.py @@ -0,0 +1,6 @@ +import logging + +# Inherit teuthology's log level +teuthology_log = logging.getLogger('teuthology') +log = logging.getLogger(__name__) +log.setLevel(teuthology_log.level) diff --git a/qa/tasks/admin_socket.py b/qa/tasks/admin_socket.py new file mode 100644 index 00000000..c454d3d0 --- /dev/null +++ b/qa/tasks/admin_socket.py @@ -0,0 +1,194 @@ +""" +Admin Socket task -- used in rados, powercycle, and smoke testing +""" + +import json +import logging +import os +import time + +from teuthology.exceptions import CommandFailedError +from teuthology.orchestra import run +from teuthology import misc as teuthology +from teuthology.parallel import parallel +from teuthology.config import config as teuth_config + +log = logging.getLogger(__name__) + + +def task(ctx, config): + """ + Run an admin socket command, make sure the output is json, and run + a test program on it. The test program should read json from + stdin. This task succeeds if the test program exits with status 0. + + To run the same test on all clients:: + + tasks: + - ceph: + - rados: + - admin_socket: + all: + dump_requests: + test: http://example.com/script + + To restrict it to certain clients:: + + tasks: + - ceph: + - rados: [client.1] + - admin_socket: + client.1: + dump_requests: + test: http://example.com/script + + If an admin socket command has arguments, they can be specified as + a list:: + + tasks: + - ceph: + - rados: [client.0] + - admin_socket: + client.0: + dump_requests: + test: http://example.com/script + help: + test: http://example.com/test_help_version + args: [version] + + Note that there must be a ceph client with an admin socket running + before this task is run. The tests are parallelized at the client + level. Tests for a single client are run serially. 
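A test program referenced by a 'test' URL only needs to read the admin socket command's JSON output from stdin and report success through its exit status; a minimal sketch of such a program (the check it performs is purely illustrative, not something this task prescribes):

#!/usr/bin/env python3
# Minimal admin_socket test program sketch: the task pipes the admin socket
# command's JSON output to stdin; exiting 0 marks the test as passed.
import json
import sys

data = json.load(sys.stdin)
sys.exit(0 if data else 1)   # illustrative check: fail on empty output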
+ + :param ctx: Context + :param config: Configuration + """ + assert isinstance(config, dict), \ + 'admin_socket task requires a dict for configuration' + teuthology.replace_all_with_clients(ctx.cluster, config) + + with parallel() as ptask: + for client, tests in config.items(): + ptask.spawn(_run_tests, ctx, client, tests) + + +def _socket_command(ctx, remote, socket_path, command, args): + """ + Run an admin socket command and return the result as a string. + + :param ctx: Context + :param remote: Remote site + :param socket_path: path to socket + :param command: command to be run remotely + :param args: command arguments + + :returns: output of command in json format + """ + testdir = teuthology.get_testdir(ctx) + max_tries = 120 + while True: + try: + out = remote.sh([ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'ceph', + '--admin-daemon', socket_path, + ] + command.split(' ') + args) + except CommandFailedError: + assert max_tries > 0 + max_tries -= 1 + log.info('ceph cli returned an error, command not registered yet?') + log.info('sleeping and retrying ...') + time.sleep(1) + continue + break + log.debug('admin socket command %s returned %s', command, out) + return json.loads(out) + +def _run_tests(ctx, client, tests): + """ + Create a temp directory and wait for a client socket to be created. + For each test, copy the executable locally and run the test. + Remove temp directory when finished. + + :param ctx: Context + :param client: client machine to run the test + :param tests: list of tests to run + """ + testdir = teuthology.get_testdir(ctx) + log.debug('Running admin socket tests on %s', client) + (remote,) = ctx.cluster.only(client).remotes.keys() + socket_path = '/var/run/ceph/ceph-{name}.asok'.format(name=client) + overrides = ctx.config.get('overrides', {}).get('admin_socket', {}) + + try: + tmp_dir = os.path.join( + testdir, + 'admin_socket_{client}'.format(client=client), + ) + remote.run( + args=[ + 'mkdir', + '--', + tmp_dir, + run.Raw('&&'), + # wait for client process to create the socket + 'while', 'test', '!', '-e', socket_path, run.Raw(';'), + 'do', 'sleep', '1', run.Raw(';'), 'done', + ], + ) + + for command, config in tests.items(): + if config is None: + config = {} + teuthology.deep_merge(config, overrides) + log.debug('Testing %s with config %s', command, str(config)) + + test_path = None + if 'test' in config: + # hack: the git_url is always ceph-ci or ceph + git_url = teuth_config.get_ceph_git_url() + repo_name = 'ceph.git' + if git_url.count('ceph-ci'): + repo_name = 'ceph-ci.git' + url = config['test'].format( + branch=config.get('branch', 'master'), + repo=repo_name, + ) + test_path = os.path.join(tmp_dir, command) + remote.run( + args=[ + 'wget', + '-q', + '-O', + test_path, + '--', + url, + run.Raw('&&'), + 'chmod', + 'u=rx', + '--', + test_path, + ], + ) + + args = config.get('args', []) + assert isinstance(args, list), \ + 'admin socket command args must be a list' + sock_out = _socket_command(ctx, remote, socket_path, command, args) + if test_path is not None: + remote.run( + args=[ + test_path, + ], + stdin=json.dumps(sock_out), + ) + + finally: + remote.run( + args=[ + 'rm', '-rf', '--', tmp_dir, + ], + ) diff --git a/qa/tasks/autotest.py b/qa/tasks/autotest.py new file mode 100644 index 00000000..a78987dc --- /dev/null +++ b/qa/tasks/autotest.py @@ -0,0 +1,168 @@ +""" +Run an autotest test on the ceph cluster. 
+""" +import json +import logging +import os + +import six + +from teuthology import misc as teuthology +from teuthology.parallel import parallel +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Run an autotest test on the ceph cluster. + + Only autotest client tests are supported. + + The config is a mapping from role name to list of tests to run on + that client. + + For example:: + + tasks: + - ceph: + - ceph-fuse: [client.0, client.1] + - autotest: + client.0: [dbench] + client.1: [bonnie] + + You can also specify a list of tests to run on all clients:: + + tasks: + - ceph: + - ceph-fuse: + - autotest: + all: [dbench] + """ + assert isinstance(config, dict) + config = teuthology.replace_all_with_clients(ctx.cluster, config) + log.info('Setting up autotest...') + testdir = teuthology.get_testdir(ctx) + with parallel() as p: + for role in config.keys(): + (remote,) = ctx.cluster.only(role).remotes.keys() + p.spawn(_download, testdir, remote) + + log.info('Making a separate scratch dir for every client...') + for role in config.keys(): + assert isinstance(role, six.string_types) + PREFIX = 'client.' + assert role.startswith(PREFIX) + id_ = role[len(PREFIX):] + (remote,) = ctx.cluster.only(role).remotes.keys() + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) + scratch = os.path.join(mnt, 'client.{id}'.format(id=id_)) + remote.run( + args=[ + 'sudo', + 'install', + '-d', + '-m', '0755', + '--owner={user}'.format(user='ubuntu'), #TODO + '--', + scratch, + ], + ) + + with parallel() as p: + for role, tests in config.items(): + (remote,) = ctx.cluster.only(role).remotes.keys() + p.spawn(_run_tests, testdir, remote, role, tests) + +def _download(testdir, remote): + """ + Download. Does not explicitly support muliple tasks in a single run. + """ + remote.run( + args=[ + # explicitly does not support multiple autotest tasks + # in a single run; the result archival would conflict + 'mkdir', '{tdir}/archive/autotest'.format(tdir=testdir), + run.Raw('&&'), + 'mkdir', '{tdir}/autotest'.format(tdir=testdir), + run.Raw('&&'), + 'wget', + '-nv', + '--no-check-certificate', + 'https://github.com/ceph/autotest/tarball/ceph', + '-O-', + run.Raw('|'), + 'tar', + '-C', '{tdir}/autotest'.format(tdir=testdir), + '-x', + '-z', + '-f-', + '--strip-components=1', + ], + ) + +def _run_tests(testdir, remote, role, tests): + """ + Spawned to run test on remote site + """ + assert isinstance(role, six.string_types) + PREFIX = 'client.' 
+ assert role.startswith(PREFIX) + id_ = role[len(PREFIX):] + mnt = os.path.join(testdir, 'mnt.{id}'.format(id=id_)) + scratch = os.path.join(mnt, 'client.{id}'.format(id=id_)) + + assert isinstance(tests, list) + for idx, testname in enumerate(tests): + log.info('Running autotest client test #%d: %s...', idx, testname) + + tag = 'client.{id}.num{idx}.{testname}'.format( + idx=idx, + testname=testname, + id=id_, + ) + control = '{tdir}/control.{tag}'.format(tdir=testdir, tag=tag) + teuthology.write_file( + remote=remote, + path=control, + data='import json; data=json.loads({data!r}); job.run_test(**data)'.format( + data=json.dumps(dict( + url=testname, + dir=scratch, + # TODO perhaps tag + # results will be in {testdir}/autotest/client/results/dbench + # or {testdir}/autotest/client/results/dbench.{tag} + )), + ), + ) + remote.run( + args=[ + '{tdir}/autotest/client/bin/autotest'.format(tdir=testdir), + '--verbose', + '--harness=simple', + '--tag={tag}'.format(tag=tag), + control, + run.Raw('3>&1'), + ], + ) + + remote.run( + args=[ + 'rm', '-rf', '--', control, + ], + ) + + remote.run( + args=[ + 'mv', + '--', + '{tdir}/autotest/client/results/{tag}'.format(tdir=testdir, tag=tag), + '{tdir}/archive/autotest/{tag}'.format(tdir=testdir, tag=tag), + ], + ) + + remote.run( + args=[ + 'rm', '-rf', '--', '{tdir}/autotest'.format(tdir=testdir), + ], + ) diff --git a/qa/tasks/aver.py b/qa/tasks/aver.py new file mode 100644 index 00000000..79ee18c5 --- /dev/null +++ b/qa/tasks/aver.py @@ -0,0 +1,67 @@ +""" +Aver wrapper task +""" +import contextlib +import logging +from subprocess import check_call, Popen, PIPE + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def task(ctx, config): + """ + Execute an aver assertion + + Parameters: + + input: file containing data referred to by the assertions. 
File name is + relative to the job's archive path + validations: list of validations in the Aver language + + Example: + - aver: + input: bench_output.csv + validations: + - expect performance(alg='ceph') > performance(alg='raw') + - for size > 3 expect avg_throughput > 2000 + """ + log.info('Beginning aver...') + assert isinstance(config, dict), 'expecting dictionary for configuration' + + if 'input' not in config: + raise Exception("Expecting 'input' option") + if len(config.get('validations', [])) < 1: + raise Exception("Expecting at least one entry in 'validations'") + + url = ('https://github.com/ivotron/aver/releases/download/' + 'v0.3.0/aver-linux-amd64.tar.bz2') + + aver_path = ctx.archive + '/aver' + + # download binary + check_call(['wget', '-O', aver_path + '.tbz', url]) + check_call(['tar', 'xfj', aver_path + '.tbz', '-C', ctx.archive]) + + # print version + process = Popen([aver_path, '-v'], stdout=PIPE) + log.info(process.communicate()[0]) + + # validate + for validation in config['validations']: + cmd = (aver_path + ' -s -i ' + (ctx.archive + '/' + config['input']) + + ' "' + validation + '"') + log.info("executing: " + cmd) + process = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) + (stdout, stderr) = process.communicate() + if stderr: + log.info('aver stderr: ' + stderr) + log.info('aver result: ' + stdout) + if stdout.strip(' \t\n\r') != 'true': + raise Exception('Failed validation: ' + validation) + + try: + yield + finally: + log.info('Removing aver binary...') + check_call(['rm', aver_path, aver_path + '.tbz']) diff --git a/qa/tasks/blktrace.py b/qa/tasks/blktrace.py new file mode 100644 index 00000000..10b1da0c --- /dev/null +++ b/qa/tasks/blktrace.py @@ -0,0 +1,96 @@ +""" +Run blktrace program through teuthology +""" +import contextlib +import logging + +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.orchestra import run + +log = logging.getLogger(__name__) +blktrace = '/usr/sbin/blktrace' +daemon_signal = 'term' + +@contextlib.contextmanager +def setup(ctx, config): + """ + Setup all the remotes + """ + osds = ctx.cluster.only(teuthology.is_type('osd', config['cluster'])) + log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=teuthology.get_testdir(ctx)) + + for remote, roles_for_host in osds.remotes.items(): + log.info('Creating %s on %s' % (log_dir, remote.name)) + remote.run( + args=['mkdir', '-p', '-m0755', '--', log_dir], + wait=False, + ) + yield + +@contextlib.contextmanager +def execute(ctx, config): + """ + Run the blktrace program on remote machines. 
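For an osd whose data device happens to be /dev/sdb, the command assembled below boils down to running blktrace under daemon-helper from the log directory; an illustrative expansion (the device name is assumed for the example):

# Illustrative remote invocation built by execute() for a hypothetical /dev/sdb:
#   cd {testdir}/archive/performance/blktrace ; \
#   daemon-helper term sudo /usr/sbin/blktrace -o sdb -d /dev/sdb
# '-o sdb' comes from dev.rsplit("/", 1)[1]; the tracer is later stopped by
# closing the process's stdin, which daemon-helper turns into the 'term' signal.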
+ """ + procs = [] + testdir = teuthology.get_testdir(ctx) + log_dir = '{tdir}/archive/performance/blktrace'.format(tdir=testdir) + + osds = ctx.cluster.only(teuthology.is_type('osd')) + for remote, roles_for_host in osds.remotes.items(): + roles_to_devs = ctx.disk_config.remote_to_roles_to_dev[remote] + for role in teuthology.cluster_roles_of_type(roles_for_host, 'osd', + config['cluster']): + if roles_to_devs.get(role): + dev = roles_to_devs[role] + log.info("running blktrace on %s: %s" % (remote.name, dev)) + + proc = remote.run( + args=[ + 'cd', + log_dir, + run.Raw(';'), + 'daemon-helper', + daemon_signal, + 'sudo', + blktrace, + '-o', + dev.rsplit("/", 1)[1], + '-d', + dev, + ], + wait=False, + stdin=run.PIPE, + ) + procs.append(proc) + try: + yield + finally: + osds = ctx.cluster.only(teuthology.is_type('osd')) + log.info('stopping blktrace processs') + for proc in procs: + proc.stdin.close() + +@contextlib.contextmanager +def task(ctx, config): + """ + Usage: + blktrace: + + or: + blktrace: + cluster: backup + + Runs blktrace on all osds in the specified cluster (the 'ceph' cluster by + default). + """ + if config is None: + config = {} + config['cluster'] = config.get('cluster', 'ceph') + + with contextutil.nested( + lambda: setup(ctx=ctx, config=config), + lambda: execute(ctx=ctx, config=config), + ): + yield diff --git a/qa/tasks/boto.cfg.template b/qa/tasks/boto.cfg.template new file mode 100644 index 00000000..cdfe8873 --- /dev/null +++ b/qa/tasks/boto.cfg.template @@ -0,0 +1,2 @@ +[Boto] +http_socket_timeout = {idle_timeout} diff --git a/qa/tasks/cbt.py b/qa/tasks/cbt.py new file mode 100644 index 00000000..e234eff9 --- /dev/null +++ b/qa/tasks/cbt.py @@ -0,0 +1,283 @@ +import logging +import os +import yaml + +from teuthology import misc +from teuthology.orchestra import run +from teuthology.task import Task + +log = logging.getLogger(__name__) + + +class CBT(Task): + """ + Passes through a CBT configuration yaml fragment. 
+ """ + def __init__(self, ctx, config): + super(CBT, self).__init__(ctx, config) + self.log = log + + def hosts_of_type(self, type_): + return [r.name for r in self.ctx.cluster.only(misc.is_type(type_)).remotes.keys()] + + def generate_cbt_config(self): + mon_hosts = self.hosts_of_type('mon') + osd_hosts = self.hosts_of_type('osd') + client_hosts = self.hosts_of_type('client') + rgw_client = {} + rgw_client[client_hosts[0]] = None + rgw_hosts = self.config.get('cluster', {}).get('rgws', rgw_client) + cluster_config = dict( + user=self.config.get('cluster', {}).get('user', 'ubuntu'), + head=mon_hosts[0], + osds=osd_hosts, + mons=mon_hosts, + clients=client_hosts, + rgws=rgw_hosts, + osds_per_node=self.config.get('cluster', {}).get('osds_per_node', 1), + rebuild_every_test=False, + use_existing=True, + is_teuthology=self.config.get('cluster', {}).get('is_teuthology', True), + iterations=self.config.get('cluster', {}).get('iterations', 1), + tmp_dir='/tmp/cbt', + pool_profiles=self.config.get('cluster', {}).get('pool_profiles'), + ) + + benchmark_config = self.config.get('benchmarks') + benchmark_type = next(iter(benchmark_config.keys())) + if benchmark_type == 'librbdfio': + testdir = misc.get_testdir(self.ctx) + benchmark_config['librbdfio']['cmd_path'] = os.path.join(testdir, 'fio/fio') + if benchmark_type == 'cosbench': + # create cosbench_dir and cosbench_xml_dir + testdir = misc.get_testdir(self.ctx) + benchmark_config['cosbench']['cosbench_dir'] = os.path.join(testdir, 'cos') + benchmark_config['cosbench']['cosbench_xml_dir'] = os.path.join(testdir, 'xml') + self.ctx.cluster.run(args=['mkdir', '-p', '-m0755', '--', benchmark_config['cosbench']['cosbench_xml_dir']]) + benchmark_config['cosbench']['controller'] = osd_hosts[0] + + # set auth details + remotes_and_roles = self.ctx.cluster.remotes.items() + ips = [host for (host, port) in + (remote.ssh.get_transport().getpeername() for (remote, role_list) in remotes_and_roles)] + benchmark_config['cosbench']['auth'] = "username=cosbench:operator;password=intel2012;url=http://%s:80/auth/v1.0;retry=9" %(ips[0]) + + return dict( + cluster=cluster_config, + benchmarks=benchmark_config, + ) + + def install_dependencies(self): + system_type = misc.get_system_type(self.first_mon) + + if system_type == 'rpm': + install_cmd = ['sudo', 'yum', '-y', 'install'] + cbt_depends = ['python36-PyYAML', 'python36-lxml', 'librbd-devel', 'pdsh', 'collectl'] + else: + install_cmd = ['sudo', 'apt-get', '-y', '--force-yes', 'install'] + cbt_depends = ['python3-yaml', 'python3-lxml', 'librbd-dev', 'collectl'] + self.first_mon.run(args=install_cmd + cbt_depends) + + benchmark_type = next(iter(self.cbt_config.get('benchmarks').keys())) + self.log.info('benchmark: %s', benchmark_type) + + if benchmark_type == 'librbdfio': + # install fio + testdir = misc.get_testdir(self.ctx) + self.first_mon.run( + args=[ + 'git', 'clone', '-b', 'master', + 'https://github.com/axboe/fio.git', + '{tdir}/fio'.format(tdir=testdir) + ] + ) + self.first_mon.run( + args=[ + 'cd', os.path.join(testdir, 'fio'), run.Raw('&&'), + './configure', run.Raw('&&'), + 'make' + ] + ) + + if benchmark_type == 'cosbench': + # install cosbench + self.log.info('install dependencies for cosbench') + if system_type == 'rpm': + cosbench_depends = ['wget', 'unzip', 'java-1.7.0-openjdk', 'curl'] + else: + cosbench_depends = ['wget', 'unzip', 'openjdk-8-jre', 'curl'] + self.first_mon.run(args=install_cmd + cosbench_depends) + testdir = misc.get_testdir(self.ctx) + cosbench_version = '0.4.2.c3' + 
cosbench_location = 'https://github.com/intel-cloud/cosbench/releases/download/v0.4.2.c3/0.4.2.c3.zip' + os_version = misc.get_system_type(self.first_mon, False, True) + + # additional requirements for bionic + if os_version == '18.04': + self.first_mon.run( + args=['sudo', 'apt-get', '-y', 'purge', 'openjdk-11*']) + # use our own version of cosbench + cosbench_version = 'cosbench-0.4.2.c3.1' + # contains additional parameter "-N" to nc + cosbench_location = 'http://drop.ceph.com/qa/cosbench-0.4.2.c3.1.zip' + cosbench_dir = os.path.join(testdir, cosbench_version) + self.ctx.cluster.run(args=['mkdir', '-p', '-m0755', '--', cosbench_dir]) + self.first_mon.run( + args=[ + 'cd', testdir, run.Raw('&&'), + 'wget', + cosbench_location, run.Raw('&&'), + 'unzip', '{name}.zip'.format(name=cosbench_version), '-d', cosbench_version + ] + ) + else: + self.first_mon.run( + args=[ + 'cd', testdir, run.Raw('&&'), + 'wget', + cosbench_location, run.Raw('&&'), + 'unzip', '{name}.zip'.format(name=cosbench_version) + ] + ) + self.first_mon.run( + args=[ + 'cd', testdir, run.Raw('&&'), + 'ln', '-s', cosbench_version, 'cos', + ] + ) + self.first_mon.run( + args=[ + 'cd', os.path.join(testdir, 'cos'), run.Raw('&&'), + 'chmod', '+x', run.Raw('*.sh'), + ] + ) + + # start cosbench and check info + self.log.info('start cosbench') + self.first_mon.run( + args=[ + 'cd', testdir, run.Raw('&&'), + 'cd', 'cos', run.Raw('&&'), + 'sh', 'start-all.sh' + ] + ) + self.log.info('check cosbench info') + self.first_mon.run( + args=[ + 'cd', testdir, run.Raw('&&'), + 'cd', 'cos', run.Raw('&&'), + 'sh', 'cli.sh', 'info' + ] + ) + + def checkout_cbt(self): + testdir = misc.get_testdir(self.ctx) + repo = self.config.get('repo', 'https://github.com/ceph/cbt.git') + branch = self.config.get('branch', 'master') + branch = self.config.get('force-branch', branch) + sha1 = self.config.get('sha1') + self.first_mon.run( + args=[ + 'git', 'clone', '-b', branch, repo, + '{tdir}/cbt'.format(tdir=testdir) + ] + ) + if sha1: + self.first_mon.run( + args=[ + 'cd', os.path.join(testdir, 'cbt'), run.Raw('&&'), + 'git', 'reset', '--hard', sha1, + ] + ) + + def setup(self): + super(CBT, self).setup() + self.first_mon = next(iter(self.ctx.cluster.only(misc.get_first_mon(self.ctx, self.config)).remotes.keys())) + self.cbt_config = self.generate_cbt_config() + self.log.info('cbt configuration is %s', self.cbt_config) + self.cbt_dir = os.path.join(misc.get_archive_dir(self.ctx), 'cbt') + self.ctx.cluster.run(args=['mkdir', '-p', '-m0755', '--', self.cbt_dir]) + misc.write_file(self.first_mon, os.path.join(self.cbt_dir, 'cbt_config.yaml'), + yaml.safe_dump(self.cbt_config, default_flow_style=False)) + self.checkout_cbt() + self.install_dependencies() + + def begin(self): + super(CBT, self).begin() + testdir = misc.get_testdir(self.ctx) + self.first_mon.run( + args=[ + '{tdir}/cbt/cbt.py'.format(tdir=testdir), + '-a', self.cbt_dir, + '{cbtdir}/cbt_config.yaml'.format(cbtdir=self.cbt_dir), + ], + ) + preserve_file = os.path.join(self.ctx.archive, '.preserve') + open(preserve_file, 'a').close() + + def end(self): + super(CBT, self).end() + testdir = misc.get_testdir(self.ctx) + self.first_mon.run( + args=[ + 'rm', '--one-file-system', '-rf', '--', + '{tdir}/cbt'.format(tdir=testdir), + ] + ) + benchmark_type = next(iter(self.cbt_config.get('benchmarks').keys())) + if benchmark_type == 'librbdfio': + self.first_mon.run( + args=[ + 'rm', '--one-file-system', '-rf', '--', + '{tdir}/fio'.format(tdir=testdir), + ] + ) + + if benchmark_type == 'cosbench': + 
os_version = misc.get_system_type(self.first_mon, False, True) + if os_version == '18.04': + cosbench_version = 'cosbench-0.4.2.c3.1' + else: + cosbench_version = '0.4.2.c3' + # note: stop-all requires 'nc' + self.first_mon.run( + args=[ + 'cd', testdir, run.Raw('&&'), + 'cd', 'cos', run.Raw('&&'), + 'sh', 'stop-all.sh', + run.Raw('||'), 'true' + ] + ) + self.first_mon.run( + args=[ + 'sudo', 'killall', '-9', 'java', + run.Raw('||'), 'true' + ] + ) + self.first_mon.run( + args=[ + 'rm', '--one-file-system', '-rf', '--', + '{tdir}/cos'.format(tdir=testdir), + ] + ) + self.first_mon.run( + args=[ + 'rm', '--one-file-system', '-rf', '--', + '{tdir}/{version}'.format(tdir=testdir, version=cosbench_version), + ] + ) + self.first_mon.run( + args=[ + 'rm', '--one-file-system', '-rf', '--', + '{tdir}/{version}.zip'.format(tdir=testdir, version=cosbench_version), + ] + ) + self.first_mon.run( + args=[ + 'rm', '--one-file-system', '-rf', '--', + '{tdir}/xml'.format(tdir=testdir), + ] + ) + + +task = CBT diff --git a/qa/tasks/ceph.conf.template b/qa/tasks/ceph.conf.template new file mode 100644 index 00000000..a84043f5 --- /dev/null +++ b/qa/tasks/ceph.conf.template @@ -0,0 +1,105 @@ +[global] + chdir = "" + pid file = /var/run/ceph/$cluster-$name.pid + auth supported = cephx + + filestore xattr use omap = true + + mon clock drift allowed = 1.000 + + osd crush chooseleaf type = 0 + auth debug = true + + ms die on old message = true + ms die on bug = true + + mon max pg per osd = 10000 # >= luminous + mon pg warn max object skew = 0 + + osd pool default size = 2 + + mon osd allow primary affinity = true + mon osd allow pg remap = true + mon warn on legacy crush tunables = false + mon warn on crush straw calc version zero = false + mon warn on no sortbitwise = false + mon warn on osd down out interval zero = false + mon warn on too few osds = false + mon_warn_on_pool_pg_num_not_power_of_two = false + mon_warn_on_pool_no_redundancy = false + + osd pool default erasure code profile = "plugin=jerasure technique=reed_sol_van k=2 m=1 ruleset-failure-domain=osd crush-failure-domain=osd" + + osd default data pool replay window = 5 + + mon allow pool delete = true + + mon cluster log file level = debug + debug asserts on shutdown = true + mon health detail to clog = false + + # we see this fail in qa on *nautilus*; bump up retries + mon_client_directed_command_retry = 4 + +[osd] + osd journal size = 100 + + osd scrub load threshold = 5.0 + osd scrub max interval = 600 + + osd recover clone overlap = true + osd recovery max chunk = 1048576 + + osd debug shutdown = true + osd debug op order = true + osd debug verify stray on activate = true + + osd open classes on start = true + osd debug pg log writeout = true + + osd deep scrub update digest min age = 30 + + osd map max advance = 10 + + journal zero on create = true + + filestore ondisk finisher threads = 3 + filestore apply finisher threads = 3 + + bdev debug aio = true + osd debug misdirected ops = true + +[mgr] + debug ms = 1 + debug mgr = 20 + debug mon = 20 + debug auth = 20 + mon reweight min pgs per osd = 4 + mon reweight min bytes per osd = 10 + +[mon] + debug ms = 1 + debug mon = 20 + debug paxos = 20 + debug auth = 20 + mon data avail warn = 5 + mon mgr mkfs grace = 240 + mon reweight min pgs per osd = 4 + mon osd reporter subtree level = osd + mon osd prime pg temp = true + mon reweight min bytes per osd = 10 + + # rotate auth tickets quickly to exercise renewal paths + auth mon ticket ttl = 660 # 11m + auth service ticket ttl = 240 # 4m + + # don't 
complain about insecure global_id in the test suite + mon_warn_on_insecure_global_id_reclaim = false + mon_warn_on_insecure_global_id_reclaim_allowed = false + +[client] + rgw cache enabled = true + rgw enable ops log = true + rgw enable usage log = true + log file = /var/log/ceph/$cluster-$name.$pid.log + admin socket = /var/run/ceph/$cluster-$name.$pid.asok diff --git a/qa/tasks/ceph.py b/qa/tasks/ceph.py new file mode 100644 index 00000000..5551f274 --- /dev/null +++ b/qa/tasks/ceph.py @@ -0,0 +1,1896 @@ +""" +Ceph cluster task. + +Handle the setup, starting, and clean-up of a Ceph cluster. +""" +from io import BytesIO +from io import StringIO + +import argparse +import configobj +import contextlib +import errno +import logging +import os +import json +import time +import gevent +import re +import socket + +from paramiko import SSHException +from tasks.ceph_manager import CephManager, write_conf +from tarfile import ReadError +from tasks.cephfs.filesystem import Filesystem +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology import exceptions +from teuthology.orchestra import run +import tasks.ceph_client as cclient +from teuthology.orchestra.daemon import DaemonGroup + +CEPH_ROLE_TYPES = ['mon', 'mgr', 'osd', 'mds', 'rgw'] +DATA_PATH = '/var/lib/ceph/{type_}/{cluster}-{id_}' + +log = logging.getLogger(__name__) + + +def generate_caps(type_): + """ + Each call will return the next capability for each system type + (essentially a subset of possible role values). Valid types are osd, + mds and client. + """ + defaults = dict( + osd=dict( + mon='allow *', + mgr='allow *', + osd='allow *', + ), + mgr=dict( + mon='allow profile mgr', + osd='allow *', + mds='allow *', + ), + mds=dict( + mon='allow *', + mgr='allow *', + osd='allow *', + mds='allow', + ), + client=dict( + mon='allow rw', + mgr='allow r', + osd='allow rwx', + mds='allow', + ), + ) + for subsystem, capability in defaults[type_].items(): + yield '--cap' + yield subsystem + yield capability + + +@contextlib.contextmanager +def ceph_crash(ctx, config): + """ + Gather crash dumps from /var/lib/crash + """ + try: + yield + + finally: + if ctx.archive is not None: + log.info('Archiving crash dumps...') + path = os.path.join(ctx.archive, 'remote') + try: + os.makedirs(path) + except OSError: + pass + for remote in ctx.cluster.remotes.keys(): + sub = os.path.join(path, remote.shortname) + try: + os.makedirs(sub) + except OSError: + pass + try: + teuthology.pull_directory(remote, '/var/lib/ceph/crash', + os.path.join(sub, 'crash')) + except ReadError: + pass + + +@contextlib.contextmanager +def ceph_log(ctx, config): + """ + Create /var/log/ceph log directory that is open to everyone. + Add valgrind and profiling-logger directories. 
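As a concrete expansion of generate_caps above: it yields flat ceph-authtool argument triples, so with the defaults defined in that function (and Python 3.7+ dict ordering) the client case evaluates to:

# Expansion of generate_caps('client') with the defaults shown above:
assert list(generate_caps('client')) == [
    '--cap', 'mon', 'allow rw',
    '--cap', 'mgr', 'allow r',
    '--cap', 'osd', 'allow rwx',
    '--cap', 'mds', 'allow',
]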
+ + :param ctx: Context + :param config: Configuration + """ + log.info('Making ceph log dir writeable by non-root...') + run.wait( + ctx.cluster.run( + args=[ + 'sudo', + 'chmod', + '777', + '/var/log/ceph', + ], + wait=False, + ) + ) + log.info('Disabling ceph logrotate...') + run.wait( + ctx.cluster.run( + args=[ + 'sudo', + 'rm', '-f', '--', + '/etc/logrotate.d/ceph', + ], + wait=False, + ) + ) + log.info('Creating extra log directories...') + run.wait( + ctx.cluster.run( + args=[ + 'sudo', + 'install', '-d', '-m0777', '--', + '/var/log/ceph/valgrind', + '/var/log/ceph/profiling-logger', + ], + wait=False, + ) + ) + + class Rotater(object): + stop_event = gevent.event.Event() + + def invoke_logrotate(self): + # 1) install ceph-test.conf in /etc/logrotate.d + # 2) continuously loop over logrotate invocation with ceph-test.conf + while not self.stop_event.is_set(): + self.stop_event.wait(timeout=30) + try: + run.wait( + ctx.cluster.run( + args=['sudo', 'logrotate', '/etc/logrotate.d/ceph-test.conf' + ], + wait=False, + ) + ) + except exceptions.ConnectionLostError as e: + # Some tests may power off nodes during test, in which + # case we will see connection errors that we should ignore. + log.debug("Missed logrotate, node '{0}' is offline".format( + e.node)) + except EOFError: + # Paramiko sometimes raises this when it fails to + # connect to a node during open_session. As with + # ConnectionLostError, we ignore this because nodes + # are allowed to get power cycled during tests. + log.debug("Missed logrotate, EOFError") + except SSHException: + log.debug("Missed logrotate, SSHException") + except socket.error as e: + if e.errno in (errno.EHOSTUNREACH, errno.ECONNRESET): + log.debug("Missed logrotate, host unreachable") + else: + raise + + def begin(self): + self.thread = gevent.spawn(self.invoke_logrotate) + + def end(self): + self.stop_event.set() + self.thread.get() + + def write_rotate_conf(ctx, daemons): + testdir = teuthology.get_testdir(ctx) + remote_logrotate_conf = '%s/logrotate.ceph-test.conf' % testdir + rotate_conf_path = os.path.join(os.path.dirname(__file__), 'logrotate.conf') + with open(rotate_conf_path) as f: + conf = "" + for daemon, size in daemons.items(): + log.info('writing logrotate stanza for {}'.format(daemon)) + conf += f.read().format(daemon_type=daemon, + max_size=size) + f.seek(0, 0) + + for remote in ctx.cluster.remotes.keys(): + teuthology.write_file(remote=remote, + path=remote_logrotate_conf, + data=BytesIO(conf.encode()) + ) + remote.run( + args=[ + 'sudo', + 'mv', + remote_logrotate_conf, + '/etc/logrotate.d/ceph-test.conf', + run.Raw('&&'), + 'sudo', + 'chmod', + '0644', + '/etc/logrotate.d/ceph-test.conf', + run.Raw('&&'), + 'sudo', + 'chown', + 'root.root', + '/etc/logrotate.d/ceph-test.conf' + ] + ) + remote.chcon('/etc/logrotate.d/ceph-test.conf', + 'system_u:object_r:etc_t:s0') + + if ctx.config.get('log-rotate'): + daemons = ctx.config.get('log-rotate') + log.info('Setting up log rotation with ' + str(daemons)) + write_rotate_conf(ctx, daemons) + logrotater = Rotater() + logrotater.begin() + try: + yield + + finally: + if ctx.config.get('log-rotate'): + log.info('Shutting down logrotate') + logrotater.end() + ctx.cluster.run( + args=['sudo', 'rm', '/etc/logrotate.d/ceph-test.conf' + ] + ) + if ctx.archive is not None and \ + not (ctx.config.get('archive-on-error') and ctx.summary['success']): + # and logs + log.info('Compressing logs...') + run.wait( + ctx.cluster.run( + args=[ + 'sudo', + 'find', + '/var/log/ceph', + '-name', + '*.log', + 
'-print0', + run.Raw('|'), + 'sudo', + 'xargs', + '-0', + '--no-run-if-empty', + '--', + 'gzip', + '--', + ], + wait=False, + ), + ) + + log.info('Archiving logs...') + path = os.path.join(ctx.archive, 'remote') + try: + os.makedirs(path) + except OSError: + pass + for remote in ctx.cluster.remotes.keys(): + sub = os.path.join(path, remote.shortname) + try: + os.makedirs(sub) + except OSError: + pass + teuthology.pull_directory(remote, '/var/log/ceph', + os.path.join(sub, 'log')) + + +def assign_devs(roles, devs): + """ + Create a dictionary of devs indexed by roles + + :param roles: List of roles + :param devs: Corresponding list of devices. + :returns: Dictionary of devs indexed by roles. + """ + return dict(zip(roles, devs)) + + +@contextlib.contextmanager +def valgrind_post(ctx, config): + """ + After the tests run, look through all the valgrind logs. Exceptions are raised + if textual errors occurred in the logs, or if valgrind exceptions were detected in + the logs. + + :param ctx: Context + :param config: Configuration + """ + try: + yield + finally: + lookup_procs = list() + log.info('Checking for errors in any valgrind logs...') + for remote in ctx.cluster.remotes.keys(): + # look at valgrind logs for each node + proc = remote.run( + args="sudo zgrep '' /var/log/ceph/valgrind/* " + # include a second file so that we always get + # a filename prefix on the output + "/dev/null | sort | uniq", + wait=False, + check_status=False, + stdout=StringIO(), + ) + lookup_procs.append((proc, remote)) + + valgrind_exception = None + for (proc, remote) in lookup_procs: + proc.wait() + out = proc.stdout.getvalue() + for line in out.split('\n'): + if line == '': + continue + try: + (file, kind) = line.split(':') + except Exception: + log.error('failed to split line %s', line) + raise + log.debug('file %s kind %s', file, kind) + if (file.find('mds') >= 0) and kind.find('Lost') > 0: + continue + log.error('saw valgrind issue %s in %s', kind, file) + valgrind_exception = Exception('saw valgrind issues') + + if config.get('expect_valgrind_errors'): + if not valgrind_exception: + raise Exception('expected valgrind issues and found none') + else: + if valgrind_exception: + raise valgrind_exception + + +@contextlib.contextmanager +def crush_setup(ctx, config): + cluster_name = config['cluster'] + first_mon = teuthology.get_first_mon(ctx, config, cluster_name) + (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys() + + profile = config.get('crush_tunables', 'default') + log.info('Setting crush tunables to %s', profile) + mon_remote.run( + args=['sudo', 'ceph', '--cluster', cluster_name, + 'osd', 'crush', 'tunables', profile]) + yield + + +@contextlib.contextmanager +def create_rbd_pool(ctx, config): + cluster_name = config['cluster'] + first_mon = teuthology.get_first_mon(ctx, config, cluster_name) + (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys() + log.info('Waiting for OSDs to come up') + teuthology.wait_until_osds_up( + ctx, + cluster=ctx.cluster, + remote=mon_remote, + ceph_cluster=cluster_name, + ) + if config.get('create_rbd_pool', True): + log.info('Creating RBD pool') + mon_remote.run( + args=['sudo', 'ceph', '--cluster', cluster_name, + 'osd', 'pool', 'create', 'rbd', '8']) + mon_remote.run( + args=[ + 'sudo', 'ceph', '--cluster', cluster_name, + 'osd', 'pool', 'application', 'enable', + 'rbd', 'rbd', '--yes-i-really-mean-it' + ], + check_status=False) + yield + +@contextlib.contextmanager +def cephfs_setup(ctx, config): + cluster_name = config['cluster'] + + first_mon = 
teuthology.get_first_mon(ctx, config, cluster_name) + (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys() + mdss = ctx.cluster.only(teuthology.is_type('mds', cluster_name)) + # If there are any MDSs, then create a filesystem for them to use + # Do this last because requires mon cluster to be up and running + if mdss.remotes: + log.info('Setting up CephFS filesystem...') + + Filesystem(ctx, fs_config=config.get('cephfs', None), name='cephfs', + create=True, ec_profile=config.get('cephfs_ec_profile', None)) + + yield + + +def get_mons(roles, ips, cluster_name, + mon_bind_msgr2=False, + mon_bind_addrvec=False): + """ + Get monitors and their associated addresses + """ + mons = {} + v1_ports = {} + v2_ports = {} + mon_id = 0 + is_mon = teuthology.is_type('mon', cluster_name) + for idx, roles in enumerate(roles): + for role in roles: + if not is_mon(role): + continue + if ips[idx] not in v1_ports: + v1_ports[ips[idx]] = 6789 + else: + v1_ports[ips[idx]] += 1 + if mon_bind_msgr2: + if ips[idx] not in v2_ports: + v2_ports[ips[idx]] = 3300 + addr = '{ip}'.format(ip=ips[idx]) + else: + assert mon_bind_addrvec + v2_ports[ips[idx]] += 1 + addr = '[v2:{ip}:{port2},v1:{ip}:{port1}]'.format( + ip=ips[idx], + port2=v2_ports[ips[idx]], + port1=v1_ports[ips[idx]], + ) + elif mon_bind_addrvec: + addr = '[v1:{ip}:{port}]'.format( + ip=ips[idx], + port=v1_ports[ips[idx]], + ) + else: + addr = '{ip}:{port}'.format( + ip=ips[idx], + port=v1_ports[ips[idx]], + ) + mon_id += 1 + mons[role] = addr + assert mons + return mons + +def skeleton_config(ctx, roles, ips, mons, cluster='ceph'): + """ + Returns a ConfigObj that is prefilled with a skeleton config. + + Use conf[section][key]=value or conf.merge to change it. + + Use conf.write to write it out, override .filename first if you want. + """ + path = os.path.join(os.path.dirname(__file__), 'ceph.conf.template') + conf = configobj.ConfigObj(path, file_error=True) + mon_hosts = [] + for role, addr in mons.items(): + mon_cluster, _, _ = teuthology.split_role(role) + if mon_cluster != cluster: + continue + name = teuthology.ceph_role(role) + conf.setdefault(name, {}) + mon_hosts.append(addr) + conf.setdefault('global', {}) + conf['global']['mon host'] = ','.join(mon_hosts) + # set up standby mds's + is_mds = teuthology.is_type('mds', cluster) + for roles_subset in roles: + for role in roles_subset: + if is_mds(role): + name = teuthology.ceph_role(role) + conf.setdefault(name, {}) + return conf + +def create_simple_monmap(ctx, remote, conf, mons, + path=None, + mon_bind_addrvec=False): + """ + Writes a simple monmap based on current ceph.conf into path, or + /monmap by default. + + Assumes ceph_conf is up to date. + + Assumes mon sections are named "mon.*", with the dot. + + :return the FSID (as a string) of the newly created monmap + """ + + addresses = list(mons.items()) + assert addresses, "There are no monitors in config!" 
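A brief worked example for get_mons above, assuming a single illustrative host 10.0.0.1 that carries both mon.a and mon.b with msgr2 and addrvec enabled: the first monitor on an IP gets the bare IP (default ports), and each further monitor on that IP gets an explicit addrvec with the next free v2/v1 ports:

# Hypothetical expansion of get_mons() for two monitors sharing one IP:
mons = get_mons(
    roles=[['mon.a', 'mon.b', 'osd.0']],
    ips=['10.0.0.1'],
    cluster_name='ceph',
    mon_bind_msgr2=True,
    mon_bind_addrvec=True,
)
# mons == {
#     'mon.a': '10.0.0.1',                             # binds default ports (v2 3300, v1 6789)
#     'mon.b': '[v2:10.0.0.1:3301,v1:10.0.0.1:6790]',  # next free ports on that IP
# }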
+ log.debug('Ceph mon addresses: %s', addresses) + + testdir = teuthology.get_testdir(ctx) + args = [ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'monmaptool', + '--create', + '--clobber', + ] + if mon_bind_addrvec: + args.extend(['--enable-all-features']) + for (role, addr) in addresses: + _, _, n = teuthology.split_role(role) + if mon_bind_addrvec and (',' in addr or 'v' in addr or ':' in addr): + args.extend(('--addv', n, addr)) + else: + args.extend(('--add', n, addr)) + if not path: + path = '{tdir}/monmap'.format(tdir=testdir) + args.extend([ + '--print', + path + ]) + + monmap_output = remote.sh(args) + fsid = re.search("generated fsid (.+)$", + monmap_output, re.MULTILINE).group(1) + return fsid + +@contextlib.contextmanager +def cluster(ctx, config): + """ + Handle the creation and removal of a ceph cluster. + + On startup: + Create directories needed for the cluster. + Create remote journals for all osds. + Create and set keyring. + Copy the monmap to the test systems. + Setup mon nodes. + Setup mds nodes. + Mkfs osd nodes. + Add keyring information to monmaps + Mkfs mon nodes. + + On exit: + If errors occurred, extract a failure message and store in ctx.summary. + Unmount all test files and temporary journaling files. + Save the monitor information and archive all ceph logs. + Cleanup the keyring setup, and remove all monitor map and data files left over. + + :param ctx: Context + :param config: Configuration + """ + if ctx.config.get('use_existing_cluster', False) is True: + log.info("'use_existing_cluster' is true; skipping cluster creation") + yield + + testdir = teuthology.get_testdir(ctx) + cluster_name = config['cluster'] + data_dir = '{tdir}/{cluster}.data'.format(tdir=testdir, cluster=cluster_name) + log.info('Creating ceph cluster %s...', cluster_name) + log.info('config %s', config) + log.info('ctx.config %s', ctx.config) + run.wait( + ctx.cluster.run( + args=[ + 'install', '-d', '-m0755', '--', + data_dir, + ], + wait=False, + ) + ) + + run.wait( + ctx.cluster.run( + args=[ + 'sudo', + 'install', '-d', '-m0777', '--', '/var/run/ceph', + ], + wait=False, + ) + ) + + devs_to_clean = {} + remote_to_roles_to_devs = {} + osds = ctx.cluster.only(teuthology.is_type('osd', cluster_name)) + for remote, roles_for_host in osds.remotes.items(): + devs = teuthology.get_scratch_devices(remote) + roles_to_devs = assign_devs( + teuthology.cluster_roles_of_type(roles_for_host, 'osd', cluster_name), devs + ) + devs_to_clean[remote] = [] + log.info('osd dev map: {}'.format(roles_to_devs)) + assert roles_to_devs, \ + "remote {} has osd roles, but no osd devices were specified!".format(remote.hostname) + remote_to_roles_to_devs[remote] = roles_to_devs + log.info("remote_to_roles_to_devs: {}".format(remote_to_roles_to_devs)) + for osd_role, dev_name in remote_to_roles_to_devs.items(): + assert dev_name, "{} has no associated device!".format(osd_role) + + log.info('Generating config...') + remotes_and_roles = ctx.cluster.remotes.items() + roles = [role_list for (remote, role_list) in remotes_and_roles] + ips = [host for (host, port) in + (remote.ssh.get_transport().getpeername() for (remote, role_list) in remotes_and_roles)] + mons = get_mons( + roles, ips, cluster_name, + mon_bind_msgr2=config.get('mon_bind_msgr2'), + mon_bind_addrvec=config.get('mon_bind_addrvec'), + ) + conf = skeleton_config( + ctx, roles=roles, ips=ips, mons=mons, cluster=cluster_name, + ) + for section, keys in config['conf'].items(): + for key, value in keys.items(): + 
log.info("[%s] %s = %s" % (section, key, value)) + if section not in conf: + conf[section] = {} + conf[section][key] = value + + if not hasattr(ctx, 'ceph'): + ctx.ceph = {} + ctx.ceph[cluster_name] = argparse.Namespace() + ctx.ceph[cluster_name].conf = conf + ctx.ceph[cluster_name].mons = mons + + default_keyring = '/etc/ceph/{cluster}.keyring'.format(cluster=cluster_name) + keyring_path = config.get('keyring_path', default_keyring) + + coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) + + firstmon = teuthology.get_first_mon(ctx, config, cluster_name) + + log.info('Setting up %s...' % firstmon) + ctx.cluster.only(firstmon).run( + args=[ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph-authtool', + '--create-keyring', + keyring_path, + ], + ) + ctx.cluster.only(firstmon).run( + args=[ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph-authtool', + '--gen-key', + '--name=mon.', + keyring_path, + ], + ) + ctx.cluster.only(firstmon).run( + args=[ + 'sudo', + 'chmod', + '0644', + keyring_path, + ], + ) + (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() + monmap_path = '{tdir}/{cluster}.monmap'.format(tdir=testdir, + cluster=cluster_name) + fsid = create_simple_monmap( + ctx, + remote=mon0_remote, + conf=conf, + mons=mons, + path=monmap_path, + mon_bind_addrvec=config.get('mon_bind_addrvec'), + ) + if not 'global' in conf: + conf['global'] = {} + conf['global']['fsid'] = fsid + + default_conf_path = '/etc/ceph/{cluster}.conf'.format(cluster=cluster_name) + conf_path = config.get('conf_path', default_conf_path) + log.info('Writing %s for FSID %s...' % (conf_path, fsid)) + write_conf(ctx, conf_path, cluster_name) + + log.info('Creating admin key on %s...' % firstmon) + ctx.cluster.only(firstmon).run( + args=[ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph-authtool', + '--gen-key', + '--name=client.admin', + '--cap', 'mon', 'allow *', + '--cap', 'osd', 'allow *', + '--cap', 'mds', 'allow *', + '--cap', 'mgr', 'allow *', + keyring_path, + ], + ) + + log.info('Copying monmap to all nodes...') + keyring = teuthology.get_file( + remote=mon0_remote, + path=keyring_path, + ) + monmap = teuthology.get_file( + remote=mon0_remote, + path=monmap_path, + ) + + for rem in ctx.cluster.remotes.keys(): + # copy mon key and initial monmap + log.info('Sending monmap to node {remote}'.format(remote=rem)) + teuthology.sudo_write_file( + remote=rem, + path=keyring_path, + data=keyring, + perms='0644' + ) + teuthology.write_file( + remote=rem, + path=monmap_path, + data=monmap, + ) + + log.info('Setting up mon nodes...') + mons = ctx.cluster.only(teuthology.is_type('mon', cluster_name)) + + if not config.get('skip_mgr_daemons', False): + log.info('Setting up mgr nodes...') + mgrs = ctx.cluster.only(teuthology.is_type('mgr', cluster_name)) + for remote, roles_for_host in mgrs.remotes.items(): + for role in teuthology.cluster_roles_of_type(roles_for_host, 'mgr', + cluster_name): + _, _, id_ = teuthology.split_role(role) + mgr_dir = DATA_PATH.format( + type_='mgr', cluster=cluster_name, id_=id_) + remote.run( + args=[ + 'sudo', + 'mkdir', + '-p', + mgr_dir, + run.Raw('&&'), + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph-authtool', + '--create-keyring', + '--gen-key', + '--name=mgr.{id}'.format(id=id_), + mgr_dir + '/keyring', + ], + ) + + log.info('Setting up mds nodes...') + mdss = ctx.cluster.only(teuthology.is_type('mds', cluster_name)) + for remote, roles_for_host in mdss.remotes.items(): + for role 
in teuthology.cluster_roles_of_type(roles_for_host, 'mds', + cluster_name): + _, _, id_ = teuthology.split_role(role) + mds_dir = DATA_PATH.format( + type_='mds', cluster=cluster_name, id_=id_) + remote.run( + args=[ + 'sudo', + 'mkdir', + '-p', + mds_dir, + run.Raw('&&'), + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph-authtool', + '--create-keyring', + '--gen-key', + '--name=mds.{id}'.format(id=id_), + mds_dir + '/keyring', + ], + ) + remote.run(args=[ + 'sudo', 'chown', '-R', 'ceph:ceph', mds_dir + ]) + + cclient.create_keyring(ctx, cluster_name) + log.info('Running mkfs on osd nodes...') + + if not hasattr(ctx, 'disk_config'): + ctx.disk_config = argparse.Namespace() + if not hasattr(ctx.disk_config, 'remote_to_roles_to_dev'): + ctx.disk_config.remote_to_roles_to_dev = {} + if not hasattr(ctx.disk_config, 'remote_to_roles_to_dev_mount_options'): + ctx.disk_config.remote_to_roles_to_dev_mount_options = {} + if not hasattr(ctx.disk_config, 'remote_to_roles_to_dev_fstype'): + ctx.disk_config.remote_to_roles_to_dev_fstype = {} + + teuthology.deep_merge(ctx.disk_config.remote_to_roles_to_dev, remote_to_roles_to_devs) + + log.info("ctx.disk_config.remote_to_roles_to_dev: {r}".format(r=str(ctx.disk_config.remote_to_roles_to_dev))) + for remote, roles_for_host in osds.remotes.items(): + roles_to_devs = remote_to_roles_to_devs[remote] + + for role in teuthology.cluster_roles_of_type(roles_for_host, 'osd', cluster_name): + _, _, id_ = teuthology.split_role(role) + mnt_point = DATA_PATH.format( + type_='osd', cluster=cluster_name, id_=id_) + remote.run( + args=[ + 'sudo', + 'mkdir', + '-p', + mnt_point, + ]) + log.info('roles_to_devs: {}'.format(roles_to_devs)) + log.info('role: {}'.format(role)) + if roles_to_devs.get(role): + dev = roles_to_devs[role] + fs = config.get('fs') + package = None + mkfs_options = config.get('mkfs_options') + mount_options = config.get('mount_options') + if fs == 'btrfs': + # package = 'btrfs-tools' + if mount_options is None: + mount_options = ['noatime', 'user_subvol_rm_allowed'] + if mkfs_options is None: + mkfs_options = ['-m', 'single', + '-l', '32768', + '-n', '32768'] + if fs == 'xfs': + # package = 'xfsprogs' + if mount_options is None: + mount_options = ['noatime'] + if mkfs_options is None: + mkfs_options = ['-f', '-i', 'size=2048'] + if fs == 'ext4' or fs == 'ext3': + if mount_options is None: + mount_options = ['noatime', 'user_xattr'] + + if mount_options is None: + mount_options = [] + if mkfs_options is None: + mkfs_options = [] + mkfs = ['mkfs.%s' % fs] + mkfs_options + log.info('%s on %s on %s' % (mkfs, dev, remote)) + if package is not None: + remote.sh('sudo apt-get install -y %s' % package) + + try: + remote.run(args=['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev]) + except run.CommandFailedError: + # Newer btfs-tools doesn't prompt for overwrite, use -f + if '-f' not in mount_options: + mkfs_options.append('-f') + mkfs = ['mkfs.%s' % fs] + mkfs_options + log.info('%s on %s on %s' % (mkfs, dev, remote)) + remote.run(args=['yes', run.Raw('|')] + ['sudo'] + mkfs + [dev]) + + log.info('mount %s on %s -o %s' % (dev, remote, + ','.join(mount_options))) + remote.run( + args=[ + 'sudo', + 'mount', + '-t', fs, + '-o', ','.join(mount_options), + dev, + mnt_point, + ] + ) + remote.run( + args=[ + 'sudo', '/sbin/restorecon', mnt_point, + ], + check_status=False, + ) + if not remote in ctx.disk_config.remote_to_roles_to_dev_mount_options: + ctx.disk_config.remote_to_roles_to_dev_mount_options[remote] = {} + 
ctx.disk_config.remote_to_roles_to_dev_mount_options[remote][role] = mount_options + if not remote in ctx.disk_config.remote_to_roles_to_dev_fstype: + ctx.disk_config.remote_to_roles_to_dev_fstype[remote] = {} + ctx.disk_config.remote_to_roles_to_dev_fstype[remote][role] = fs + devs_to_clean[remote].append(mnt_point) + + for role in teuthology.cluster_roles_of_type(roles_for_host, 'osd', cluster_name): + _, _, id_ = teuthology.split_role(role) + try: + remote.run( + args=[ + 'sudo', + 'MALLOC_CHECK_=3', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph-osd', + '--no-mon-config', + '--cluster', + cluster_name, + '--mkfs', + '--mkkey', + '-i', id_, + '--monmap', monmap_path, + ], + ) + except run.CommandFailedError: + # try without --no-mon-config.. this may be an upgrade test + remote.run( + args=[ + 'sudo', + 'MALLOC_CHECK_=3', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph-osd', + '--cluster', + cluster_name, + '--mkfs', + '--mkkey', + '-i', id_, + '--monmap', monmap_path, + ], + ) + mnt_point = DATA_PATH.format( + type_='osd', cluster=cluster_name, id_=id_) + try: + remote.run(args=[ + 'sudo', 'chown', '-R', 'ceph:ceph', mnt_point + ]) + except run.CommandFailedError as e: + # hammer does not have ceph user, so ignore this error + log.info('ignoring error when chown ceph:ceph,' + 'probably installing hammer: %s', e) + + log.info('Reading keys from all nodes...') + keys_fp = BytesIO() + keys = [] + for remote, roles_for_host in ctx.cluster.remotes.items(): + for type_ in ['mgr', 'mds', 'osd']: + if type_ == 'mgr' and config.get('skip_mgr_daemons', False): + continue + for role in teuthology.cluster_roles_of_type(roles_for_host, type_, cluster_name): + _, _, id_ = teuthology.split_role(role) + data = teuthology.get_file( + remote=remote, + path=os.path.join( + DATA_PATH.format( + type_=type_, id_=id_, cluster=cluster_name), + 'keyring', + ), + sudo=True, + ) + keys.append((type_, id_, data)) + keys_fp.write(data) + for remote, roles_for_host in ctx.cluster.remotes.items(): + for role in teuthology.cluster_roles_of_type(roles_for_host, 'client', cluster_name): + _, _, id_ = teuthology.split_role(role) + data = teuthology.get_file( + remote=remote, + path='/etc/ceph/{cluster}.client.{id}.keyring'.format(id=id_, cluster=cluster_name) + ) + keys.append(('client', id_, data)) + keys_fp.write(data) + + log.info('Adding keys to all mons...') + writes = mons.run( + args=[ + 'sudo', 'tee', '-a', + keyring_path, + ], + stdin=run.PIPE, + wait=False, + stdout=BytesIO(), + ) + keys_fp.seek(0) + teuthology.feed_many_stdins_and_close(keys_fp, writes) + run.wait(writes) + for type_, id_, data in keys: + run.wait( + mons.run( + args=[ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph-authtool', + keyring_path, + '--name={type}.{id}'.format( + type=type_, + id=id_, + ), + ] + list(generate_caps(type_)), + wait=False, + ), + ) + + log.info('Running mkfs on mon nodes...') + for remote, roles_for_host in mons.remotes.items(): + for role in teuthology.cluster_roles_of_type(roles_for_host, 'mon', cluster_name): + _, _, id_ = teuthology.split_role(role) + mnt_point = DATA_PATH.format( + type_='mon', id_=id_, cluster=cluster_name) + remote.run( + args=[ + 'sudo', + 'mkdir', + '-p', + mnt_point, + ], + ) + remote.run( + args=[ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph-mon', + '--cluster', cluster_name, + '--mkfs', + '-i', id_, + '--monmap', monmap_path, + '--keyring', keyring_path, + ], + ) + try: + remote.run(args=[ + 'sudo', 
'chown', '-R', 'ceph:ceph', mnt_point + ]) + except run.CommandFailedError as e: + # hammer does not have ceph user, so ignore this error + log.info('ignoring error when chown ceph:ceph,' + 'probably installing hammer: %s', e) + + run.wait( + mons.run( + args=[ + 'rm', + '--', + monmap_path, + ], + wait=False, + ), + ) + + try: + yield + except Exception: + # we need to know this below + ctx.summary['success'] = False + raise + finally: + (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() + + log.info('Checking cluster log for badness...') + + def first_in_ceph_log(pattern, excludes): + """ + Find the first occurrence of the pattern specified in the Ceph log, + Returns None if none found. + + :param pattern: Pattern scanned for. + :param excludes: Patterns to ignore. + :return: First line of text (or None if not found) + """ + args = [ + 'sudo', + 'egrep', pattern, + '/var/log/ceph/{cluster}.log'.format(cluster=cluster_name), + ] + for exclude in excludes: + args.extend([run.Raw('|'), 'egrep', '-v', exclude]) + args.extend([ + run.Raw('|'), 'head', '-n', '1', + ]) + stdout = mon0_remote.sh(args) + return stdout or None + + if first_in_ceph_log('\[ERR\]|\[WRN\]|\[SEC\]', + config['log_whitelist']) is not None: + log.warning('Found errors (ERR|WRN|SEC) in cluster log') + ctx.summary['success'] = False + # use the most severe problem as the failure reason + if 'failure_reason' not in ctx.summary: + for pattern in ['\[SEC\]', '\[ERR\]', '\[WRN\]']: + match = first_in_ceph_log(pattern, config['log_whitelist']) + if match is not None: + ctx.summary['failure_reason'] = \ + '"{match}" in cluster log'.format( + match=match.rstrip('\n'), + ) + break + + for remote, dirs in devs_to_clean.items(): + for dir_ in dirs: + log.info('Unmounting %s on %s' % (dir_, remote)) + try: + remote.run( + args=[ + 'sync', + run.Raw('&&'), + 'sudo', + 'umount', + '-f', + dir_ + ] + ) + except Exception as e: + remote.run(args=[ + 'sudo', + run.Raw('PATH=/usr/sbin:$PATH'), + 'lsof', + run.Raw(';'), + 'ps', 'auxf', + ]) + raise e + + if ctx.archive is not None and \ + not (ctx.config.get('archive-on-error') and ctx.summary['success']): + + # archive mon data, too + log.info('Archiving mon data...') + path = os.path.join(ctx.archive, 'data') + try: + os.makedirs(path) + except OSError as e: + if e.errno == errno.EEXIST: + pass + else: + raise + for remote, roles in mons.remotes.items(): + for role in roles: + is_mon = teuthology.is_type('mon', cluster_name) + if is_mon(role): + _, _, id_ = teuthology.split_role(role) + mon_dir = DATA_PATH.format( + type_='mon', id_=id_, cluster=cluster_name) + teuthology.pull_directory_tarball( + remote, + mon_dir, + path + '/' + role + '.tgz') + + log.info('Cleaning ceph cluster...') + run.wait( + ctx.cluster.run( + args=[ + 'sudo', + 'rm', + '-rf', + '--', + conf_path, + keyring_path, + data_dir, + monmap_path, + run.Raw('{tdir}/../*.pid'.format(tdir=testdir)), + ], + wait=False, + ), + ) + + +def osd_scrub_pgs(ctx, config): + """ + Scrub pgs when we exit. + + First make sure all pgs are active and clean. + Next scrub all osds. + Then periodically check until all pgs have scrub time stamps that + indicate the last scrub completed. Time out if no progress is made + here after two minutes. 
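+
+ :param ctx: Context
+ :param config: Configuration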
+ """ + retries = 40 + delays = 20 + cluster_name = config['cluster'] + manager = ctx.managers[cluster_name] + all_clean = False + for _ in range(0, retries): + stats = manager.get_pg_stats() + unclean = [stat['pgid'] for stat in stats if 'active+clean' not in stat['state']] + osd_dump = manager.get_osd_dump_json() + for pool in osd_dump['pools']: + pg_num_target = pool.get('pg_num_target') + if pg_num_target is None: + # mimic does not adjust pg num automatically + split_merge = False + break + elif pg_num_target != pool['pg_num']: + split_merge = True + break + else: + split_merge = False + if not unclean and not split_merge: + all_clean = True + break + log.info( + "Waiting for all PGs to be active+clean and split+merged, waiting on %s to go clean and/or %s to split/merge" % (unclean, split_merge)) + time.sleep(delays) + if not all_clean: + raise RuntimeError("Scrubbing terminated -- not all pgs were active and clean.") + check_time_now = time.localtime() + time.sleep(1) + all_roles = teuthology.all_roles(ctx.cluster) + for role in teuthology.cluster_roles_of_type(all_roles, 'osd', cluster_name): + log.info("Scrubbing {osd}".format(osd=role)) + _, _, id_ = teuthology.split_role(role) + # allow this to fail; in certain cases the OSD might not be up + # at this point. we will catch all pgs below. + try: + manager.raw_cluster_cmd('tell', 'osd.' + id_, 'config', 'set', + 'osd_debug_deep_scrub_sleep', '0'); + manager.raw_cluster_cmd('osd', 'deep-scrub', id_) + except run.CommandFailedError: + pass + prev_good = 0 + gap_cnt = 0 + loop = True + while loop: + stats = manager.get_pg_stats() + timez = [(stat['pgid'],stat['last_scrub_stamp']) for stat in stats] + loop = False + thiscnt = 0 + for (pgid, tmval) in timez: + pgtm = time.strptime(tmval[0:tmval.find('.')], '%Y-%m-%d %H:%M:%S') + if pgtm > check_time_now: + thiscnt += 1 + else: + log.info('pgid %s last_scrub_stamp %s %s <= %s', pgid, tmval, pgtm, check_time_now) + loop = True + if thiscnt > prev_good: + prev_good = thiscnt + gap_cnt = 0 + else: + gap_cnt += 1 + if gap_cnt % 6 == 0: + for (pgid, tmval) in timez: + # re-request scrub every so often in case the earlier + # request was missed. do not do it every time because + # the scrub may be in progress or not reported yet and + # we will starve progress. + manager.raw_cluster_cmd('pg', 'deep-scrub', pgid) + if gap_cnt > retries: + raise RuntimeError('Exiting scrub checking -- not all pgs scrubbed.') + if loop: + log.info('Still waiting for all pgs to be scrubbed.') + time.sleep(delays) + + +@contextlib.contextmanager +def run_daemon(ctx, config, type_): + """ + Run daemons for a role type. Handle the startup and termination of a a daemon. + On startup -- set coverages, cpu_profile, valgrind values for all remotes, + and a max_mds value for one mds. + On cleanup -- Stop all existing daemons of this type. + + :param ctx: Context + :param config: Configuration + :paran type_: Role type + """ + cluster_name = config['cluster'] + log.info('Starting %s daemons in cluster %s...', type_, cluster_name) + testdir = teuthology.get_testdir(ctx) + daemons = ctx.cluster.only(teuthology.is_type(type_, cluster_name)) + + # check whether any daemons if this type are configured + if daemons is None: + return + coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) + + daemon_signal = 'kill' + if config.get('coverage') or config.get('valgrind') is not None: + daemon_signal = 'term' + + # create osds in order. 
(this only matters for pre-luminous, which might + # be hammer, which doesn't take an id_ argument to legacy 'osd create'). + osd_uuids = {} + for remote, roles_for_host in daemons.remotes.items(): + is_type_ = teuthology.is_type(type_, cluster_name) + for role in roles_for_host: + if not is_type_(role): + continue + _, _, id_ = teuthology.split_role(role) + + + if type_ == 'osd': + datadir='/var/lib/ceph/osd/{cluster}-{id}'.format( + cluster=cluster_name, id=id_) + osd_uuid = teuthology.get_file( + remote=remote, + path=datadir + '/fsid', + sudo=True, + ).decode().strip() + osd_uuids[id_] = osd_uuid + for osd_id in range(len(osd_uuids)): + id_ = str(osd_id) + osd_uuid = osd_uuids.get(id_) + try: + remote.run( + args=[ + 'sudo', 'ceph', '--cluster', cluster_name, + 'osd', 'new', osd_uuid, id_, + ] + ) + except: + # fallback to pre-luminous (hammer or jewel) + remote.run( + args=[ + 'sudo', 'ceph', '--cluster', cluster_name, + 'osd', 'create', osd_uuid, + ] + ) + if config.get('add_osds_to_crush'): + remote.run( + args=[ + 'sudo', 'ceph', '--cluster', cluster_name, + 'osd', 'crush', 'create-or-move', 'osd.' + id_, + '1.0', 'host=localhost', 'root=default', + ] + ) + + for remote, roles_for_host in daemons.remotes.items(): + is_type_ = teuthology.is_type(type_, cluster_name) + for role in roles_for_host: + if not is_type_(role): + continue + _, _, id_ = teuthology.split_role(role) + + run_cmd = [ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'daemon-helper', + daemon_signal, + ] + run_cmd_tail = [ + 'ceph-%s' % (type_), + '-f', + '--cluster', cluster_name, + '-i', id_] + + if type_ in config.get('cpu_profile', []): + profile_path = '/var/log/ceph/profiling-logger/%s.prof' % (role) + run_cmd.extend(['env', 'CPUPROFILE=%s' % profile_path]) + + if config.get('valgrind') is not None: + valgrind_args = None + if type_ in config['valgrind']: + valgrind_args = config['valgrind'][type_] + if role in config['valgrind']: + valgrind_args = config['valgrind'][role] + run_cmd = teuthology.get_valgrind_args(testdir, role, + run_cmd, + valgrind_args) + + run_cmd.extend(run_cmd_tail) + + # always register mgr; don't necessarily start + ctx.daemons.register_daemon( + remote, type_, id_, + cluster=cluster_name, + args=run_cmd, + logger=log.getChild(role), + stdin=run.PIPE, + wait=False + ) + if type_ != 'mgr' or not config.get('skip_mgr_daemons', False): + role = cluster_name + '.' + type_ + ctx.daemons.get_daemon(type_, id_, cluster_name).restart() + + try: + yield + finally: + teuthology.stop_daemons_of_type(ctx, type_, cluster_name) + + +def healthy(ctx, config): + """ + Wait for all osd's to be up, and for the ceph health monitor to return HEALTH_OK. 
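+ Respects the 'wait-for-healthy' config flag and, if MDS daemons are
+ present, also waits for them to become healthy.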
+ + :param ctx: Context + :param config: Configuration + """ + config = config if isinstance(config, dict) else dict() + cluster_name = config.get('cluster', 'ceph') + log.info('Waiting until %s daemons up and pgs clean...', cluster_name) + manager = ctx.managers[cluster_name] + try: + manager.wait_for_mgr_available(timeout=30) + except (run.CommandFailedError, AssertionError) as e: + log.info('ignoring mgr wait error, probably testing upgrade: %s', e) + + firstmon = teuthology.get_first_mon(ctx, config, cluster_name) + (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() + teuthology.wait_until_osds_up( + ctx, + cluster=ctx.cluster, + remote=mon0_remote, + ceph_cluster=cluster_name, + ) + + try: + manager.flush_all_pg_stats() + except (run.CommandFailedError, Exception) as e: + log.info('ignoring flush pg stats error, probably testing upgrade: %s', e) + manager.wait_for_clean() + + if config.get('wait-for-healthy', True): + log.info('Waiting until ceph cluster %s is healthy...', cluster_name) + teuthology.wait_until_healthy( + ctx, + remote=mon0_remote, + ceph_cluster=cluster_name, + ) + + if ctx.cluster.only(teuthology.is_type('mds', cluster_name)).remotes: + # Some MDSs exist, wait for them to be healthy + ceph_fs = Filesystem(ctx) # TODO: make Filesystem cluster-aware + ceph_fs.wait_for_daemons(timeout=300) + + +def wait_for_osds_up(ctx, config): + """ + Wait for all osd's to come up. + + :param ctx: Context + :param config: Configuration + """ + log.info('Waiting until ceph osds are all up...') + cluster_name = config.get('cluster', 'ceph') + firstmon = teuthology.get_first_mon(ctx, config, cluster_name) + (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() + teuthology.wait_until_osds_up( + ctx, + cluster=ctx.cluster, + remote=mon0_remote + ) + + +def wait_for_mon_quorum(ctx, config): + """ + Check renote ceph status until all monitors are up. + + :param ctx: Context + :param config: Configuration + """ + if isinstance(config, dict): + mons = config['daemons'] + cluster_name = config.get('cluster', 'ceph') + else: + assert isinstance(config, list) + mons = config + cluster_name = 'ceph' + firstmon = teuthology.get_first_mon(ctx, config, cluster_name) + (remote,) = ctx.cluster.only(firstmon).remotes.keys() + with contextutil.safe_while(sleep=10, tries=60, + action='wait for monitor quorum') as proceed: + while proceed(): + quorum_status = remote.sh('sudo ceph quorum_status', + logger=log.getChild('quorum_status')) + j = json.loads(quorum_status) + q = j.get('quorum_names', []) + log.debug('Quorum: %s', q) + if sorted(q) == sorted(mons): + break + + +def created_pool(ctx, config): + """ + Add new pools to the dictionary of pools that the ceph-manager + knows about. + """ + for new_pool in config: + if new_pool not in ctx.managers['ceph'].pools: + ctx.managers['ceph'].pools[new_pool] = ctx.managers['ceph'].get_pool_property( + new_pool, 'pg_num') + + +@contextlib.contextmanager +def tweaked_option(ctx, config): + """ + set an option, and then restore it with its original value + + Note, due to the way how tasks are executed/nested, it's not suggested to + use this method as a standalone task. otherwise, it's likely that it will + restore the tweaked option at the /end/ of 'tasks' block. 
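+
+ Currently only 'mon-health-to-clog' is handled. For example, ceph.restart
+ wraps this context manager, so a config entry like 'mon-health-to-clog: false'
+ (value shown for illustration) is applied only for the duration of the restart.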
+ """ + saved_options = {} + # we can complicate this when necessary + options = ['mon-health-to-clog'] + type_, id_ = 'mon', '*' + cluster = config.get('cluster', 'ceph') + manager = ctx.managers[cluster] + if id_ == '*': + get_from = next(teuthology.all_roles_of_type(ctx.cluster, type_)) + else: + get_from = id_ + for option in options: + if option not in config: + continue + value = 'true' if config[option] else 'false' + option = option.replace('-', '_') + old_value = manager.get_config(type_, get_from, option) + if value != old_value: + saved_options[option] = old_value + manager.inject_args(type_, id_, option, value) + yield + for option, value in saved_options.items(): + manager.inject_args(type_, id_, option, value) + + +@contextlib.contextmanager +def restart(ctx, config): + """ + restart ceph daemons + + For example:: + tasks: + - ceph.restart: [all] + + For example:: + tasks: + - ceph.restart: [osd.0, mon.1, mds.*] + + or:: + + tasks: + - ceph.restart: + daemons: [osd.0, mon.1] + wait-for-healthy: false + wait-for-osds-up: true + + :param ctx: Context + :param config: Configuration + """ + if config is None: + config = {} + elif isinstance(config, list): + config = {'daemons': config} + + daemons = ctx.daemons.resolve_role_list(config.get('daemons', None), CEPH_ROLE_TYPES, True) + clusters = set() + + with tweaked_option(ctx, config): + for role in daemons: + cluster, type_, id_ = teuthology.split_role(role) + ctx.daemons.get_daemon(type_, id_, cluster).restart() + clusters.add(cluster) + + for role in daemons: + cluster, type_, id_ = teuthology.split_role(role) + if type_ == 'osd': + ctx.managers[cluster].mark_down_osd(id_) + + if config.get('wait-for-healthy', True): + for cluster in clusters: + healthy(ctx=ctx, config=dict(cluster=cluster)) + if config.get('wait-for-osds-up', False): + for cluster in clusters: + wait_for_osds_up(ctx=ctx, config=dict(cluster=cluster)) + yield + + +@contextlib.contextmanager +def stop(ctx, config): + """ + Stop ceph daemons + + For example:: + tasks: + - ceph.stop: [mds.*] + + tasks: + - ceph.stop: [osd.0, osd.2] + + tasks: + - ceph.stop: + daemons: [osd.0, osd.2] + + """ + if config is None: + config = {} + elif isinstance(config, list): + config = {'daemons': config} + + daemons = ctx.daemons.resolve_role_list(config.get('daemons', None), CEPH_ROLE_TYPES, True) + for role in daemons: + cluster, type_, id_ = teuthology.split_role(role) + ctx.daemons.get_daemon(type_, id_, cluster).stop() + + yield + + +@contextlib.contextmanager +def wait_for_failure(ctx, config): + """ + Wait for a failure of a ceph daemon + + For example:: + tasks: + - ceph.wait_for_failure: [mds.*] + + tasks: + - ceph.wait_for_failure: [osd.0, osd.2] + + tasks: + - ceph.wait_for_failure: + daemons: [osd.0, osd.2] + + """ + if config is None: + config = {} + elif isinstance(config, list): + config = {'daemons': config} + + daemons = ctx.daemons.resolve_role_list(config.get('daemons', None), CEPH_ROLE_TYPES, True) + for role in daemons: + cluster, type_, id_ = teuthology.split_role(role) + try: + ctx.daemons.get_daemon(type_, id_, cluster).wait() + except: + log.info('Saw expected daemon failure. Continuing.') + pass + else: + raise RuntimeError('daemon %s did not fail' % role) + + yield + + +def validate_config(ctx, config): + """ + Perform some simple validation on task configuration. + Raises exceptions.ConfigError if an error is found. 
+ """ + # check for osds from multiple clusters on the same host + for remote, roles_for_host in ctx.cluster.remotes.items(): + last_cluster = None + last_role = None + for role in roles_for_host: + role_cluster, role_type, _ = teuthology.split_role(role) + if role_type != 'osd': + continue + if last_cluster and last_cluster != role_cluster: + msg = "Host should not have osds (%s and %s) from multiple clusters" % ( + last_role, role) + raise exceptions.ConfigError(msg) + last_cluster = role_cluster + last_role = role + + +def stop_logging_health(remote, cluster, retry): + # try this several times, since tell to mons is lossy. + args = 'sudo ceph --cluster {cluster} {retry_opts} tell mon.* injectargs -- --no-mon-health-to-clog' + try: + retry_opts = '--mon-client-directed-command-retry {}'.format(retry) + remote.run( + args=args.format(cluster=cluster, + retry_opts=retry_opts)) + except run.CommandFailedError: + for i in range(retry): + try: + remote.run( + args=args.format(cluster=cluster, + retry_opts='')) + return + except run.CommandFailedError: + pass + + +@contextlib.contextmanager +def task(ctx, config): + """ + Set up and tear down a Ceph cluster. + + For example:: + + tasks: + - ceph: + - interactive: + + You can also specify what branch to run:: + + tasks: + - ceph: + branch: foo + + Or a tag:: + + tasks: + - ceph: + tag: v0.42.13 + + Or a sha1:: + + tasks: + - ceph: + sha1: 1376a5ab0c89780eab39ffbbe436f6a6092314ed + + Or a local source dir:: + + tasks: + - ceph: + path: /home/sage/ceph + + To capture code coverage data, use:: + + tasks: + - ceph: + coverage: true + + To use btrfs, ext4, or xfs on the target's scratch disks, use:: + + tasks: + - ceph: + fs: xfs + mkfs_options: [-b,size=65536,-l,logdev=/dev/sdc1] + mount_options: [nobarrier, inode64] + + To change the cephfs's default max_mds (1), use:: + + tasks: + - ceph: + cephfs: + max_mds: 2 + + To change the mdsmap's default session_timeout (60 seconds), use:: + + tasks: + - ceph: + cephfs: + session_timeout: 300 + + Note, this will cause the task to check the /scratch_devs file on each node + for available devices. If no such file is found, /dev/sdb will be used. + + To run some daemons under valgrind, include their names + and the tool/args to use in a valgrind section:: + + tasks: + - ceph: + valgrind: + mds.1: --tool=memcheck + osd.1: [--tool=memcheck, --leak-check=no] + + Those nodes which are using memcheck or valgrind will get + checked for bad results. + + To adjust or modify config options, use:: + + tasks: + - ceph: + conf: + section: + key: value + + For example:: + + tasks: + - ceph: + conf: + mds.0: + some option: value + other key: other value + client.0: + debug client: 10 + debug ms: 1 + + By default, the cluster log is checked for errors and warnings, + and the run marked failed if any appear. You can ignore log + entries by giving a list of egrep compatible regexes, i.e.: + + tasks: + - ceph: + log-whitelist: ['foo.*bar', 'bad message'] + + To run multiple ceph clusters, use multiple ceph tasks, and roles + with a cluster name prefix, e.g. cluster1.client.0. Roles with no + cluster use the default cluster name, 'ceph'. OSDs from separate + clusters must be on separate hosts. Clients and non-osd daemons + from multiple clusters may be colocated. 
For each cluster, add an + instance of the ceph task with the cluster name specified, e.g.:: + + roles: + - [mon.a, osd.0, osd.1] + - [backup.mon.a, backup.osd.0, backup.osd.1] + - [client.0, backup.client.0] + tasks: + - ceph: + cluster: ceph + - ceph: + cluster: backup + + :param ctx: Context + :param config: Configuration + + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + "task ceph only supports a dictionary for configuration" + + overrides = ctx.config.get('overrides', {}) + teuthology.deep_merge(config, overrides.get('ceph', {})) + + first_ceph_cluster = False + if not hasattr(ctx, 'daemons'): + first_ceph_cluster = True + ctx.daemons = DaemonGroup() + + testdir = teuthology.get_testdir(ctx) + if config.get('coverage'): + coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) + log.info('Creating coverage directory...') + run.wait( + ctx.cluster.run( + args=[ + 'install', '-d', '-m0755', '--', + coverage_dir, + ], + wait=False, + ) + ) + + if 'cluster' not in config: + config['cluster'] = 'ceph' + + validate_config(ctx, config) + + subtasks = [] + if first_ceph_cluster: + # these tasks handle general log setup and parsing on all hosts, + # so they should only be run once + subtasks = [ + lambda: ceph_log(ctx=ctx, config=None), + lambda: ceph_crash(ctx=ctx, config=None), + lambda: valgrind_post(ctx=ctx, config=config), + ] + + subtasks += [ + lambda: cluster(ctx=ctx, config=dict( + conf=config.get('conf', {}), + fs=config.get('fs', 'xfs'), + mkfs_options=config.get('mkfs_options', None), + mount_options=config.get('mount_options', None), + skip_mgr_daemons=config.get('skip_mgr_daemons', False), + log_whitelist=config.get('log-whitelist', []), + cpu_profile=set(config.get('cpu_profile', []),), + cluster=config['cluster'], + mon_bind_msgr2=config.get('mon_bind_msgr2', True), + mon_bind_addrvec=config.get('mon_bind_addrvec', True), + )), + lambda: run_daemon(ctx=ctx, config=config, type_='mon'), + lambda: run_daemon(ctx=ctx, config=config, type_='mgr'), + lambda: crush_setup(ctx=ctx, config=config), + lambda: run_daemon(ctx=ctx, config=config, type_='osd'), + lambda: create_rbd_pool(ctx=ctx, config=config), + lambda: cephfs_setup(ctx=ctx, config=config), + lambda: run_daemon(ctx=ctx, config=config, type_='mds'), + ] + + with contextutil.nested(*subtasks): + first_mon = teuthology.get_first_mon(ctx, config, config['cluster']) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + if not hasattr(ctx, 'managers'): + ctx.managers = {} + ctx.managers[config['cluster']] = CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager.' + config['cluster']), + cluster=config['cluster'], + ) + + try: + if config.get('wait-for-healthy', True): + healthy(ctx=ctx, config=dict(cluster=config['cluster'])) + + yield + finally: + # set pg_num_targets back to actual pg_num, so we don't have to + # wait for pending merges (which can take a while!) + ctx.managers[config['cluster']].stop_pg_num_changes() + + if config.get('wait-for-scrub', True): + osd_scrub_pgs(ctx, config) + + # stop logging health to clog during shutdown, or else we generate + # a bunch of scary messages unrelated to our actual run. 
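+ # stop_logging_health() retries (5 attempts below) because 'tell mon.*'
+ # can silently drop the command.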
+ firstmon = teuthology.get_first_mon(ctx, config, config['cluster']) + (mon0_remote,) = ctx.cluster.only(firstmon).remotes.keys() + stop_logging_health(mon0_remote, config['cluster'], 5) diff --git a/qa/tasks/ceph_client.py b/qa/tasks/ceph_client.py new file mode 100644 index 00000000..74e818f9 --- /dev/null +++ b/qa/tasks/ceph_client.py @@ -0,0 +1,42 @@ +""" +Set up client keyring +""" +import logging + +from teuthology import misc as teuthology +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +def create_keyring(ctx, cluster_name): + """ + Set up key ring on remote sites + """ + log.info('Setting up client nodes...') + clients = ctx.cluster.only(teuthology.is_type('client', cluster_name)) + testdir = teuthology.get_testdir(ctx) + coverage_dir = '{tdir}/archive/coverage'.format(tdir=testdir) + for remote, roles_for_host in clients.remotes.items(): + for role in teuthology.cluster_roles_of_type(roles_for_host, 'client', + cluster_name): + name = teuthology.ceph_role(role) + client_keyring = '/etc/ceph/{0}.{1}.keyring'.format(cluster_name, name) + remote.run( + args=[ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + coverage_dir, + 'ceph-authtool', + '--create-keyring', + '--gen-key', + # TODO this --name= is not really obeyed, all unknown "types" are munged to "client" + '--name={name}'.format(name=name), + client_keyring, + run.Raw('&&'), + 'sudo', + 'chmod', + '0644', + client_keyring, + ], + ) diff --git a/qa/tasks/ceph_deploy.py b/qa/tasks/ceph_deploy.py new file mode 100644 index 00000000..de45fff9 --- /dev/null +++ b/qa/tasks/ceph_deploy.py @@ -0,0 +1,932 @@ +""" +Execute ceph-deploy as a task +""" + +import contextlib +import os +import time +import logging +import traceback + +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.config import config as teuth_config +from teuthology.task import install as install_fn +from teuthology.orchestra import run +from tasks.cephfs.filesystem import Filesystem +from teuthology.misc import wait_until_healthy + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def download_ceph_deploy(ctx, config): + """ + Downloads ceph-deploy from the ceph.com git mirror and (by default) + switches to the master branch. If the `ceph-deploy-branch` is specified, it + will use that instead. The `bootstrap` script is ran, with the argument + obtained from `python_version`, if specified. 
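+
+ A minimal sketch of the relevant task config (values are illustrative)::
+
+ tasks:
+ - ceph-deploy:
+ ceph-deploy-branch: master
+ python_version: 3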
+ """ + # use mon.a for ceph_admin + (ceph_admin,) = ctx.cluster.only('mon.a').remotes.keys() + + try: + py_ver = str(config['python_version']) + except KeyError: + pass + else: + supported_versions = ['2', '3'] + if py_ver not in supported_versions: + raise ValueError("python_version must be: {}, not {}".format( + ' or '.join(supported_versions), py_ver + )) + + log.info("Installing Python") + system_type = teuthology.get_system_type(ceph_admin) + + if system_type == 'rpm': + package = 'python36' if py_ver == '3' else 'python' + ctx.cluster.run(args=[ + 'sudo', 'yum', '-y', 'install', + package, 'python-virtualenv' + ]) + else: + package = 'python3' if py_ver == '3' else 'python' + ctx.cluster.run(args=[ + 'sudo', 'apt-get', '-y', '--force-yes', 'install', + package, 'python-virtualenv' + ]) + + log.info('Downloading ceph-deploy...') + testdir = teuthology.get_testdir(ctx) + ceph_deploy_branch = config.get('ceph-deploy-branch', 'master') + + ceph_admin.run( + args=[ + 'git', 'clone', '-b', ceph_deploy_branch, + teuth_config.ceph_git_base_url + 'ceph-deploy.git', + '{tdir}/ceph-deploy'.format(tdir=testdir), + ], + ) + args = [ + 'cd', + '{tdir}/ceph-deploy'.format(tdir=testdir), + run.Raw('&&'), + './bootstrap', + ] + try: + args.append(str(config['python_version'])) + except KeyError: + pass + ceph_admin.run(args=args) + + try: + yield + finally: + log.info('Removing ceph-deploy ...') + ceph_admin.run( + args=[ + 'rm', + '-rf', + '{tdir}/ceph-deploy'.format(tdir=testdir), + ], + ) + + +def is_healthy(ctx, config): + """Wait until a Ceph cluster is healthy.""" + testdir = teuthology.get_testdir(ctx) + ceph_admin = teuthology.get_first_mon(ctx, config) + (remote,) = ctx.cluster.only(ceph_admin).remotes.keys() + max_tries = 90 # 90 tries * 10 secs --> 15 minutes + tries = 0 + while True: + tries += 1 + if tries >= max_tries: + msg = "ceph health was unable to get 'HEALTH_OK' after waiting 15 minutes" + remote.run( + args=[ + 'cd', + '{tdir}'.format(tdir=testdir), + run.Raw('&&'), + 'sudo', 'ceph', + 'report', + ], + ) + raise RuntimeError(msg) + + out = remote.sh( + [ + 'cd', + '{tdir}'.format(tdir=testdir), + run.Raw('&&'), + 'sudo', 'ceph', + 'health', + ], + logger=log.getChild('health'), + ) + log.info('Ceph health: %s', out.rstrip('\n')) + if out.split(None, 1)[0] == 'HEALTH_OK': + break + time.sleep(10) + + +def get_nodes_using_role(ctx, target_role): + """ + Extract the names of nodes that match a given role from a cluster, and modify the + cluster's service IDs to match the resulting node-based naming scheme that ceph-deploy + uses, such that if "mon.a" is on host "foo23", it'll be renamed to "mon.foo23". + """ + + # Nodes containing a service of the specified role + nodes_of_interest = [] + + # Prepare a modified version of cluster.remotes with ceph-deploy-ized names + modified_remotes = {} + ceph_deploy_mapped = dict() + for _remote, roles_for_host in ctx.cluster.remotes.items(): + modified_remotes[_remote] = [] + for svc_id in roles_for_host: + if svc_id.startswith("{0}.".format(target_role)): + fqdn = str(_remote).split('@')[-1] + nodename = str(str(_remote).split('.')[0]).split('@')[1] + if target_role == 'mon': + nodes_of_interest.append(fqdn) + else: + nodes_of_interest.append(nodename) + mapped_role = "{0}.{1}".format(target_role, nodename) + modified_remotes[_remote].append(mapped_role) + # keep dict of mapped role for later use by tasks + # eg. 
mon.a => mon.node1 + ceph_deploy_mapped[svc_id] = mapped_role + else: + modified_remotes[_remote].append(svc_id) + + ctx.cluster.remotes = modified_remotes + # since the function is called multiple times for target roles + # append new mapped roles + if not hasattr(ctx.cluster, 'mapped_role'): + ctx.cluster.mapped_role = ceph_deploy_mapped + else: + ctx.cluster.mapped_role.update(ceph_deploy_mapped) + log.info("New mapped_role={mr}".format(mr=ctx.cluster.mapped_role)) + return nodes_of_interest + + +def get_dev_for_osd(ctx, config): + """Get a list of all osd device names.""" + osd_devs = [] + for remote, roles_for_host in ctx.cluster.remotes.items(): + host = remote.name.split('@')[-1] + shortname = host.split('.')[0] + devs = teuthology.get_scratch_devices(remote) + num_osd_per_host = list( + teuthology.roles_of_type( + roles_for_host, 'osd')) + num_osds = len(num_osd_per_host) + if config.get('separate_journal_disk') is not None: + num_devs_reqd = 2 * num_osds + assert num_devs_reqd <= len( + devs), 'fewer data and journal disks than required ' + shortname + for dindex in range(0, num_devs_reqd, 2): + jd_index = dindex + 1 + dev_short = devs[dindex].split('/')[-1] + jdev_short = devs[jd_index].split('/')[-1] + osd_devs.append((shortname, dev_short, jdev_short)) + else: + assert num_osds <= len(devs), 'fewer disks than osds ' + shortname + for dev in devs[:num_osds]: + dev_short = dev.split('/')[-1] + osd_devs.append((shortname, dev_short)) + return osd_devs + + +def get_all_nodes(ctx, config): + """Return a string of node names separated by blanks""" + nodelist = [] + for t, k in ctx.config['targets'].items(): + host = t.split('@')[-1] + simple_host = host.split('.')[0] + nodelist.append(simple_host) + nodelist = " ".join(nodelist) + return nodelist + +@contextlib.contextmanager +def build_ceph_cluster(ctx, config): + """Build a ceph cluster""" + + # Expect to find ceph_admin on the first mon by ID, same place that the download task + # puts it. Remember this here, because subsequently IDs will change from those in + # the test config to those that ceph-deploy invents. 
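+ # (e.g. "mon.a" from the job config may later appear as "mon.<short hostname>"
+ # once get_nodes_using_role() has remapped it; the hostname depends on the target)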
+ + (ceph_admin,) = ctx.cluster.only('mon.a').remotes.keys() + + def execute_ceph_deploy(cmd): + """Remotely execute a ceph_deploy command""" + return ceph_admin.run( + args=[ + 'cd', + '{tdir}/ceph-deploy'.format(tdir=testdir), + run.Raw('&&'), + run.Raw(cmd), + ], + check_status=False, + ).exitstatus + + def ceph_disk_osd_create(ctx, config): + node_dev_list = get_dev_for_osd(ctx, config) + no_of_osds = 0 + for d in node_dev_list: + node = d[0] + for disk in d[1:]: + zap = './ceph-deploy disk zap ' + node + ':' + disk + estatus = execute_ceph_deploy(zap) + if estatus != 0: + raise RuntimeError("ceph-deploy: Failed to zap osds") + osd_create_cmd = './ceph-deploy osd create ' + # first check for filestore, default is bluestore with ceph-deploy + if config.get('filestore') is not None: + osd_create_cmd += '--filestore ' + elif config.get('bluestore') is not None: + osd_create_cmd += '--bluestore ' + if config.get('dmcrypt') is not None: + osd_create_cmd += '--dmcrypt ' + osd_create_cmd += ":".join(d) + estatus_osd = execute_ceph_deploy(osd_create_cmd) + if estatus_osd == 0: + log.info('successfully created osd') + no_of_osds += 1 + else: + raise RuntimeError("ceph-deploy: Failed to create osds") + return no_of_osds + + def ceph_volume_osd_create(ctx, config): + osds = ctx.cluster.only(teuthology.is_type('osd')) + no_of_osds = 0 + for remote in osds.remotes.keys(): + # all devs should be lvm + osd_create_cmd = './ceph-deploy osd create --debug ' + remote.shortname + ' ' + # default is bluestore so we just need config item for filestore + roles = ctx.cluster.remotes[remote] + dev_needed = len([role for role in roles + if role.startswith('osd')]) + all_devs = teuthology.get_scratch_devices(remote) + log.info("node={n}, need_devs={d}, available={a}".format( + n=remote.shortname, + d=dev_needed, + a=all_devs, + )) + devs = all_devs[0:dev_needed] + # rest of the devices can be used for journal if required + jdevs = dev_needed + for device in devs: + device_split = device.split('/') + lv_device = device_split[-2] + '/' + device_split[-1] + if config.get('filestore') is not None: + osd_create_cmd += '--filestore --data ' + lv_device + ' ' + # filestore with ceph-volume also needs journal disk + try: + jdevice = all_devs.pop(jdevs) + except IndexError: + raise RuntimeError("No device available for \ + journal configuration") + jdevice_split = jdevice.split('/') + j_lv = jdevice_split[-2] + '/' + jdevice_split[-1] + osd_create_cmd += '--journal ' + j_lv + else: + osd_create_cmd += ' --data ' + lv_device + estatus_osd = execute_ceph_deploy(osd_create_cmd) + if estatus_osd == 0: + log.info('successfully created osd') + no_of_osds += 1 + else: + raise RuntimeError("ceph-deploy: Failed to create osds") + return no_of_osds + + try: + log.info('Building ceph cluster using ceph-deploy...') + testdir = teuthology.get_testdir(ctx) + ceph_branch = None + if config.get('branch') is not None: + cbranch = config.get('branch') + for var, val in cbranch.items(): + ceph_branch = '--{var}={val}'.format(var=var, val=val) + all_nodes = get_all_nodes(ctx, config) + mds_nodes = get_nodes_using_role(ctx, 'mds') + mds_nodes = " ".join(mds_nodes) + mon_node = get_nodes_using_role(ctx, 'mon') + mon_nodes = " ".join(mon_node) + # skip mgr based on config item + # this is needed when test uses latest code to install old ceph + # versions + skip_mgr = config.get('skip-mgr', False) + if not skip_mgr: + mgr_nodes = get_nodes_using_role(ctx, 'mgr') + mgr_nodes = " ".join(mgr_nodes) + new_mon = './ceph-deploy new' + " " + mon_nodes 
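+ # e.g. new_mon ends up as "./ceph-deploy new host1 host2" when two mon
+ # nodes were mapped above (hostnames are illustrative)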
+ if not skip_mgr: + mgr_create = './ceph-deploy mgr create' + " " + mgr_nodes + mon_hostname = mon_nodes.split(' ')[0] + mon_hostname = str(mon_hostname) + gather_keys = './ceph-deploy gatherkeys' + " " + mon_hostname + deploy_mds = './ceph-deploy mds create' + " " + mds_nodes + + if mon_nodes is None: + raise RuntimeError("no monitor nodes in the config file") + + estatus_new = execute_ceph_deploy(new_mon) + if estatus_new != 0: + raise RuntimeError("ceph-deploy: new command failed") + + log.info('adding config inputs...') + testdir = teuthology.get_testdir(ctx) + conf_path = '{tdir}/ceph-deploy/ceph.conf'.format(tdir=testdir) + + if config.get('conf') is not None: + confp = config.get('conf') + for section, keys in confp.items(): + lines = '[{section}]\n'.format(section=section) + teuthology.append_lines_to_file(ceph_admin, conf_path, lines, + sudo=True) + for key, value in keys.items(): + log.info("[%s] %s = %s" % (section, key, value)) + lines = '{key} = {value}\n'.format(key=key, value=value) + teuthology.append_lines_to_file( + ceph_admin, conf_path, lines, sudo=True) + + # install ceph + dev_branch = ctx.config['branch'] + branch = '--dev={branch}'.format(branch=dev_branch) + if ceph_branch: + option = ceph_branch + else: + option = branch + install_nodes = './ceph-deploy install ' + option + " " + all_nodes + estatus_install = execute_ceph_deploy(install_nodes) + if estatus_install != 0: + raise RuntimeError("ceph-deploy: Failed to install ceph") + # install ceph-test package too + install_nodes2 = './ceph-deploy install --tests ' + option + \ + " " + all_nodes + estatus_install = execute_ceph_deploy(install_nodes2) + if estatus_install != 0: + raise RuntimeError("ceph-deploy: Failed to install ceph-test") + + mon_create_nodes = './ceph-deploy mon create-initial' + # If the following fails, it is OK, it might just be that the monitors + # are taking way more than a minute/monitor to form quorum, so lets + # try the next block which will wait up to 15 minutes to gatherkeys. 
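+ # (the exit status is deliberately ignored here; the gatherkeys call
+ # below is what actually determines success)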
+ execute_ceph_deploy(mon_create_nodes) + + estatus_gather = execute_ceph_deploy(gather_keys) + if estatus_gather != 0: + raise RuntimeError("ceph-deploy: Failed during gather keys") + + # install admin key on mons (ceph-create-keys doesn't do this any more) + mons = ctx.cluster.only(teuthology.is_type('mon')) + for remote in mons.remotes.keys(): + execute_ceph_deploy('./ceph-deploy admin ' + remote.shortname) + + # create osd's + if config.get('use-ceph-volume', False): + no_of_osds = ceph_volume_osd_create(ctx, config) + else: + # this method will only work with ceph-deploy v1.5.39 or older + no_of_osds = ceph_disk_osd_create(ctx, config) + + if not skip_mgr: + execute_ceph_deploy(mgr_create) + + if mds_nodes: + estatus_mds = execute_ceph_deploy(deploy_mds) + if estatus_mds != 0: + raise RuntimeError("ceph-deploy: Failed to deploy mds") + + if config.get('test_mon_destroy') is not None: + for d in range(1, len(mon_node)): + mon_destroy_nodes = './ceph-deploy mon destroy' + \ + " " + mon_node[d] + estatus_mon_d = execute_ceph_deploy(mon_destroy_nodes) + if estatus_mon_d != 0: + raise RuntimeError("ceph-deploy: Failed to delete monitor") + + + + if config.get('wait-for-healthy', True) and no_of_osds >= 2: + is_healthy(ctx=ctx, config=None) + + log.info('Setting up client nodes...') + conf_path = '/etc/ceph/ceph.conf' + admin_keyring_path = '/etc/ceph/ceph.client.admin.keyring' + first_mon = teuthology.get_first_mon(ctx, config) + (mon0_remote,) = ctx.cluster.only(first_mon).remotes.keys() + conf_data = teuthology.get_file( + remote=mon0_remote, + path=conf_path, + sudo=True, + ) + admin_keyring = teuthology.get_file( + remote=mon0_remote, + path=admin_keyring_path, + sudo=True, + ) + + clients = ctx.cluster.only(teuthology.is_type('client')) + for remot, roles_for_host in clients.remotes.items(): + for id_ in teuthology.roles_of_type(roles_for_host, 'client'): + client_keyring = \ + '/etc/ceph/ceph.client.{id}.keyring'.format(id=id_) + mon0_remote.run( + args=[ + 'cd', + '{tdir}'.format(tdir=testdir), + run.Raw('&&'), + 'sudo', 'bash', '-c', + run.Raw('"'), 'ceph', + 'auth', + 'get-or-create', + 'client.{id}'.format(id=id_), + 'mds', 'allow', + 'mon', 'allow *', + 'osd', 'allow *', + run.Raw('>'), + client_keyring, + run.Raw('"'), + ], + ) + key_data = teuthology.get_file( + remote=mon0_remote, + path=client_keyring, + sudo=True, + ) + teuthology.sudo_write_file( + remote=remot, + path=client_keyring, + data=key_data, + perms='0644' + ) + teuthology.sudo_write_file( + remote=remot, + path=admin_keyring_path, + data=admin_keyring, + perms='0644' + ) + teuthology.sudo_write_file( + remote=remot, + path=conf_path, + data=conf_data, + perms='0644' + ) + + if mds_nodes: + log.info('Configuring CephFS...') + Filesystem(ctx, create=True) + elif not config.get('only_mon'): + raise RuntimeError( + "The cluster is NOT operational due to insufficient OSDs") + # create rbd pool + ceph_admin.run( + args=[ + 'sudo', 'ceph', '--cluster', 'ceph', + 'osd', 'pool', 'create', 'rbd', '128', '128'], + check_status=False) + ceph_admin.run( + args=[ + 'sudo', 'ceph', '--cluster', 'ceph', + 'osd', 'pool', 'application', 'enable', + 'rbd', 'rbd', '--yes-i-really-mean-it' + ], + check_status=False) + yield + + except Exception: + log.info( + "Error encountered, logging exception before tearing down ceph-deploy") + log.info(traceback.format_exc()) + raise + finally: + if config.get('keep_running'): + return + log.info('Stopping ceph...') + ctx.cluster.run(args=['sudo', 'systemctl', 'stop', 'ceph.target'], + 
check_status=False) + time.sleep(4) + + # and now just check for the processes themselves, as if upstart/sysvinit + # is lying to us. Ignore errors if the grep fails + ctx.cluster.run(args=['sudo', 'ps', 'aux', run.Raw('|'), + 'grep', '-v', 'grep', run.Raw('|'), + 'grep', 'ceph'], check_status=False) + ctx.cluster.run(args=['sudo', 'systemctl', run.Raw('|'), + 'grep', 'ceph'], check_status=False) + + if ctx.archive is not None: + # archive mon data, too + log.info('Archiving mon data...') + path = os.path.join(ctx.archive, 'data') + os.makedirs(path) + mons = ctx.cluster.only(teuthology.is_type('mon')) + for remote, roles in mons.remotes.items(): + for role in roles: + if role.startswith('mon.'): + teuthology.pull_directory_tarball( + remote, + '/var/lib/ceph/mon', + path + '/' + role + '.tgz') + + log.info('Compressing logs...') + run.wait( + ctx.cluster.run( + args=[ + 'sudo', + 'find', + '/var/log/ceph', + '-name', + '*.log', + '-print0', + run.Raw('|'), + 'sudo', + 'xargs', + '-0', + '--no-run-if-empty', + '--', + 'gzip', + '--', + ], + wait=False, + ), + ) + + log.info('Archiving logs...') + path = os.path.join(ctx.archive, 'remote') + os.makedirs(path) + for remote in ctx.cluster.remotes.keys(): + sub = os.path.join(path, remote.shortname) + os.makedirs(sub) + teuthology.pull_directory(remote, '/var/log/ceph', + os.path.join(sub, 'log')) + + # Prevent these from being undefined if the try block fails + all_nodes = get_all_nodes(ctx, config) + purge_nodes = './ceph-deploy purge' + " " + all_nodes + purgedata_nodes = './ceph-deploy purgedata' + " " + all_nodes + + log.info('Purging package...') + execute_ceph_deploy(purge_nodes) + log.info('Purging data...') + execute_ceph_deploy(purgedata_nodes) + + +@contextlib.contextmanager +def cli_test(ctx, config): + """ + ceph-deploy cli to exercise most commonly use cli's and ensure + all commands works and also startup the init system. 
+ + """ + log.info('Ceph-deploy Test') + if config is None: + config = {} + test_branch = '' + conf_dir = teuthology.get_testdir(ctx) + "/cdtest" + + def execute_cdeploy(admin, cmd, path): + """Execute ceph-deploy commands """ + """Either use git path or repo path """ + args = ['cd', conf_dir, run.Raw(';')] + if path: + args.append('{path}/ceph-deploy/ceph-deploy'.format(path=path)) + else: + args.append('ceph-deploy') + args.append(run.Raw(cmd)) + ec = admin.run(args=args, check_status=False).exitstatus + if ec != 0: + raise RuntimeError( + "failed during ceph-deploy cmd: {cmd} , ec={ec}".format(cmd=cmd, ec=ec)) + + if config.get('rhbuild'): + path = None + else: + path = teuthology.get_testdir(ctx) + # test on branch from config eg: wip-* , master or next etc + # packages for all distro's should exist for wip* + if ctx.config.get('branch'): + branch = ctx.config.get('branch') + test_branch = ' --dev={branch} '.format(branch=branch) + mons = ctx.cluster.only(teuthology.is_type('mon')) + for node, role in mons.remotes.items(): + admin = node + admin.run(args=['mkdir', conf_dir], check_status=False) + nodename = admin.shortname + system_type = teuthology.get_system_type(admin) + if config.get('rhbuild'): + admin.run(args=['sudo', 'yum', 'install', 'ceph-deploy', '-y']) + log.info('system type is %s', system_type) + osds = ctx.cluster.only(teuthology.is_type('osd')) + + for remote, roles in osds.remotes.items(): + devs = teuthology.get_scratch_devices(remote) + log.info("roles %s", roles) + if (len(devs) < 3): + log.error( + 'Test needs minimum of 3 devices, only found %s', + str(devs)) + raise RuntimeError("Needs minimum of 3 devices ") + + conf_path = '{conf_dir}/ceph.conf'.format(conf_dir=conf_dir) + new_cmd = 'new ' + nodename + execute_cdeploy(admin, new_cmd, path) + if config.get('conf') is not None: + confp = config.get('conf') + for section, keys in confp.items(): + lines = '[{section}]\n'.format(section=section) + teuthology.append_lines_to_file(admin, conf_path, lines, + sudo=True) + for key, value in keys.items(): + log.info("[%s] %s = %s" % (section, key, value)) + lines = '{key} = {value}\n'.format(key=key, value=value) + teuthology.append_lines_to_file(admin, conf_path, lines, + sudo=True) + new_mon_install = 'install {branch} --mon '.format( + branch=test_branch) + nodename + new_mgr_install = 'install {branch} --mgr '.format( + branch=test_branch) + nodename + new_osd_install = 'install {branch} --osd '.format( + branch=test_branch) + nodename + new_admin = 'install {branch} --cli '.format(branch=test_branch) + nodename + create_initial = 'mon create-initial ' + mgr_create = 'mgr create ' + nodename + # either use create-keys or push command + push_keys = 'admin ' + nodename + execute_cdeploy(admin, new_mon_install, path) + execute_cdeploy(admin, new_mgr_install, path) + execute_cdeploy(admin, new_osd_install, path) + execute_cdeploy(admin, new_admin, path) + execute_cdeploy(admin, create_initial, path) + execute_cdeploy(admin, mgr_create, path) + execute_cdeploy(admin, push_keys, path) + + for i in range(3): + zap_disk = 'disk zap ' + "{n}:{d}".format(n=nodename, d=devs[i]) + prepare = 'osd prepare ' + "{n}:{d}".format(n=nodename, d=devs[i]) + execute_cdeploy(admin, zap_disk, path) + execute_cdeploy(admin, prepare, path) + + log.info("list files for debugging purpose to check file permissions") + admin.run(args=['ls', run.Raw('-lt'), conf_dir]) + remote.run(args=['sudo', 'ceph', '-s'], check_status=False) + out = remote.sh('sudo ceph health') + log.info('Ceph health: %s', 
out.rstrip('\n')) + log.info("Waiting for cluster to become healthy") + with contextutil.safe_while(sleep=10, tries=6, + action='check health') as proceed: + while proceed(): + out = remote.sh('sudo ceph health') + if (out.split(None, 1)[0] == 'HEALTH_OK'): + break + rgw_install = 'install {branch} --rgw {node}'.format( + branch=test_branch, + node=nodename, + ) + rgw_create = 'rgw create ' + nodename + execute_cdeploy(admin, rgw_install, path) + execute_cdeploy(admin, rgw_create, path) + log.info('All ceph-deploy cli tests passed') + try: + yield + finally: + log.info("cleaning up") + ctx.cluster.run(args=['sudo', 'systemctl', 'stop', 'ceph.target'], + check_status=False) + time.sleep(4) + for i in range(3): + umount_dev = "{d}1".format(d=devs[i]) + r = remote.run(args=['sudo', 'umount', run.Raw(umount_dev)]) + cmd = 'purge ' + nodename + execute_cdeploy(admin, cmd, path) + cmd = 'purgedata ' + nodename + execute_cdeploy(admin, cmd, path) + log.info("Removing temporary dir") + admin.run( + args=[ + 'rm', + run.Raw('-rf'), + run.Raw(conf_dir)], + check_status=False) + if config.get('rhbuild'): + admin.run(args=['sudo', 'yum', 'remove', 'ceph-deploy', '-y']) + + +@contextlib.contextmanager +def single_node_test(ctx, config): + """ + - ceph-deploy.single_node_test: null + + #rhbuild testing + - ceph-deploy.single_node_test: + rhbuild: 1.2.3 + + """ + log.info("Testing ceph-deploy on single node") + if config is None: + config = {} + overrides = ctx.config.get('overrides', {}) + teuthology.deep_merge(config, overrides.get('ceph-deploy', {})) + + if config.get('rhbuild'): + log.info("RH Build, Skip Download") + with contextutil.nested( + lambda: cli_test(ctx=ctx, config=config), + ): + yield + else: + with contextutil.nested( + lambda: install_fn.ship_utilities(ctx=ctx, config=None), + lambda: download_ceph_deploy(ctx=ctx, config=config), + lambda: cli_test(ctx=ctx, config=config), + ): + yield + + +@contextlib.contextmanager +def upgrade(ctx, config): + """ + Upgrade using ceph-deploy + eg: + ceph-deploy.upgrade: + # to upgrade to specific branch, use + branch: + stable: jewel + # to setup mgr node, use + setup-mgr-node: True + # to wait for cluster to be healthy after all upgrade, use + wait-for-healthy: True + role: (upgrades the below roles serially) + mon.a + mon.b + osd.0 + """ + roles = config.get('roles') + # get the roles that are mapped as per ceph-deploy + # roles are mapped for mon/mds eg: mon.a => mon.host_short_name + mapped_role = ctx.cluster.mapped_role + log.info("roles={r}, mapped_roles={mr}".format(r=roles, mr=mapped_role)) + if config.get('branch'): + branch = config.get('branch') + (var, val) = branch.items()[0] + ceph_branch = '--{var}={val}'.format(var=var, val=val) + else: + # default to wip-branch under test + dev_branch = ctx.config['branch'] + ceph_branch = '--dev={branch}'.format(branch=dev_branch) + # get the node used for initial deployment which is mon.a + mon_a = mapped_role.get('mon.a') + (ceph_admin,) = ctx.cluster.only(mon_a).remotes.keys() + testdir = teuthology.get_testdir(ctx) + cmd = './ceph-deploy install ' + ceph_branch + for role in roles: + # check if this role is mapped (mon or mds) + if mapped_role.get(role): + role = mapped_role.get(role) + remotes_and_roles = ctx.cluster.only(role).remotes + for remote, roles in remotes_and_roles.items(): + nodename = remote.shortname + cmd = cmd + ' ' + nodename + log.info("Upgrading ceph on %s", nodename) + ceph_admin.run( + args=[ + 'cd', + '{tdir}/ceph-deploy'.format(tdir=testdir), + run.Raw('&&'), + 
run.Raw(cmd), + ], + ) + # restart all ceph services, ideally upgrade should but it does not + remote.run( + args=[ + 'sudo', 'systemctl', 'restart', 'ceph.target' + ] + ) + ceph_admin.run(args=['sudo', 'ceph', '-s']) + + # workaround for http://tracker.ceph.com/issues/20950 + # write the correct mgr key to disk + if config.get('setup-mgr-node', None): + mons = ctx.cluster.only(teuthology.is_type('mon')) + for remote, roles in mons.remotes.items(): + remote.run( + args=[ + run.Raw('sudo ceph auth get client.bootstrap-mgr'), + run.Raw('|'), + run.Raw('sudo tee'), + run.Raw('/var/lib/ceph/bootstrap-mgr/ceph.keyring') + ] + ) + + if config.get('setup-mgr-node', None): + mgr_nodes = get_nodes_using_role(ctx, 'mgr') + mgr_nodes = " ".join(mgr_nodes) + mgr_install = './ceph-deploy install --mgr ' + ceph_branch + " " + mgr_nodes + mgr_create = './ceph-deploy mgr create' + " " + mgr_nodes + # install mgr + ceph_admin.run( + args=[ + 'cd', + '{tdir}/ceph-deploy'.format(tdir=testdir), + run.Raw('&&'), + run.Raw(mgr_install), + ], + ) + # create mgr + ceph_admin.run( + args=[ + 'cd', + '{tdir}/ceph-deploy'.format(tdir=testdir), + run.Raw('&&'), + run.Raw(mgr_create), + ], + ) + ceph_admin.run(args=['sudo', 'ceph', '-s']) + if config.get('wait-for-healthy', None): + wait_until_healthy(ctx, ceph_admin, use_sudo=True) + yield + + +@contextlib.contextmanager +def task(ctx, config): + """ + Set up and tear down a Ceph cluster. + + For example:: + + tasks: + - install: + extras: yes + - ssh_keys: + - ceph-deploy: + branch: + stable: bobtail + mon_initial_members: 1 + ceph-deploy-branch: my-ceph-deploy-branch + only_mon: true + keep_running: true + # either choose bluestore or filestore, default is bluestore + bluestore: True + # or + filestore: True + # skip install of mgr for old release using below flag + skip-mgr: True ( default is False ) + # to use ceph-volume instead of ceph-disk + # ceph-disk can only be used with old ceph-deploy release from pypi + use-ceph-volume: true + + tasks: + - install: + extras: yes + - ssh_keys: + - ceph-deploy: + branch: + dev: master + conf: + mon: + debug mon = 20 + + tasks: + - install: + extras: yes + - ssh_keys: + - ceph-deploy: + branch: + testing: + dmcrypt: yes + separate_journal_disk: yes + + """ + if config is None: + config = {} + + assert isinstance(config, dict), \ + "task ceph-deploy only supports a dictionary for configuration" + + overrides = ctx.config.get('overrides', {}) + teuthology.deep_merge(config, overrides.get('ceph-deploy', {})) + + if config.get('branch') is not None: + assert isinstance( + config['branch'], dict), 'branch must be a dictionary' + + log.info('task ceph-deploy with config ' + str(config)) + + # we need to use 1.5.39-stable for testing jewel or master branch with + # ceph-disk + if config.get('use-ceph-volume', False) is False: + # check we are not testing specific branch + if config.get('ceph-deploy-branch', False) is False: + config['ceph-deploy-branch'] = '1.5.39-stable' + + with contextutil.nested( + lambda: install_fn.ship_utilities(ctx=ctx, config=None), + lambda: download_ceph_deploy(ctx=ctx, config=config), + lambda: build_ceph_cluster(ctx=ctx, config=config), + ): + yield diff --git a/qa/tasks/ceph_fuse.py b/qa/tasks/ceph_fuse.py new file mode 100644 index 00000000..1439ccff --- /dev/null +++ b/qa/tasks/ceph_fuse.py @@ -0,0 +1,160 @@ +""" +Ceph FUSE client task +""" + +import contextlib +import logging + +from teuthology import misc as teuthology +from tasks.cephfs.fuse_mount import FuseMount + +log = 
logging.getLogger(__name__) + + +def get_client_configs(ctx, config): + """ + Get a map of the configuration for each FUSE client in the configuration by + combining the configuration of the current task with any global overrides. + + :param ctx: Context instance + :param config: configuration for this task + :return: dict of client name to config or to None + """ + if config is None: + config = dict(('client.{id}'.format(id=id_), None) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')) + elif isinstance(config, list): + config = dict((name, None) for name in config) + + overrides = ctx.config.get('overrides', {}) + teuthology.deep_merge(config, overrides.get('ceph-fuse', {})) + + return config + + +@contextlib.contextmanager +def task(ctx, config): + """ + Mount/unmount a ``ceph-fuse`` client. + + The config is optional and defaults to mounting on all clients. If + a config is given, it is expected to be a list of clients to do + this operation on. This lets you e.g. set up one client with + ``ceph-fuse`` and another with ``kclient``. + + Example that mounts all clients:: + + tasks: + - ceph: + - ceph-fuse: + - interactive: + + Example that uses both ``kclient` and ``ceph-fuse``:: + + tasks: + - ceph: + - ceph-fuse: [client.0] + - kclient: [client.1] + - interactive: + + Example that enables valgrind: + + tasks: + - ceph: + - ceph-fuse: + client.0: + valgrind: [--tool=memcheck, --leak-check=full, --show-reachable=yes] + - interactive: + + Example that stops an already-mounted client: + + :: + + tasks: + - ceph: + - ceph-fuse: [client.0] + - ... do something that requires the FS mounted ... + - ceph-fuse: + client.0: + mounted: false + - ... do something that requires the FS unmounted ... + + Example that adds more generous wait time for mount (for virtual machines): + + tasks: + - ceph: + - ceph-fuse: + client.0: + mount_wait: 60 # default is 0, do not wait before checking /sys/ + mount_timeout: 120 # default is 30, give up if /sys/ is not populated + - interactive: + + :param ctx: Context + :param config: Configuration + """ + log.info('Running ceph_fuse task...') + + testdir = teuthology.get_testdir(ctx) + log.info("config is {}".format(str(config))) + config = get_client_configs(ctx, config) + log.info("new config is {}".format(str(config))) + + # List clients we will configure mounts for, default is all clients + clients = list(teuthology.get_clients(ctx=ctx, roles=filter(lambda x: 'client.' in x, config.keys()))) + + all_mounts = getattr(ctx, 'mounts', {}) + mounted_by_me = {} + skipped = {} + + # Construct any new FuseMount instances + for id_, remote in clients: + client_config = config.get("client.%s" % id_) + if client_config is None: + client_config = {} + + auth_id = client_config.get("auth_id", id_) + + skip = client_config.get("skip", False) + if skip: + skipped[id_] = skip + continue + + if id_ not in all_mounts: + fuse_mount = FuseMount(ctx, client_config, testdir, auth_id, remote) + all_mounts[id_] = fuse_mount + else: + # Catch bad configs where someone has e.g. 
tried to use ceph-fuse and kcephfs for the same client + assert isinstance(all_mounts[id_], FuseMount) + + if not config.get("disabled", False) and client_config.get('mounted', True): + mounted_by_me[id_] = {"config": client_config, "mount": all_mounts[id_]} + + ctx.mounts = all_mounts + + # Mount any clients we have been asked to (default to mount all) + log.info('Mounting ceph-fuse clients...') + for info in mounted_by_me.values(): + config = info["config"] + mount_path = config.get("mount_path") + mountpoint = config.get("mountpoint") + info["mount"].mount(mountpoint=mountpoint, mount_path=mount_path) + + for info in mounted_by_me.values(): + info["mount"].wait_until_mounted() + + # Umount any pre-existing clients that we have not been asked to mount + for client_id in set(all_mounts.keys()) - set(mounted_by_me.keys()) - set(skipped.keys()): + mount = all_mounts[client_id] + if mount.is_mounted(): + mount.umount_wait() + + try: + yield all_mounts + finally: + log.info('Unmounting ceph-fuse clients...') + + for info in mounted_by_me.values(): + # Conditional because an inner context might have umounted it + mount = info["mount"] + if mount.is_mounted(): + mount.umount_wait() diff --git a/qa/tasks/ceph_manager.py b/qa/tasks/ceph_manager.py new file mode 100644 index 00000000..3e1a2ec5 --- /dev/null +++ b/qa/tasks/ceph_manager.py @@ -0,0 +1,2642 @@ +""" +ceph manager -- Thrasher and CephManager objects +""" +from functools import wraps +import contextlib +import random +import signal +import time +import gevent +import base64 +import json +import logging +import threading +import traceback +import os + +from io import BytesIO, StringIO +from teuthology import misc as teuthology +from tasks.scrub import Scrubber +from tasks.util.rados import cmd_erasure_code_profile +from tasks.util import get_remote +from teuthology.contextutil import safe_while +from teuthology.orchestra.remote import Remote +from teuthology.orchestra import run +from teuthology.exceptions import CommandFailedError + +try: + from subprocess import DEVNULL # py3k +except ImportError: + DEVNULL = open(os.devnull, 'r+') + +DEFAULT_CONF_PATH = '/etc/ceph/ceph.conf' + +log = logging.getLogger(__name__) + + +def write_conf(ctx, conf_path=DEFAULT_CONF_PATH, cluster='ceph'): + conf_fp = BytesIO() + ctx.ceph[cluster].conf.write(conf_fp) + conf_fp.seek(0) + writes = ctx.cluster.run( + args=[ + 'sudo', 'mkdir', '-p', '/etc/ceph', run.Raw('&&'), + 'sudo', 'chmod', '0755', '/etc/ceph', run.Raw('&&'), + 'sudo', 'tee', conf_path, run.Raw('&&'), + 'sudo', 'chmod', '0644', conf_path, + run.Raw('>'), '/dev/null', + + ], + stdin=run.PIPE, + wait=False) + teuthology.feed_many_stdins_and_close(conf_fp, writes) + run.wait(writes) + + +def mount_osd_data(ctx, remote, cluster, osd): + """ + Mount a remote OSD + + :param ctx: Context + :param remote: Remote site + :param cluster: name of ceph cluster + :param osd: Osd name + """ + log.debug('Mounting data for osd.{o} on {r}'.format(o=osd, r=remote)) + role = "{0}.osd.{1}".format(cluster, osd) + alt_role = role if cluster != 'ceph' else "osd.{0}".format(osd) + if remote in ctx.disk_config.remote_to_roles_to_dev: + if alt_role in ctx.disk_config.remote_to_roles_to_dev[remote]: + role = alt_role + if role not in ctx.disk_config.remote_to_roles_to_dev[remote]: + return + dev = ctx.disk_config.remote_to_roles_to_dev[remote][role] + mount_options = ctx.disk_config.\ + remote_to_roles_to_dev_mount_options[remote][role] + fstype = ctx.disk_config.remote_to_roles_to_dev_fstype[remote][role] + mnt = 
os.path.join('/var/lib/ceph/osd', '{0}-{1}'.format(cluster, osd)) + + log.info('Mounting osd.{o}: dev: {n}, cluster: {c}' + 'mountpoint: {p}, type: {t}, options: {v}'.format( + o=osd, n=remote.name, p=mnt, t=fstype, v=mount_options, + c=cluster)) + + remote.run( + args=[ + 'sudo', + 'mount', + '-t', fstype, + '-o', ','.join(mount_options), + dev, + mnt, + ] + ) + + +class Thrasher: + """ + Object used to thrash Ceph + """ + def __init__(self, manager, config, logger=None): + self.ceph_manager = manager + self.cluster = manager.cluster + self.ceph_manager.wait_for_clean() + osd_status = self.ceph_manager.get_osd_status() + self.in_osds = osd_status['in'] + self.live_osds = osd_status['live'] + self.out_osds = osd_status['out'] + self.dead_osds = osd_status['dead'] + self.stopping = False + self.logger = logger + self.config = config + self.revive_timeout = self.config.get("revive_timeout", 360) + self.pools_to_fix_pgp_num = set() + if self.config.get('powercycle'): + self.revive_timeout += 120 + self.clean_wait = self.config.get('clean_wait', 0) + self.minin = self.config.get("min_in", 4) + self.chance_move_pg = self.config.get('chance_move_pg', 1.0) + self.sighup_delay = self.config.get('sighup_delay') + self.optrack_toggle_delay = self.config.get('optrack_toggle_delay') + self.dump_ops_enable = self.config.get('dump_ops_enable') + self.noscrub_toggle_delay = self.config.get('noscrub_toggle_delay') + self.chance_thrash_cluster_full = self.config.get('chance_thrash_cluster_full', .05) + self.chance_thrash_pg_upmap = self.config.get('chance_thrash_pg_upmap', 1.0) + self.chance_thrash_pg_upmap_items = self.config.get('chance_thrash_pg_upmap', 1.0) + self.random_eio = self.config.get('random_eio') + self.chance_force_recovery = self.config.get('chance_force_recovery', 0.3) + + num_osds = self.in_osds + self.out_osds + self.max_pgs = self.config.get("max_pgs_per_pool_osd", 1200) * len(num_osds) + self.min_pgs = self.config.get("min_pgs_per_pool_osd", 1) * len(num_osds) + if self.logger is not None: + self.log = lambda x: self.logger.info(x) + else: + def tmp(x): + """ + Implement log behavior + """ + print(x) + self.log = tmp + if self.config is None: + self.config = dict() + # prevent monitor from auto-marking things out while thrasher runs + # try both old and new tell syntax, in case we are testing old code + self.saved_options = [] + # assuming that the default settings do not vary from one daemon to + # another + first_mon = teuthology.get_first_mon(manager.ctx, self.config).split('.') + opts = [('mon', 'mon_osd_down_out_interval', 0)] + for service, opt, new_value in opts: + old_value = manager.get_config(first_mon[0], + first_mon[1], + opt) + self.saved_options.append((service, opt, old_value)) + manager.inject_args(service, '*', opt, new_value) + # initialize ceph_objectstore_tool property - must be done before + # do_thrash is spawned - http://tracker.ceph.com/issues/18799 + if (self.config.get('powercycle') or + not self.cmd_exists_on_osds("ceph-objectstore-tool") or + self.config.get('disable_objectstore_tool_tests', False)): + self.ceph_objectstore_tool = False + if self.config.get('powercycle'): + self.log("Unable to test ceph-objectstore-tool, " + "powercycle testing") + else: + self.log("Unable to test ceph-objectstore-tool, " + "not available on all OSD nodes") + else: + self.ceph_objectstore_tool = \ + self.config.get('ceph_objectstore_tool', True) + # spawn do_thrash + self.thread = gevent.spawn(self.do_thrash) + if self.sighup_delay: + self.sighup_thread = 
gevent.spawn(self.do_sighup) + if self.optrack_toggle_delay: + self.optrack_toggle_thread = gevent.spawn(self.do_optrack_toggle) + if self.dump_ops_enable == "true": + self.dump_ops_thread = gevent.spawn(self.do_dump_ops) + if self.noscrub_toggle_delay: + self.noscrub_toggle_thread = gevent.spawn(self.do_noscrub_toggle) + + def cmd_exists_on_osds(self, cmd): + allremotes = self.ceph_manager.ctx.cluster.only(\ + teuthology.is_type('osd', self.cluster)).remotes.keys() + allremotes = list(set(allremotes)) + for remote in allremotes: + proc = remote.run(args=['type', cmd], wait=True, + check_status=False, stdout=BytesIO(), + stderr=BytesIO()) + if proc.exitstatus != 0: + return False; + return True; + + def run_ceph_objectstore_tool(self, remote, osd, cmd): + return remote.run( + args=['sudo', 'adjust-ulimits', 'ceph-objectstore-tool'] + cmd, + wait=True, check_status=False, + stdout=StringIO(), + stderr=StringIO()) + + def kill_osd(self, osd=None, mark_down=False, mark_out=False): + """ + :param osd: Osd to be killed. + :mark_down: Mark down if true. + :mark_out: Mark out if true. + """ + if osd is None: + osd = random.choice(self.live_osds) + self.log("Killing osd %s, live_osds are %s" % (str(osd), + str(self.live_osds))) + self.live_osds.remove(osd) + self.dead_osds.append(osd) + self.ceph_manager.kill_osd(osd) + if mark_down: + self.ceph_manager.mark_down_osd(osd) + if mark_out and osd in self.in_osds: + self.out_osd(osd) + if self.ceph_objectstore_tool: + self.log("Testing ceph-objectstore-tool on down osd") + remote = self.ceph_manager.find_remote('osd', osd) + FSPATH = self.ceph_manager.get_filepath() + JPATH = os.path.join(FSPATH, "journal") + exp_osd = imp_osd = osd + exp_remote = imp_remote = remote + # If an older osd is available we'll move a pg from there + if (len(self.dead_osds) > 1 and + random.random() < self.chance_move_pg): + exp_osd = random.choice(self.dead_osds[:-1]) + exp_remote = self.ceph_manager.find_remote('osd', exp_osd) + if ('keyvaluestore_backend' in + self.ceph_manager.ctx.ceph[self.cluster].conf['osd']): + prefix = ("sudo adjust-ulimits ceph-objectstore-tool " + "--data-path {fpath} --journal-path {jpath} " + "--type keyvaluestore " + "--log-file=" + "/var/log/ceph/objectstore_tool.\\$pid.log ". + format(fpath=FSPATH, jpath=JPATH)) + else: + prefix = ("sudo adjust-ulimits ceph-objectstore-tool " + "--data-path {fpath} --journal-path {jpath} " + "--log-file=" + "/var/log/ceph/objectstore_tool.\\$pid.log ". 
+ format(fpath=FSPATH, jpath=JPATH)) + cmd = (prefix + "--op list-pgs").format(id=exp_osd) + + # ceph-objectstore-tool might be temporarily absent during an + # upgrade - see http://tracker.ceph.com/issues/18014 + with safe_while(sleep=15, tries=40, action="type ceph-objectstore-tool") as proceed: + while proceed(): + proc = exp_remote.run(args=['type', 'ceph-objectstore-tool'], + wait=True, check_status=False, stdout=BytesIO(), + stderr=BytesIO()) + if proc.exitstatus == 0: + break + log.debug("ceph-objectstore-tool binary not present, trying again") + + # ceph-objectstore-tool might bogusly fail with "OSD has the store locked" + # see http://tracker.ceph.com/issues/19556 + with safe_while(sleep=15, tries=40, action="ceph-objectstore-tool --op list-pgs") as proceed: + while proceed(): + proc = exp_remote.run(args=cmd, wait=True, + check_status=False, + stdout=StringIO(), stderr=StringIO()) + if proc.exitstatus == 0: + break + elif (proc.exitstatus == 1 and + proc.stderr.getvalue() == "OSD has the store locked"): + continue + else: + raise Exception("ceph-objectstore-tool: " + "exp list-pgs failure with status {ret}". + format(ret=proc.exitstatus)) + + pgs = proc.stdout.getvalue().split('\n')[:-1] + if len(pgs) == 0: + self.log("No PGs found for osd.{osd}".format(osd=exp_osd)) + return + pg = random.choice(pgs) + exp_path = teuthology.get_testdir(self.ceph_manager.ctx) + exp_path = os.path.join(exp_path, '{0}.data'.format(self.cluster)) + exp_path = os.path.join(exp_path, + "exp.{pg}.{id}".format( + pg=pg, + id=exp_osd)) + # export + # Can't use new export-remove op since this is part of upgrade testing + cmd = prefix + "--op export --pgid {pg} --file {file}" + cmd = cmd.format(id=exp_osd, pg=pg, file=exp_path) + proc = exp_remote.run(args=cmd) + if proc.exitstatus: + raise Exception("ceph-objectstore-tool: " + "export failure with status {ret}". + format(ret=proc.exitstatus)) + # remove + cmd = prefix + "--force --op remove --pgid {pg}" + cmd = cmd.format(id=exp_osd, pg=pg) + proc = exp_remote.run(args=cmd) + if proc.exitstatus: + raise Exception("ceph-objectstore-tool: " + "remove failure with status {ret}". + format(ret=proc.exitstatus)) + # If there are at least 2 dead osds we might move the pg + if exp_osd != imp_osd: + # If pg isn't already on this osd, then we will move it there + cmd = (prefix + "--op list-pgs").format(id=imp_osd) + proc = imp_remote.run(args=cmd, wait=True, + check_status=False, stdout=StringIO()) + if proc.exitstatus: + raise Exception("ceph-objectstore-tool: " + "imp list-pgs failure with status {ret}". + format(ret=proc.exitstatus)) + pgs = proc.stdout.getvalue().split('\n')[:-1] + if pg not in pgs: + self.log("Moving pg {pg} from osd.{fosd} to osd.{tosd}". + format(pg=pg, fosd=exp_osd, tosd=imp_osd)) + if imp_remote != exp_remote: + # Copy export file to the other machine + self.log("Transfer export file from {srem} to {trem}". 
+ format(srem=exp_remote, trem=imp_remote)) + tmpexport = Remote.get_file(exp_remote, exp_path) + Remote.put_file(imp_remote, tmpexport, exp_path) + os.remove(tmpexport) + else: + # Can't move the pg after all + imp_osd = exp_osd + imp_remote = exp_remote + # import + cmd = (prefix + "--op import --file {file}") + cmd = cmd.format(id=imp_osd, file=exp_path) + proc = imp_remote.run(args=cmd, wait=True, check_status=False, + stderr=BytesIO()) + if proc.exitstatus == 1: + bogosity = "The OSD you are using is older than the exported PG" + if bogosity in proc.stderr.getvalue(): + self.log("OSD older than exported PG" + "...ignored") + elif proc.exitstatus == 10: + self.log("Pool went away before processing an import" + "...ignored") + elif proc.exitstatus == 11: + self.log("Attempt to import an incompatible export" + "...ignored") + elif proc.exitstatus == 12: + # this should be safe to ignore because we only ever move 1 + # copy of the pg at a time, and merge is only initiated when + # all replicas are peered and happy. /me crosses fingers + self.log("PG merged on target" + "...ignored") + elif proc.exitstatus: + raise Exception("ceph-objectstore-tool: " + "import failure with status {ret}". + format(ret=proc.exitstatus)) + cmd = "rm -f {file}".format(file=exp_path) + exp_remote.run(args=cmd) + if imp_remote != exp_remote: + imp_remote.run(args=cmd) + + # apply low split settings to each pool + for pool in self.ceph_manager.list_pools(): + no_sudo_prefix = prefix[5:] + cmd = ("CEPH_ARGS='--filestore-merge-threshold 1 " + "--filestore-split-multiple 1' sudo -E " + + no_sudo_prefix + "--op apply-layout-settings --pool " + pool).format(id=osd) + proc = remote.run(args=cmd, wait=True, check_status=False, + stderr=BytesIO()) + output = proc.stderr.getvalue() + if b'Couldn\'t find pool' in output: + continue + if proc.exitstatus: + raise Exception("ceph-objectstore-tool apply-layout-settings" + " failed with {status}".format(status=proc.exitstatus)) + + def blackhole_kill_osd(self, osd=None): + """ + If all else fails, kill the osd. + :param osd: Osd to be killed. + """ + if osd is None: + osd = random.choice(self.live_osds) + self.log("Blackholing and then killing osd %s, live_osds are %s" % + (str(osd), str(self.live_osds))) + self.live_osds.remove(osd) + self.dead_osds.append(osd) + self.ceph_manager.blackhole_kill_osd(osd) + + def revive_osd(self, osd=None, skip_admin_check=False): + """ + Revive the osd. + :param osd: Osd to be revived. + """ + if osd is None: + osd = random.choice(self.dead_osds) + self.log("Reviving osd %s" % (str(osd),)) + self.ceph_manager.revive_osd( + osd, + self.revive_timeout, + skip_admin_check=skip_admin_check) + self.dead_osds.remove(osd) + self.live_osds.append(osd) + if self.random_eio > 0 and osd == self.rerrosd: + self.ceph_manager.set_config(self.rerrosd, + filestore_debug_random_read_err = self.random_eio) + self.ceph_manager.set_config(self.rerrosd, + bluestore_debug_random_read_err = self.random_eio) + + + def out_osd(self, osd=None): + """ + Mark the osd out + :param osd: Osd to be marked. + """ + if osd is None: + osd = random.choice(self.in_osds) + self.log("Removing osd %s, in_osds are: %s" % + (str(osd), str(self.in_osds))) + self.ceph_manager.mark_out_osd(osd) + self.in_osds.remove(osd) + self.out_osds.append(osd) + + def in_osd(self, osd=None): + """ + Mark the osd out + :param osd: Osd to be marked. 
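+ Revives the osd first if it is currently dead; otherwise moves it from the out list to the in list and marks it in.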
+ """ + if osd is None: + osd = random.choice(self.out_osds) + if osd in self.dead_osds: + return self.revive_osd(osd) + self.log("Adding osd %s" % (str(osd),)) + self.out_osds.remove(osd) + self.in_osds.append(osd) + self.ceph_manager.mark_in_osd(osd) + self.log("Added osd %s" % (str(osd),)) + + def reweight_osd_or_by_util(self, osd=None): + """ + Reweight an osd that is in + :param osd: Osd to be marked. + """ + if osd is not None or random.choice([True, False]): + if osd is None: + osd = random.choice(self.in_osds) + val = random.uniform(.1, 1.0) + self.log("Reweighting osd %s to %s" % (str(osd), str(val))) + self.ceph_manager.raw_cluster_cmd('osd', 'reweight', + str(osd), str(val)) + else: + # do it several times, the option space is large + for i in range(5): + options = { + 'max_change': random.choice(['0.05', '1.0', '3.0']), + 'overage': random.choice(['110', '1000']), + 'type': random.choice([ + 'reweight-by-utilization', + 'test-reweight-by-utilization']), + } + self.log("Reweighting by: %s"%(str(options),)) + self.ceph_manager.raw_cluster_cmd( + 'osd', + options['type'], + options['overage'], + options['max_change']) + + def primary_affinity(self, osd=None): + if osd is None: + osd = random.choice(self.in_osds) + if random.random() >= .5: + pa = random.random() + elif random.random() >= .5: + pa = 1 + else: + pa = 0 + self.log('Setting osd %s primary_affinity to %f' % (str(osd), pa)) + self.ceph_manager.raw_cluster_cmd('osd', 'primary-affinity', + str(osd), str(pa)) + + def thrash_cluster_full(self): + """ + Set and unset cluster full condition + """ + self.log('Setting full ratio to .001') + self.ceph_manager.raw_cluster_cmd('osd', 'set-full-ratio', '.001') + time.sleep(1) + self.log('Setting full ratio back to .95') + self.ceph_manager.raw_cluster_cmd('osd', 'set-full-ratio', '.95') + + def thrash_pg_upmap(self): + """ + Install or remove random pg_upmap entries in OSDMap + """ + from random import shuffle + out = self.ceph_manager.raw_cluster_cmd('osd', 'dump', '-f', 'json-pretty') + j = json.loads(out) + self.log('j is %s' % j) + try: + if random.random() >= .3: + pgs = self.ceph_manager.get_pg_stats() + pg = random.choice(pgs) + pgid = str(pg['pgid']) + poolid = int(pgid.split('.')[0]) + sizes = [x['size'] for x in j['pools'] if x['pool'] == poolid] + if len(sizes) == 0: + return + n = sizes[0] + osds = self.in_osds + self.out_osds + shuffle(osds) + osds = osds[0:n] + self.log('Setting %s to %s' % (pgid, osds)) + cmd = ['osd', 'pg-upmap', pgid] + [str(x) for x in osds] + self.log('cmd %s' % cmd) + self.ceph_manager.raw_cluster_cmd(*cmd) + else: + m = j['pg_upmap'] + if len(m) > 0: + shuffle(m) + pg = m[0]['pgid'] + self.log('Clearing pg_upmap on %s' % pg) + self.ceph_manager.raw_cluster_cmd( + 'osd', + 'rm-pg-upmap', + pg) + else: + self.log('No pg_upmap entries; doing nothing') + except CommandFailedError: + self.log('Failed to rm-pg-upmap, ignoring') + + def thrash_pg_upmap_items(self): + """ + Install or remove random pg_upmap_items entries in OSDMap + """ + from random import shuffle + out = self.ceph_manager.raw_cluster_cmd('osd', 'dump', '-f', 'json-pretty') + j = json.loads(out) + self.log('j is %s' % j) + try: + if random.random() >= .3: + pgs = self.ceph_manager.get_pg_stats() + pg = random.choice(pgs) + pgid = str(pg['pgid']) + poolid = int(pgid.split('.')[0]) + sizes = [x['size'] for x in j['pools'] if x['pool'] == poolid] + if len(sizes) == 0: + return + n = sizes[0] + osds = self.in_osds + self.out_osds + shuffle(osds) + osds = osds[0:n*2] + self.log('Setting %s 
to %s' % (pgid, osds)) + cmd = ['osd', 'pg-upmap-items', pgid] + [str(x) for x in osds] + self.log('cmd %s' % cmd) + self.ceph_manager.raw_cluster_cmd(*cmd) + else: + m = j['pg_upmap_items'] + if len(m) > 0: + shuffle(m) + pg = m[0]['pgid'] + self.log('Clearing pg_upmap on %s' % pg) + self.ceph_manager.raw_cluster_cmd( + 'osd', + 'rm-pg-upmap-items', + pg) + else: + self.log('No pg_upmap entries; doing nothing') + except CommandFailedError: + self.log('Failed to rm-pg-upmap-items, ignoring') + + def force_recovery(self): + """ + Force recovery on some of PGs + """ + backfill = random.random() >= 0.5 + j = self.ceph_manager.get_pgids_to_force(backfill) + if j: + try: + if backfill: + self.ceph_manager.raw_cluster_cmd('pg', 'force-backfill', *j) + else: + self.ceph_manager.raw_cluster_cmd('pg', 'force-recovery', *j) + except CommandFailedError: + self.log('Failed to force backfill|recovery, ignoring') + + + def cancel_force_recovery(self): + """ + Force recovery on some of PGs + """ + backfill = random.random() >= 0.5 + j = self.ceph_manager.get_pgids_to_cancel_force(backfill) + if j: + try: + if backfill: + self.ceph_manager.raw_cluster_cmd('pg', 'cancel-force-backfill', *j) + else: + self.ceph_manager.raw_cluster_cmd('pg', 'cancel-force-recovery', *j) + except CommandFailedError: + self.log('Failed to force backfill|recovery, ignoring') + + def force_cancel_recovery(self): + """ + Force or cancel forcing recovery + """ + if random.random() >= 0.4: + self.force_recovery() + else: + self.cancel_force_recovery() + + def all_up(self): + """ + Make sure all osds are up and not out. + """ + while len(self.dead_osds) > 0: + self.log("reviving osd") + self.revive_osd() + while len(self.out_osds) > 0: + self.log("inning osd") + self.in_osd() + + def all_up_in(self): + """ + Make sure all osds are up and fully in. + """ + self.all_up(); + for osd in self.live_osds: + self.ceph_manager.raw_cluster_cmd('osd', 'reweight', + str(osd), str(1)) + self.ceph_manager.raw_cluster_cmd('osd', 'primary-affinity', + str(osd), str(1)) + + def do_join(self): + """ + Break out of this Ceph loop + """ + self.stopping = True + self.thread.get() + if self.sighup_delay: + self.log("joining the do_sighup greenlet") + self.sighup_thread.get() + if self.optrack_toggle_delay: + self.log("joining the do_optrack_toggle greenlet") + self.optrack_toggle_thread.join() + if self.dump_ops_enable == "true": + self.log("joining the do_dump_ops greenlet") + self.dump_ops_thread.join() + if self.noscrub_toggle_delay: + self.log("joining the do_noscrub_toggle greenlet") + self.noscrub_toggle_thread.join() + + def grow_pool(self): + """ + Increase the size of the pool + """ + pool = self.ceph_manager.get_pool() + orig_pg_num = self.ceph_manager.get_pool_pg_num(pool) + self.log("Growing pool %s" % (pool,)) + if self.ceph_manager.expand_pool(pool, + self.config.get('pool_grow_by', 10), + self.max_pgs): + self.pools_to_fix_pgp_num.add(pool) + + def shrink_pool(self): + """ + Decrease the size of the pool + """ + pool = self.ceph_manager.get_pool() + _ = self.ceph_manager.get_pool_pg_num(pool) + self.log("Shrinking pool %s" % (pool,)) + if self.ceph_manager.contract_pool( + pool, + self.config.get('pool_shrink_by', 10), + self.min_pgs): + self.pools_to_fix_pgp_num.add(pool) + + def fix_pgp_num(self, pool=None): + """ + Fix number of pgs in pool. 
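+ If no pool is given, a random pool is chosen and the fix is not forced.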
+ """ + if pool is None: + pool = self.ceph_manager.get_pool() + force = False + else: + force = True + self.log("fixing pg num pool %s" % (pool,)) + if self.ceph_manager.set_pool_pgpnum(pool, force): + self.pools_to_fix_pgp_num.discard(pool) + + def test_pool_min_size(self): + """ + Kill and revive all osds except one. + """ + self.log("test_pool_min_size") + self.all_up() + self.ceph_manager.wait_for_recovery( + timeout=self.config.get('timeout') + ) + the_one = random.choice(self.in_osds) + self.log("Killing everyone but %s", the_one) + to_kill = filter(lambda x: x != the_one, self.in_osds) + [self.kill_osd(i) for i in to_kill] + [self.out_osd(i) for i in to_kill] + time.sleep(self.config.get("test_pool_min_size_time", 10)) + self.log("Killing %s" % (the_one,)) + self.kill_osd(the_one) + self.out_osd(the_one) + self.log("Reviving everyone but %s" % (the_one,)) + [self.revive_osd(i) for i in to_kill] + [self.in_osd(i) for i in to_kill] + self.log("Revived everyone but %s" % (the_one,)) + self.log("Waiting for clean") + self.ceph_manager.wait_for_recovery( + timeout=self.config.get('timeout') + ) + + def inject_pause(self, conf_key, duration, check_after, should_be_down): + """ + Pause injection testing. Check for osd being down when finished. + """ + the_one = random.choice(self.live_osds) + self.log("inject_pause on {osd}".format(osd=the_one)) + self.log( + "Testing {key} pause injection for duration {duration}".format( + key=conf_key, + duration=duration + )) + self.log( + "Checking after {after}, should_be_down={shouldbedown}".format( + after=check_after, + shouldbedown=should_be_down + )) + self.ceph_manager.set_config(the_one, **{conf_key: duration}) + if not should_be_down: + return + time.sleep(check_after) + status = self.ceph_manager.get_osd_status() + assert the_one in status['down'] + time.sleep(duration - check_after + 20) + status = self.ceph_manager.get_osd_status() + assert not the_one in status['down'] + + def test_backfill_full(self): + """ + Test backfills stopping when the replica fills up. + + First, use injectfull admin command to simulate a now full + osd by setting it to 0 on all of the OSDs. + + Second, on a random subset, set + osd_debug_skip_full_check_in_backfill_reservation to force + the more complicated check in do_scan to be exercised. + + Then, verify that all backfillings stop. 
+ """ + self.log("injecting backfill full") + for i in self.live_osds: + self.ceph_manager.set_config( + i, + osd_debug_skip_full_check_in_backfill_reservation= + random.choice(['false', 'true'])) + self.ceph_manager.osd_admin_socket(i, command=['injectfull', 'backfillfull'], + check_status=True, timeout=30, stdout=DEVNULL) + for i in range(30): + status = self.ceph_manager.compile_pg_status() + if 'backfilling' not in status.keys(): + break + self.log( + "waiting for {still_going} backfillings".format( + still_going=status.get('backfilling'))) + time.sleep(1) + assert('backfilling' not in self.ceph_manager.compile_pg_status().keys()) + for i in self.live_osds: + self.ceph_manager.set_config( + i, + osd_debug_skip_full_check_in_backfill_reservation='false') + self.ceph_manager.osd_admin_socket(i, command=['injectfull', 'none'], + check_status=True, timeout=30, stdout=DEVNULL) + + def test_map_discontinuity(self): + """ + 1) Allows the osds to recover + 2) kills an osd + 3) allows the remaining osds to recover + 4) waits for some time + 5) revives the osd + This sequence should cause the revived osd to have to handle + a map gap since the mons would have trimmed + """ + while len(self.in_osds) < (self.minin + 1): + self.in_osd() + self.log("Waiting for recovery") + self.ceph_manager.wait_for_all_osds_up( + timeout=self.config.get('timeout') + ) + # now we wait 20s for the pg status to change, if it takes longer, + # the test *should* fail! + time.sleep(20) + self.ceph_manager.wait_for_clean( + timeout=self.config.get('timeout') + ) + + # now we wait 20s for the backfill replicas to hear about the clean + time.sleep(20) + self.log("Recovered, killing an osd") + self.kill_osd(mark_down=True, mark_out=True) + self.log("Waiting for clean again") + self.ceph_manager.wait_for_clean( + timeout=self.config.get('timeout') + ) + self.log("Waiting for trim") + time.sleep(int(self.config.get("map_discontinuity_sleep_time", 40))) + self.revive_osd() + + def choose_action(self): + """ + Random action selector. 
+ """ + chance_down = self.config.get('chance_down', 0.4) + chance_test_backfill_full = \ + self.config.get('chance_test_backfill_full', 0) + if isinstance(chance_down, int): + chance_down = float(chance_down) / 100 + minin = self.minin + minout = self.config.get("min_out", 0) + minlive = self.config.get("min_live", 2) + mindead = self.config.get("min_dead", 0) + + self.log('choose_action: min_in %d min_out ' + '%d min_live %d min_dead %d' % + (minin, minout, minlive, mindead)) + actions = [] + if len(self.in_osds) > minin: + actions.append((self.out_osd, 1.0,)) + if len(self.live_osds) > minlive and chance_down > 0: + actions.append((self.kill_osd, chance_down,)) + if len(self.out_osds) > minout: + actions.append((self.in_osd, 1.7,)) + if len(self.dead_osds) > mindead: + actions.append((self.revive_osd, 1.0,)) + if self.config.get('thrash_primary_affinity', True): + actions.append((self.primary_affinity, 1.0,)) + actions.append((self.reweight_osd_or_by_util, + self.config.get('reweight_osd', .5),)) + actions.append((self.grow_pool, + self.config.get('chance_pgnum_grow', 0),)) + actions.append((self.shrink_pool, + self.config.get('chance_pgnum_shrink', 0),)) + actions.append((self.fix_pgp_num, + self.config.get('chance_pgpnum_fix', 0),)) + actions.append((self.test_pool_min_size, + self.config.get('chance_test_min_size', 0),)) + actions.append((self.test_backfill_full, + chance_test_backfill_full,)) + if self.chance_thrash_cluster_full > 0: + actions.append((self.thrash_cluster_full, self.chance_thrash_cluster_full,)) + if self.chance_thrash_pg_upmap > 0: + actions.append((self.thrash_pg_upmap, self.chance_thrash_pg_upmap,)) + if self.chance_thrash_pg_upmap_items > 0: + actions.append((self.thrash_pg_upmap_items, self.chance_thrash_pg_upmap_items,)) + if self.chance_force_recovery > 0: + actions.append((self.force_cancel_recovery, self.chance_force_recovery)) + + for key in ['heartbeat_inject_failure', 'filestore_inject_stall']: + for scenario in [ + (lambda: + self.inject_pause(key, + self.config.get('pause_short', 3), + 0, + False), + self.config.get('chance_inject_pause_short', 1),), + (lambda: + self.inject_pause(key, + self.config.get('pause_long', 80), + self.config.get('pause_check_after', 70), + True), + self.config.get('chance_inject_pause_long', 0),)]: + actions.append(scenario) + + total = sum([y for (x, y) in actions]) + val = random.uniform(0, total) + for (action, prob) in actions: + if val < prob: + return action + val -= prob + return None + + def log_exc(func): + @wraps(func) + def wrapper(self): + try: + return func(self) + except: + self.log(traceback.format_exc()) + raise + return wrapper + + @log_exc + def do_sighup(self): + """ + Loops and sends signal.SIGHUP to a random live osd. + + Loop delay is controlled by the config value sighup_delay. + """ + delay = float(self.sighup_delay) + self.log("starting do_sighup with a delay of {0}".format(delay)) + while not self.stopping: + osd = random.choice(self.live_osds) + self.ceph_manager.signal_osd(osd, signal.SIGHUP, silent=True) + time.sleep(delay) + + @log_exc + def do_optrack_toggle(self): + """ + Loops and toggle op tracking to all osds. + + Loop delay is controlled by the config value optrack_toggle_delay. 
+ """ + delay = float(self.optrack_toggle_delay) + osd_state = "true" + self.log("starting do_optrack_toggle with a delay of {0}".format(delay)) + while not self.stopping: + if osd_state == "true": + osd_state = "false" + else: + osd_state = "true" + try: + self.ceph_manager.inject_args('osd', '*', + 'osd_enable_op_tracker', + osd_state) + except CommandFailedError: + self.log('Failed to tell all osds, ignoring') + gevent.sleep(delay) + + @log_exc + def do_dump_ops(self): + """ + Loops and does op dumps on all osds + """ + self.log("starting do_dump_ops") + while not self.stopping: + for osd in self.live_osds: + # Ignore errors because live_osds is in flux + self.ceph_manager.osd_admin_socket(osd, command=['dump_ops_in_flight'], + check_status=False, timeout=30, stdout=DEVNULL) + self.ceph_manager.osd_admin_socket(osd, command=['dump_blocked_ops'], + check_status=False, timeout=30, stdout=DEVNULL) + self.ceph_manager.osd_admin_socket(osd, command=['dump_historic_ops'], + check_status=False, timeout=30, stdout=DEVNULL) + gevent.sleep(0) + + @log_exc + def do_noscrub_toggle(self): + """ + Loops and toggle noscrub flags + + Loop delay is controlled by the config value noscrub_toggle_delay. + """ + delay = float(self.noscrub_toggle_delay) + scrub_state = "none" + self.log("starting do_noscrub_toggle with a delay of {0}".format(delay)) + while not self.stopping: + if scrub_state == "none": + self.ceph_manager.raw_cluster_cmd('osd', 'set', 'noscrub') + scrub_state = "noscrub" + elif scrub_state == "noscrub": + self.ceph_manager.raw_cluster_cmd('osd', 'set', 'nodeep-scrub') + scrub_state = "both" + elif scrub_state == "both": + self.ceph_manager.raw_cluster_cmd('osd', 'unset', 'noscrub') + scrub_state = "nodeep-scrub" + else: + self.ceph_manager.raw_cluster_cmd('osd', 'unset', 'nodeep-scrub') + scrub_state = "none" + gevent.sleep(delay) + self.ceph_manager.raw_cluster_cmd('osd', 'unset', 'noscrub') + self.ceph_manager.raw_cluster_cmd('osd', 'unset', 'nodeep-scrub') + + @log_exc + def do_thrash(self): + """ + Loop to select random actions to thrash ceph manager with. 
+ """ + cleanint = self.config.get("clean_interval", 60) + scrubint = self.config.get("scrub_interval", -1) + maxdead = self.config.get("max_dead", 0) + delay = self.config.get("op_delay", 5) + self.rerrosd = self.live_osds[0] + if self.random_eio > 0: + self.ceph_manager.inject_args('osd', self.rerrosd, + 'filestore_debug_random_read_err', + self.random_eio) + self.ceph_manager.inject_args('osd', self.rerrosd, + 'bluestore_debug_random_read_err', + self.random_eio) + self.log("starting do_thrash") + while not self.stopping: + to_log = [str(x) for x in ["in_osds: ", self.in_osds, + "out_osds: ", self.out_osds, + "dead_osds: ", self.dead_osds, + "live_osds: ", self.live_osds]] + self.log(" ".join(to_log)) + if random.uniform(0, 1) < (float(delay) / cleanint): + while len(self.dead_osds) > maxdead: + self.revive_osd() + for osd in self.in_osds: + self.ceph_manager.raw_cluster_cmd('osd', 'reweight', + str(osd), str(1)) + if random.uniform(0, 1) < float( + self.config.get('chance_test_map_discontinuity', 0)) \ + and len(self.live_osds) > 5: # avoid m=2,k=2 stall, w/ some buffer for crush being picky + self.test_map_discontinuity() + else: + self.ceph_manager.wait_for_recovery( + timeout=self.config.get('timeout') + ) + time.sleep(self.clean_wait) + if scrubint > 0: + if random.uniform(0, 1) < (float(delay) / scrubint): + self.log('Scrubbing while thrashing being performed') + Scrubber(self.ceph_manager, self.config) + self.choose_action()() + time.sleep(delay) + self.all_up() + if self.random_eio > 0: + self.ceph_manager.inject_args('osd', self.rerrosd, + 'filestore_debug_random_read_err', '0.0') + self.ceph_manager.inject_args('osd', self.rerrosd, + 'bluestore_debug_random_read_err', '0.0') + for pool in list(self.pools_to_fix_pgp_num): + if self.ceph_manager.get_pool_pg_num(pool) > 0: + self.fix_pgp_num(pool) + self.pools_to_fix_pgp_num.clear() + for service, opt, saved_value in self.saved_options: + self.ceph_manager.inject_args(service, '*', opt, saved_value) + self.saved_options = [] + self.all_up_in() + + +class ObjectStoreTool: + + def __init__(self, manager, pool, **kwargs): + self.manager = manager + self.pool = pool + self.osd = kwargs.get('osd', None) + self.object_name = kwargs.get('object_name', None) + self.do_revive = kwargs.get('do_revive', True) + if self.osd and self.pool and self.object_name: + if self.osd == "primary": + self.osd = self.manager.get_object_primary(self.pool, + self.object_name) + assert self.osd + if self.object_name: + self.pgid = self.manager.get_object_pg_with_shard(self.pool, + self.object_name, + self.osd) + self.remote = next(iter(self.manager.ctx.\ + cluster.only('osd.{o}'.format(o=self.osd)).remotes.keys())) + path = self.manager.get_filepath().format(id=self.osd) + self.paths = ("--data-path {path} --journal-path {path}/journal". + format(path=path)) + + def build_cmd(self, options, args, stdin): + lines = [] + if self.object_name: + lines.append("object=$(sudo adjust-ulimits ceph-objectstore-tool " + "{paths} --pgid {pgid} --op list |" + "grep '\"oid\":\"{name}\"')". + format(paths=self.paths, + pgid=self.pgid, + name=self.object_name)) + args = '"$object" ' + args + options += " --pgid {pgid}".format(pgid=self.pgid) + cmd = ("sudo adjust-ulimits ceph-objectstore-tool {paths} {options} {args}". + format(paths=self.paths, + args=args, + options=options)) + if stdin: + cmd = ("echo {payload} | base64 --decode | {cmd}". 
+ format(payload=base64.encode(stdin), + cmd=cmd)) + lines.append(cmd) + return "\n".join(lines) + + def run(self, options, args): + self.manager.kill_osd(self.osd) + cmd = self.build_cmd(options, args, None) + self.manager.log(cmd) + try: + proc = self.remote.run(args=['bash', '-e', '-x', '-c', cmd], + check_status=False, + stdout=BytesIO(), + stderr=BytesIO()) + proc.wait() + if proc.exitstatus != 0: + self.manager.log("failed with " + str(proc.exitstatus)) + error = proc.stdout.getvalue().decode() + " " + \ + proc.stderr.getvalue().decode() + raise Exception(error) + finally: + if self.do_revive: + self.manager.revive_osd(self.osd) + self.manager.wait_till_osd_is_up(self.osd, 300) + + +class CephManager: + """ + Ceph manager object. + Contains several local functions that form a bulk of this module. + + Note: this class has nothing to do with the Ceph daemon (ceph-mgr) of + the same name. + """ + + REPLICATED_POOL = 1 + ERASURE_CODED_POOL = 3 + + def __init__(self, controller, ctx=None, config=None, logger=None, + cluster='ceph'): + self.lock = threading.RLock() + self.ctx = ctx + self.config = config + self.controller = controller + self.next_pool_id = 0 + self.cluster = cluster + if (logger): + self.log = lambda x: logger.info(x) + else: + def tmp(x): + """ + implement log behavior. + """ + print(x) + self.log = tmp + if self.config is None: + self.config = dict() + pools = self.list_pools() + self.pools = {} + for pool in pools: + # we may race with a pool deletion; ignore failures here + try: + self.pools[pool] = self.get_pool_property(pool, 'pg_num') + except CommandFailedError: + self.log('Failed to get pg_num from pool %s, ignoring' % pool) + + def raw_cluster_cmd(self, *args): + """ + Start ceph on a raw cluster. Return count + """ + testdir = teuthology.get_testdir(self.ctx) + ceph_args = [ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'timeout', + '120', + 'ceph', + '--cluster', + self.cluster, + ] + ceph_args.extend(args) + proc = self.controller.run( + args=ceph_args, + stdout=StringIO(), + ) + return proc.stdout.getvalue() + + def raw_cluster_cmd_result(self, *args, **kwargs): + """ + Start ceph on a cluster. Return success or failure information. + """ + testdir = teuthology.get_testdir(self.ctx) + ceph_args = [ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'timeout', + '900', + 'ceph', + '--cluster', + self.cluster, + ] + ceph_args.extend(args) + kwargs['args'] = ceph_args + kwargs['check_status'] = False + proc = self.controller.run(**kwargs) + return proc.exitstatus + + def run_ceph_w(self, watch_channel=None): + """ + Execute "ceph -w" in the background with stdout connected to a BytesIO, + and return the RemoteProcess. + + :param watch_channel: Specifies the channel to be watched. This can be + 'cluster', 'audit', ... + :type watch_channel: str + """ + args = ["sudo", + "daemon-helper", + "kill", + "ceph", + '--cluster', + self.cluster, + "-w"] + if watch_channel is not None: + args.append("--watch-channel") + args.append(watch_channel) + return self.controller.run(args=args, wait=False, stdout=StringIO(), stdin=run.PIPE) + + def flush_pg_stats(self, osds, no_wait=None, wait_for_mon=300): + """ + Flush pg stats from a list of OSD ids, ensuring they are reflected + all the way to the monitor. Luminous and later only. + + :param osds: list of OSDs to flush + :param no_wait: list of OSDs not to wait for seq id. 
by default, we + wait for all specified osds, but some of them could be + moved out of osdmap, so we cannot get their updated + stat seq from monitor anymore. in that case, you need + to pass a blacklist. + :param wait_for_mon: wait for mon to be synced with mgr. 0 to disable + it. (5 min by default) + """ + seq = {osd: int(self.raw_cluster_cmd('tell', 'osd.%d' % osd, 'flush_pg_stats')) + for osd in osds} + if not wait_for_mon: + return + if no_wait is None: + no_wait = [] + for osd, need in seq.items(): + if osd in no_wait: + continue + got = 0 + while wait_for_mon > 0: + got = int(self.raw_cluster_cmd('osd', 'last-stat-seq', 'osd.%d' % osd)) + self.log('need seq {need} got {got} for osd.{osd}'.format( + need=need, got=got, osd=osd)) + if got >= need: + break + A_WHILE = 1 + time.sleep(A_WHILE) + wait_for_mon -= A_WHILE + else: + raise Exception('timed out waiting for mon to be updated with ' + 'osd.{osd}: {got} < {need}'. + format(osd=osd, got=got, need=need)) + + def flush_all_pg_stats(self): + self.flush_pg_stats(range(len(self.get_osd_dump()))) + + def do_rados(self, remote, cmd, check_status=True): + """ + Execute a remote rados command. + """ + testdir = teuthology.get_testdir(self.ctx) + pre = [ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'rados', + '--cluster', + self.cluster, + ] + pre.extend(cmd) + proc = remote.run( + args=pre, + wait=True, + check_status=check_status + ) + return proc + + def rados_write_objects(self, pool, num_objects, size, + timelimit, threads, cleanup=False): + """ + Write rados objects + Threads not used yet. + """ + args = [ + '-p', pool, + '--num-objects', num_objects, + '-b', size, + 'bench', timelimit, + 'write' + ] + if not cleanup: + args.append('--no-cleanup') + return self.do_rados(self.controller, map(str, args)) + + def do_put(self, pool, obj, fname, namespace=None): + """ + Implement rados put operation + """ + args = ['-p', pool] + if namespace is not None: + args += ['-N', namespace] + args += [ + 'put', + obj, + fname + ] + return self.do_rados( + self.controller, + args, + check_status=False + ).exitstatus + + def do_get(self, pool, obj, fname='/dev/null', namespace=None): + """ + Implement rados get operation + """ + args = ['-p', pool] + if namespace is not None: + args += ['-N', namespace] + args += [ + 'get', + obj, + fname + ] + return self.do_rados( + self.controller, + args, + check_status=False + ).exitstatus + + def do_rm(self, pool, obj, namespace=None): + """ + Implement rados rm operation + """ + args = ['-p', pool] + if namespace is not None: + args += ['-N', namespace] + args += [ + 'rm', + obj + ] + return self.do_rados( + self.controller, + args, + check_status=False + ).exitstatus + + def osd_admin_socket(self, osd_id, command, check_status=True, timeout=0, stdout=None): + if stdout is None: + stdout = StringIO() + return self.admin_socket('osd', osd_id, command, check_status, timeout, stdout) + + def find_remote(self, service_type, service_id): + """ + Get the Remote for the host where a particular service runs. + + :param service_type: 'mds', 'osd', 'client' + :param service_id: The second part of a role, e.g. 
'0' for + the role 'client.0' + :return: a Remote instance for the host where the + requested role is placed + """ + return get_remote(self.ctx, self.cluster, + service_type, service_id) + + def admin_socket(self, service_type, service_id, + command, check_status=True, timeout=0, stdout=None): + """ + Remotely start up ceph specifying the admin socket + :param command: a list of words to use as the command + to the admin socket + """ + if stdout is None: + stdout = StringIO() + testdir = teuthology.get_testdir(self.ctx) + remote = self.find_remote(service_type, service_id) + args = [ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'timeout', + str(timeout), + 'ceph', + '--cluster', + self.cluster, + '--admin-daemon', + '/var/run/ceph/{cluster}-{type}.{id}.asok'.format( + cluster=self.cluster, + type=service_type, + id=service_id), + ] + args.extend(command) + return remote.run( + args=args, + stdout=stdout, + wait=True, + check_status=check_status + ) + + def objectstore_tool(self, pool, options, args, **kwargs): + return ObjectStoreTool(self, pool, **kwargs).run(options, args) + + def get_pgid(self, pool, pgnum): + """ + :param pool: pool name + :param pgnum: pg number + :returns: a string representing this pg. + """ + poolnum = self.get_pool_num(pool) + pg_str = "{poolnum}.{pgnum}".format( + poolnum=poolnum, + pgnum=pgnum) + return pg_str + + def get_pg_replica(self, pool, pgnum): + """ + get replica for pool, pgnum (e.g. (data, 0)->0 + """ + pg_str = self.get_pgid(pool, pgnum) + output = self.raw_cluster_cmd("pg", "map", pg_str, '--format=json') + j = json.loads('\n'.join(output.split('\n')[1:])) + return int(j['acting'][-1]) + assert False + + def wait_for_pg_stats(func): + # both osd_mon_report_interval and mgr_stats_period are 5 seconds + # by default, and take the faulty injection in ms into consideration, + # 12 seconds are more than enough + delays = [1, 1, 2, 3, 5, 8, 13, 0] + @wraps(func) + def wrapper(self, *args, **kwargs): + exc = None + for delay in delays: + try: + return func(self, *args, **kwargs) + except AssertionError as e: + time.sleep(delay) + exc = e + raise exc + return wrapper + + def get_pg_primary(self, pool, pgnum): + """ + get primary for pool, pgnum (e.g. (data, 0)->0 + """ + pg_str = self.get_pgid(pool, pgnum) + output = self.raw_cluster_cmd("pg", "map", pg_str, '--format=json') + j = json.loads('\n'.join(output.split('\n')[1:])) + return int(j['acting'][0]) + assert False + + def get_pool_num(self, pool): + """ + get number for pool (e.g., data -> 2) + """ + return int(self.get_pool_dump(pool)['pool']) + + def list_pools(self): + """ + list all pool names + """ + osd_dump = self.get_osd_dump_json() + self.log(osd_dump['pools']) + return [str(i['pool_name']) for i in osd_dump['pools']] + + def clear_pools(self): + """ + remove all pools + """ + [self.remove_pool(i) for i in self.list_pools()] + + def kick_recovery_wq(self, osdnum): + """ + Run kick_recovery_wq on cluster. + """ + return self.raw_cluster_cmd( + 'tell', "osd.%d" % (int(osdnum),), + 'debug', + 'kick_recovery_wq', + '0') + + def wait_run_admin_socket(self, service_type, + service_id, args=['version'], timeout=75, stdout=None): + """ + If osd_admin_socket call succeeds, return. Otherwise wait + five seconds and try again. 
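+ Raises an exception if the admin socket has not responded within the timeout.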
+ """ + if stdout is None: + stdout = StringIO() + tries = 0 + while True: + proc = self.admin_socket(service_type, service_id, + args, check_status=False, stdout=stdout) + if proc.exitstatus == 0: + return proc + else: + tries += 1 + if (tries * 5) > timeout: + raise Exception('timed out waiting for admin_socket ' + 'to appear after {type}.{id} restart'. + format(type=service_type, + id=service_id)) + self.log("waiting on admin_socket for {type}-{id}, " + "{command}".format(type=service_type, + id=service_id, + command=args)) + time.sleep(5) + + def get_pool_dump(self, pool): + """ + get the osd dump part of a pool + """ + osd_dump = self.get_osd_dump_json() + for i in osd_dump['pools']: + if i['pool_name'] == pool: + return i + assert False + + def get_config(self, service_type, service_id, name): + """ + :param node: like 'mon.a' + :param name: the option name + """ + proc = self.wait_run_admin_socket(service_type, service_id, + ['config', 'show']) + j = json.loads(proc.stdout.getvalue()) + return j[name] + + def inject_args(self, service_type, service_id, name, value): + whom = '{0}.{1}'.format(service_type, service_id) + if isinstance(value, bool): + value = 'true' if value else 'false' + opt_arg = '--{name}={value}'.format(name=name, value=value) + self.raw_cluster_cmd('--', 'tell', whom, 'injectargs', opt_arg) + + def set_config(self, osdnum, **argdict): + """ + :param osdnum: osd number + :param argdict: dictionary containing values to set. + """ + for k, v in argdict.items(): + self.wait_run_admin_socket( + 'osd', osdnum, + ['config', 'set', str(k), str(v)]) + + def raw_cluster_status(self): + """ + Get status from cluster + """ + status = self.raw_cluster_cmd('status', '--format=json-pretty') + return json.loads(status) + + def raw_osd_status(self): + """ + Get osd status from cluster + """ + return self.raw_cluster_cmd('osd', 'dump') + + def get_osd_status(self): + """ + Get osd statuses sorted by states that the osds are in. + """ + osd_lines = list(filter( + lambda x: x.startswith('osd.') and (("up" in x) or ("down" in x)), + self.raw_osd_status().split('\n'))) + self.log(osd_lines) + in_osds = [int(i[4:].split()[0]) + for i in filter(lambda x: " in " in x, osd_lines)] + out_osds = [int(i[4:].split()[0]) + for i in filter(lambda x: " out " in x, osd_lines)] + up_osds = [int(i[4:].split()[0]) + for i in filter(lambda x: " up " in x, osd_lines)] + down_osds = [int(i[4:].split()[0]) + for i in filter(lambda x: " down " in x, osd_lines)] + dead_osds = [int(x.id_) + for x in filter(lambda x: + not x.running(), + self.ctx.daemons. + iter_daemons_of_role('osd', self.cluster))] + live_osds = [int(x.id_) for x in + filter(lambda x: + x.running(), + self.ctx.daemons.iter_daemons_of_role('osd', + self.cluster))] + return {'in': in_osds, 'out': out_osds, 'up': up_osds, + 'down': down_osds, 'dead': dead_osds, 'live': live_osds, + 'raw': osd_lines} + + def get_num_pgs(self): + """ + Check cluster status for the number of pgs + """ + status = self.raw_cluster_status() + self.log(status) + return status['pgmap']['num_pgs'] + + def create_erasure_code_profile(self, profile_name, profile): + """ + Create an erasure code profile name that can be used as a parameter + when creating an erasure coded pool. 
+ """ + with self.lock: + args = cmd_erasure_code_profile(profile_name, profile) + self.raw_cluster_cmd(*args) + + def create_pool_with_unique_name(self, pg_num=16, + erasure_code_profile_name=None, + min_size=None, + erasure_code_use_overwrites=False): + """ + Create a pool named unique_pool_X where X is unique. + """ + name = "" + with self.lock: + name = "unique_pool_%s" % (str(self.next_pool_id),) + self.next_pool_id += 1 + self.create_pool( + name, + pg_num, + erasure_code_profile_name=erasure_code_profile_name, + min_size=min_size, + erasure_code_use_overwrites=erasure_code_use_overwrites) + return name + + @contextlib.contextmanager + def pool(self, pool_name, pg_num=16, erasure_code_profile_name=None): + self.create_pool(pool_name, pg_num, erasure_code_profile_name) + yield + self.remove_pool(pool_name) + + def create_pool(self, pool_name, pg_num=16, + erasure_code_profile_name=None, + min_size=None, + erasure_code_use_overwrites=False): + """ + Create a pool named from the pool_name parameter. + :param pool_name: name of the pool being created. + :param pg_num: initial number of pgs. + :param erasure_code_profile_name: if set and !None create an + erasure coded pool using the profile + :param erasure_code_use_overwrites: if true, allow overwrites + """ + with self.lock: + assert isinstance(pool_name, str) + assert isinstance(pg_num, int) + assert pool_name not in self.pools + self.log("creating pool_name %s" % (pool_name,)) + if erasure_code_profile_name: + self.raw_cluster_cmd('osd', 'pool', 'create', + pool_name, str(pg_num), str(pg_num), + 'erasure', erasure_code_profile_name) + else: + self.raw_cluster_cmd('osd', 'pool', 'create', + pool_name, str(pg_num)) + if min_size is not None: + self.raw_cluster_cmd( + 'osd', 'pool', 'set', pool_name, + 'min_size', + str(min_size)) + if erasure_code_use_overwrites: + self.raw_cluster_cmd( + 'osd', 'pool', 'set', pool_name, + 'allow_ec_overwrites', + 'true') + self.raw_cluster_cmd( + 'osd', 'pool', 'application', 'enable', + pool_name, 'rados', '--yes-i-really-mean-it', + run.Raw('||'), 'true') + self.pools[pool_name] = pg_num + time.sleep(1) + + def add_pool_snap(self, pool_name, snap_name): + """ + Add pool snapshot + :param pool_name: name of pool to snapshot + :param snap_name: name of snapshot to take + """ + self.raw_cluster_cmd('osd', 'pool', 'mksnap', + str(pool_name), str(snap_name)) + + def remove_pool_snap(self, pool_name, snap_name): + """ + Remove pool snapshot + :param pool_name: name of pool to snapshot + :param snap_name: name of snapshot to remove + """ + self.raw_cluster_cmd('osd', 'pool', 'rmsnap', + str(pool_name), str(snap_name)) + + def remove_pool(self, pool_name): + """ + Remove the indicated pool + :param pool_name: Pool to be removed + """ + with self.lock: + assert isinstance(pool_name, str) + assert pool_name in self.pools + self.log("removing pool_name %s" % (pool_name,)) + del self.pools[pool_name] + self.raw_cluster_cmd('osd', 'pool', 'rm', pool_name, pool_name, + "--yes-i-really-really-mean-it") + + def get_pool(self): + """ + Pick a random pool + """ + with self.lock: + return random.sample(self.pools.keys(), 1)[0] + + def get_pool_pg_num(self, pool_name): + """ + Return the number of pgs in the pool specified. + """ + with self.lock: + assert isinstance(pool_name, str) + if pool_name in self.pools: + return self.pools[pool_name] + return 0 + + def get_pool_property(self, pool_name, prop): + """ + :param pool_name: pool + :param prop: property to be checked. + :returns: property as an int value. 
+ """ + with self.lock: + assert isinstance(pool_name, str) + assert isinstance(prop, str) + output = self.raw_cluster_cmd( + 'osd', + 'pool', + 'get', + pool_name, + prop) + return int(output.split()[1]) + + def set_pool_property(self, pool_name, prop, val): + """ + :param pool_name: pool + :param prop: property to be set. + :param val: value to set. + + This routine retries if set operation fails. + """ + with self.lock: + assert isinstance(pool_name, str) + assert isinstance(prop, str) + assert isinstance(val, int) + tries = 0 + while True: + r = self.raw_cluster_cmd_result( + 'osd', + 'pool', + 'set', + pool_name, + prop, + str(val)) + if r != 11: # EAGAIN + break + tries += 1 + if tries > 50: + raise Exception('timed out getting EAGAIN ' + 'when setting pool property %s %s = %s' % + (pool_name, prop, val)) + self.log('got EAGAIN setting pool property, ' + 'waiting a few seconds...') + time.sleep(2) + + def expand_pool(self, pool_name, by, max_pgs): + """ + Increase the number of pgs in a pool + """ + with self.lock: + assert isinstance(pool_name, str) + assert isinstance(by, int) + assert pool_name in self.pools + if self.get_num_creating() > 0: + return False + if (self.pools[pool_name] + by) > max_pgs: + return False + self.log("increase pool size by %d" % (by,)) + new_pg_num = self.pools[pool_name] + by + self.set_pool_property(pool_name, "pg_num", new_pg_num) + self.pools[pool_name] = new_pg_num + return True + + def contract_pool(self, pool_name, by, min_pgs): + """ + Decrease the number of pgs in a pool + """ + with self.lock: + self.log('contract_pool %s by %s min %s' % ( + pool_name, str(by), str(min_pgs))) + assert isinstance(pool_name, str) + assert isinstance(by, int) + assert pool_name in self.pools + if self.get_num_creating() > 0: + self.log('too many creating') + return False + proj = self.pools[pool_name] - by + if proj < min_pgs: + self.log('would drop below min_pgs, proj %d, currently %d' % (proj,self.pools[pool_name],)) + return False + self.log("decrease pool size by %d" % (by,)) + new_pg_num = self.pools[pool_name] - by + self.set_pool_property(pool_name, "pg_num", new_pg_num) + self.pools[pool_name] = new_pg_num + return True + + def stop_pg_num_changes(self): + """ + Reset all pg_num_targets back to pg_num, canceling splits and merges + """ + self.log('Canceling any pending splits or merges...') + osd_dump = self.get_osd_dump_json() + for pool in osd_dump['pools']: + if 'pg_num_target' not in pool: + # mimic does not adjust pg num automatically + continue + if pool['pg_num'] != pool['pg_num_target']: + self.log('Setting pool %s (%d) pg_num %d -> %d' % + (pool['pool_name'], pool['pool'], + pool['pg_num_target'], + pool['pg_num'])) + self.raw_cluster_cmd('osd', 'pool', 'set', pool['pool_name'], + 'pg_num', str(pool['pg_num'])) + + def set_pool_pgpnum(self, pool_name, force): + """ + Set pgpnum property of pool_name pool. 
+ """ + with self.lock: + assert isinstance(pool_name, str) + assert pool_name in self.pools + if not force and self.get_num_creating() > 0: + return False + self.set_pool_property(pool_name, 'pgp_num', self.pools[pool_name]) + return True + + def list_pg_unfound(self, pgid): + """ + return list of unfound pgs with the id specified + """ + r = None + offset = {} + while True: + out = self.raw_cluster_cmd('--', 'pg', pgid, 'list_unfound', + json.dumps(offset)) + j = json.loads(out) + if r is None: + r = j + else: + r['objects'].extend(j['objects']) + if not 'more' in j: + break + if j['more'] == 0: + break + offset = j['objects'][-1]['oid'] + if 'more' in r: + del r['more'] + return r + + def get_pg_stats(self): + """ + Dump the cluster and get pg stats + """ + out = self.raw_cluster_cmd('pg', 'dump', '--format=json') + j = json.loads('\n'.join(out.split('\n')[1:])) + try: + return j['pg_map']['pg_stats'] + except KeyError: + return j['pg_stats'] + + def get_pgids_to_force(self, backfill): + """ + Return the randomized list of PGs that can have their recovery/backfill forced + """ + j = self.get_pg_stats(); + pgids = [] + if backfill: + wanted = ['degraded', 'backfilling', 'backfill_wait'] + else: + wanted = ['recovering', 'degraded', 'recovery_wait'] + for pg in j: + status = pg['state'].split('+') + for t in wanted: + if random.random() > 0.5 and not ('forced_backfill' in status or 'forced_recovery' in status) and t in status: + pgids.append(pg['pgid']) + break + return pgids + + def get_pgids_to_cancel_force(self, backfill): + """ + Return the randomized list of PGs whose recovery/backfill priority is forced + """ + j = self.get_pg_stats(); + pgids = [] + if backfill: + wanted = 'forced_backfill' + else: + wanted = 'forced_recovery' + for pg in j: + status = pg['state'].split('+') + if wanted in status and random.random() > 0.5: + pgids.append(pg['pgid']) + return pgids + + def compile_pg_status(self): + """ + Return a histogram of pg state values + """ + ret = {} + j = self.get_pg_stats() + for pg in j: + for status in pg['state'].split('+'): + if status not in ret: + ret[status] = 0 + ret[status] += 1 + return ret + + @wait_for_pg_stats + def with_pg_state(self, pool, pgnum, check): + pgstr = self.get_pgid(pool, pgnum) + stats = self.get_single_pg_stats(pgstr) + assert(check(stats['state'])) + + @wait_for_pg_stats + def with_pg(self, pool, pgnum, check): + pgstr = self.get_pgid(pool, pgnum) + stats = self.get_single_pg_stats(pgstr) + return check(stats) + + def get_last_scrub_stamp(self, pool, pgnum): + """ + Get the timestamp of the last scrub. 
+ """ + stats = self.get_single_pg_stats(self.get_pgid(pool, pgnum)) + return stats["last_scrub_stamp"] + + def do_pg_scrub(self, pool, pgnum, stype): + """ + Scrub pg and wait for scrubbing to finish + """ + init = self.get_last_scrub_stamp(pool, pgnum) + RESEND_TIMEOUT = 120 # Must be a multiple of SLEEP_TIME + FATAL_TIMEOUT = RESEND_TIMEOUT * 3 + SLEEP_TIME = 10 + timer = 0 + while init == self.get_last_scrub_stamp(pool, pgnum): + assert timer < FATAL_TIMEOUT, "fatal timeout trying to " + stype + self.log("waiting for scrub type %s" % (stype,)) + if (timer % RESEND_TIMEOUT) == 0: + self.raw_cluster_cmd('pg', stype, self.get_pgid(pool, pgnum)) + # The first time in this loop is the actual request + if timer != 0 and stype == "repair": + self.log("WARNING: Resubmitted a non-idempotent repair") + time.sleep(SLEEP_TIME) + timer += SLEEP_TIME + + def wait_snap_trimming_complete(self, pool): + """ + Wait for snap trimming on pool to end + """ + POLL_PERIOD = 10 + FATAL_TIMEOUT = 600 + start = time.time() + poolnum = self.get_pool_num(pool) + poolnumstr = "%s." % (poolnum,) + while (True): + now = time.time() + if (now - start) > FATAL_TIMEOUT: + assert (now - start) < FATAL_TIMEOUT, \ + 'failed to complete snap trimming before timeout' + all_stats = self.get_pg_stats() + trimming = False + for pg in all_stats: + if (poolnumstr in pg['pgid']) and ('snaptrim' in pg['state']): + self.log("pg {pg} in trimming, state: {state}".format( + pg=pg['pgid'], + state=pg['state'])) + trimming = True + if not trimming: + break + self.log("{pool} still trimming, waiting".format(pool=pool)) + time.sleep(POLL_PERIOD) + + def get_single_pg_stats(self, pgid): + """ + Return pg for the pgid specified. + """ + all_stats = self.get_pg_stats() + + for pg in all_stats: + if pg['pgid'] == pgid: + return pg + + return None + + def get_object_pg_with_shard(self, pool, name, osdid): + """ + """ + pool_dump = self.get_pool_dump(pool) + object_map = self.get_object_map(pool, name) + if pool_dump["type"] == CephManager.ERASURE_CODED_POOL: + shard = object_map['acting'].index(osdid) + return "{pgid}s{shard}".format(pgid=object_map['pgid'], + shard=shard) + else: + return object_map['pgid'] + + def get_object_primary(self, pool, name): + """ + """ + object_map = self.get_object_map(pool, name) + return object_map['acting_primary'] + + def get_object_map(self, pool, name): + """ + osd map --format=json converted to a python object + :returns: the python object + """ + out = self.raw_cluster_cmd('--format=json', 'osd', 'map', pool, name) + return json.loads('\n'.join(out.split('\n')[1:])) + + def get_osd_dump_json(self): + """ + osd dump --format=json converted to a python object + :returns: the python object + """ + out = self.raw_cluster_cmd('osd', 'dump', '--format=json') + return json.loads('\n'.join(out.split('\n')[1:])) + + def get_osd_dump(self): + """ + Dump osds + :returns: all osds + """ + return self.get_osd_dump_json()['osds'] + + def get_osd_metadata(self): + """ + osd metadata --format=json converted to a python object + :returns: the python object containing osd metadata information + """ + out = self.raw_cluster_cmd('osd', 'metadata', '--format=json') + return json.loads('\n'.join(out.split('\n')[1:])) + + def get_mgr_dump(self): + out = self.raw_cluster_cmd('mgr', 'dump', '--format=json') + return json.loads(out) + + def get_stuck_pgs(self, type_, threshold): + """ + :returns: stuck pg information from the cluster + """ + out = self.raw_cluster_cmd('pg', 'dump_stuck', type_, str(threshold), + '--format=json') + 
return json.loads(out).get('stuck_pg_stats',[]) + + def get_num_unfound_objects(self): + """ + Check cluster status to get the number of unfound objects + """ + status = self.raw_cluster_status() + self.log(status) + return status['pgmap'].get('unfound_objects', 0) + + def get_num_creating(self): + """ + Find the number of pgs in creating mode. + """ + pgs = self.get_pg_stats() + num = 0 + for pg in pgs: + if 'creating' in pg['state']: + num += 1 + return num + + def get_num_active_clean(self): + """ + Find the number of active and clean pgs. + """ + pgs = self.get_pg_stats() + num = 0 + for pg in pgs: + if (pg['state'].count('active') and + pg['state'].count('clean') and + not pg['state'].count('stale')): + num += 1 + return num + + def get_num_active_recovered(self): + """ + Find the number of active and recovered pgs. + """ + pgs = self.get_pg_stats() + num = 0 + for pg in pgs: + if (pg['state'].count('active') and + not pg['state'].count('recover') and + not pg['state'].count('backfilling') and + not pg['state'].count('stale')): + num += 1 + return num + + def get_is_making_recovery_progress(self): + """ + Return whether there is recovery progress discernable in the + raw cluster status + """ + status = self.raw_cluster_status() + kps = status['pgmap'].get('recovering_keys_per_sec', 0) + bps = status['pgmap'].get('recovering_bytes_per_sec', 0) + ops = status['pgmap'].get('recovering_objects_per_sec', 0) + return kps > 0 or bps > 0 or ops > 0 + + def get_num_active(self): + """ + Find the number of active pgs. + """ + pgs = self.get_pg_stats() + num = 0 + for pg in pgs: + if pg['state'].count('active') and not pg['state'].count('stale'): + num += 1 + return num + + def get_num_down(self): + """ + Find the number of pgs that are down. + """ + pgs = self.get_pg_stats() + num = 0 + for pg in pgs: + if ((pg['state'].count('down') and not + pg['state'].count('stale')) or + (pg['state'].count('incomplete') and not + pg['state'].count('stale'))): + num += 1 + return num + + def get_num_active_down(self): + """ + Find the number of pgs that are either active or down. + """ + pgs = self.get_pg_stats() + num = 0 + for pg in pgs: + if ((pg['state'].count('active') and not + pg['state'].count('stale')) or + (pg['state'].count('down') and not + pg['state'].count('stale')) or + (pg['state'].count('incomplete') and not + pg['state'].count('stale'))): + num += 1 + return num + + def is_clean(self): + """ + True if all pgs are clean + """ + return self.get_num_active_clean() == self.get_num_pgs() + + def is_recovered(self): + """ + True if all pgs have recovered + """ + return self.get_num_active_recovered() == self.get_num_pgs() + + def is_active_or_down(self): + """ + True if all pgs are active or down + """ + return self.get_num_active_down() == self.get_num_pgs() + + def wait_for_clean(self, timeout=1200): + """ + Returns true when all pgs are clean. 
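+        More precisely: blocks until every PG is active+clean.  The timeout
+        is restarted whenever recovery progress is seen or the number of
+        active+clean PGs changes; if it expires without progress, the PG map
+        is dumped to the log and an assertion failure is raised.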
+ """ + self.log("waiting for clean") + start = time.time() + num_active_clean = self.get_num_active_clean() + while not self.is_clean(): + if timeout is not None: + if self.get_is_making_recovery_progress(): + self.log("making progress, resetting timeout") + start = time.time() + else: + self.log("no progress seen, keeping timeout for now") + if time.time() - start >= timeout: + self.log('dumping pgs') + out = self.raw_cluster_cmd('pg', 'dump') + self.log(out) + assert time.time() - start < timeout, \ + 'failed to become clean before timeout expired' + cur_active_clean = self.get_num_active_clean() + if cur_active_clean != num_active_clean: + start = time.time() + num_active_clean = cur_active_clean + time.sleep(3) + self.log("clean!") + + def are_all_osds_up(self): + """ + Returns true if all osds are up. + """ + x = self.get_osd_dump() + return (len(x) == sum([(y['up'] > 0) for y in x])) + + def wait_for_all_osds_up(self, timeout=None): + """ + When this exits, either the timeout has expired, or all + osds are up. + """ + self.log("waiting for all up") + start = time.time() + while not self.are_all_osds_up(): + if timeout is not None: + assert time.time() - start < timeout, \ + 'timeout expired in wait_for_all_osds_up' + time.sleep(3) + self.log("all up!") + + def pool_exists(self, pool): + if pool in self.list_pools(): + return True + return False + + def wait_for_pool(self, pool, timeout=300): + """ + Wait for a pool to exist + """ + self.log('waiting for pool %s to exist' % pool) + start = time.time() + while not self.pool_exists(pool): + if timeout is not None: + assert time.time() - start < timeout, \ + 'timeout expired in wait_for_pool' + time.sleep(3) + + def wait_for_pools(self, pools): + for pool in pools: + self.wait_for_pool(pool) + + def is_mgr_available(self): + x = self.get_mgr_dump() + return x.get('available', False) + + def wait_for_mgr_available(self, timeout=None): + self.log("waiting for mgr available") + start = time.time() + while not self.is_mgr_available(): + if timeout is not None: + assert time.time() - start < timeout, \ + 'timeout expired in wait_for_mgr_available' + time.sleep(3) + self.log("mgr available!") + + def wait_for_recovery(self, timeout=None): + """ + Check peering. When this exists, we have recovered. + """ + self.log("waiting for recovery to complete") + start = time.time() + num_active_recovered = self.get_num_active_recovered() + while not self.is_recovered(): + now = time.time() + if timeout is not None: + if self.get_is_making_recovery_progress(): + self.log("making progress, resetting timeout") + start = time.time() + else: + self.log("no progress seen, keeping timeout for now") + if now - start >= timeout: + if self.is_recovered(): + break + self.log('dumping pgs') + out = self.raw_cluster_cmd('pg', 'dump') + self.log(out) + assert now - start < timeout, \ + 'failed to recover before timeout expired' + cur_active_recovered = self.get_num_active_recovered() + if cur_active_recovered != num_active_recovered: + start = time.time() + num_active_recovered = cur_active_recovered + time.sleep(3) + self.log("recovered!") + + def wait_for_active(self, timeout=None): + """ + Check peering. 
When this exists, we are definitely active + """ + self.log("waiting for peering to complete") + start = time.time() + num_active = self.get_num_active() + while not self.is_active(): + if timeout is not None: + if time.time() - start >= timeout: + self.log('dumping pgs') + out = self.raw_cluster_cmd('pg', 'dump') + self.log(out) + assert time.time() - start < timeout, \ + 'failed to recover before timeout expired' + cur_active = self.get_num_active() + if cur_active != num_active: + start = time.time() + num_active = cur_active + time.sleep(3) + self.log("active!") + + def wait_for_active_or_down(self, timeout=None): + """ + Check peering. When this exists, we are definitely either + active or down + """ + self.log("waiting for peering to complete or become blocked") + start = time.time() + num_active_down = self.get_num_active_down() + while not self.is_active_or_down(): + if timeout is not None: + if time.time() - start >= timeout: + self.log('dumping pgs') + out = self.raw_cluster_cmd('pg', 'dump') + self.log(out) + assert time.time() - start < timeout, \ + 'failed to recover before timeout expired' + cur_active_down = self.get_num_active_down() + if cur_active_down != num_active_down: + start = time.time() + num_active_down = cur_active_down + time.sleep(3) + self.log("active or down!") + + def osd_is_up(self, osd): + """ + Wrapper for osd check + """ + osds = self.get_osd_dump() + return osds[osd]['up'] > 0 + + def wait_till_osd_is_up(self, osd, timeout=None): + """ + Loop waiting for osd. + """ + self.log('waiting for osd.%d to be up' % osd) + start = time.time() + while not self.osd_is_up(osd): + if timeout is not None: + assert time.time() - start < timeout, \ + 'osd.%d failed to come up before timeout expired' % osd + time.sleep(3) + self.log('osd.%d is up' % osd) + + def is_active(self): + """ + Wrapper to check if all pgs are active + """ + return self.get_num_active() == self.get_num_pgs() + + def wait_till_active(self, timeout=None): + """ + Wait until all pgs are active. + """ + self.log("waiting till active") + start = time.time() + while not self.is_active(): + if timeout is not None: + if time.time() - start >= timeout: + self.log('dumping pgs') + out = self.raw_cluster_cmd('pg', 'dump') + self.log(out) + assert time.time() - start < timeout, \ + 'failed to become active before timeout expired' + time.sleep(3) + self.log("active!") + + def wait_till_pg_convergence(self, timeout=None): + start = time.time() + old_stats = None + active_osds = [osd['osd'] for osd in self.get_osd_dump() + if osd['in'] and osd['up']] + while True: + # strictly speaking, no need to wait for mon. but due to the + # "ms inject socket failures" setting, the osdmap could be delayed, + # so mgr is likely to ignore the pg-stat messages with pgs serving + # newly created pools which is not yet known by mgr. so, to make sure + # the mgr is updated with the latest pg-stats, waiting for mon/mgr is + # necessary. + self.flush_pg_stats(active_osds) + new_stats = dict((stat['pgid'], stat['state']) + for stat in self.get_pg_stats()) + if old_stats == new_stats: + return old_stats + if timeout is not None: + assert time.time() - start < timeout, \ + 'failed to reach convergence before %d secs' % timeout + old_stats = new_stats + # longer than mgr_stats_period + time.sleep(5 + 1) + + def mark_out_osd(self, osd): + """ + Wrapper to mark osd out. 
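+        Thin wrapper around ``raw_cluster_cmd('osd', 'out', str(osd))``, the
+        equivalent of running ``ceph osd out <id>``; mark_in_osd() further
+        down is the inverse operation.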
+ """ + self.raw_cluster_cmd('osd', 'out', str(osd)) + + def kill_osd(self, osd): + """ + Kill osds by either power cycling (if indicated by the config) + or by stopping. + """ + if self.config.get('powercycle'): + remote = self.find_remote('osd', osd) + self.log('kill_osd on osd.{o} ' + 'doing powercycle of {s}'.format(o=osd, s=remote.name)) + self._assert_ipmi(remote) + remote.console.power_off() + elif self.config.get('bdev_inject_crash') and self.config.get('bdev_inject_crash_probability'): + if random.uniform(0, 1) < self.config.get('bdev_inject_crash_probability', .5): + self.inject_args( + 'osd', osd, + 'bdev-inject-crash', self.config.get('bdev_inject_crash')) + try: + self.ctx.daemons.get_daemon('osd', osd, self.cluster).wait() + except: + pass + else: + raise RuntimeError('osd.%s did not fail' % osd) + else: + self.ctx.daemons.get_daemon('osd', osd, self.cluster).stop() + else: + self.ctx.daemons.get_daemon('osd', osd, self.cluster).stop() + + @staticmethod + def _assert_ipmi(remote): + assert remote.console.has_ipmi_credentials, ( + "powercycling requested but RemoteConsole is not " + "initialized. Check ipmi config.") + + def blackhole_kill_osd(self, osd): + """ + Stop osd if nothing else works. + """ + self.inject_args('osd', osd, + 'objectstore-blackhole', True) + time.sleep(2) + self.ctx.daemons.get_daemon('osd', osd, self.cluster).stop() + + def revive_osd(self, osd, timeout=360, skip_admin_check=False): + """ + Revive osds by either power cycling (if indicated by the config) + or by restarting. + """ + if self.config.get('powercycle'): + remote = self.find_remote('osd', osd) + self.log('kill_osd on osd.{o} doing powercycle of {s}'. + format(o=osd, s=remote.name)) + self._assert_ipmi(remote) + remote.console.power_on() + if not remote.console.check_status(300): + raise Exception('Failed to revive osd.{o} via ipmi'. + format(o=osd)) + teuthology.reconnect(self.ctx, 60, [remote]) + mount_osd_data(self.ctx, remote, self.cluster, str(osd)) + self.make_admin_daemon_dir(remote) + self.ctx.daemons.get_daemon('osd', osd, self.cluster).reset() + self.ctx.daemons.get_daemon('osd', osd, self.cluster).restart() + + if not skip_admin_check: + # wait for dump_ops_in_flight; this command doesn't appear + # until after the signal handler is installed and it is safe + # to stop the osd again without making valgrind leak checks + # unhappy. see #5924. + self.wait_run_admin_socket('osd', osd, + args=['dump_ops_in_flight'], + timeout=timeout, stdout=DEVNULL) + + def mark_down_osd(self, osd): + """ + Cluster command wrapper + """ + self.raw_cluster_cmd('osd', 'down', str(osd)) + + def mark_in_osd(self, osd): + """ + Cluster command wrapper + """ + self.raw_cluster_cmd('osd', 'in', str(osd)) + + def signal_osd(self, osd, sig, silent=False): + """ + Wrapper to local get_daemon call which sends the given + signal to the given osd. + """ + self.ctx.daemons.get_daemon('osd', osd, + self.cluster).signal(sig, silent=silent) + + ## monitors + def signal_mon(self, mon, sig, silent=False): + """ + Wrapper to local get_daemon call + """ + self.ctx.daemons.get_daemon('mon', mon, + self.cluster).signal(sig, silent=silent) + + def kill_mon(self, mon): + """ + Kill the monitor by either power cycling (if the config says so), + or by doing a stop. + """ + if self.config.get('powercycle'): + remote = self.find_remote('mon', mon) + self.log('kill_mon on mon.{m} doing powercycle of {s}'. 
+ format(m=mon, s=remote.name)) + self._assert_ipmi(remote) + remote.console.power_off() + else: + self.ctx.daemons.get_daemon('mon', mon, self.cluster).stop() + + def revive_mon(self, mon): + """ + Restart by either power cycling (if the config says so), + or by doing a normal restart. + """ + if self.config.get('powercycle'): + remote = self.find_remote('mon', mon) + self.log('revive_mon on mon.{m} doing powercycle of {s}'. + format(m=mon, s=remote.name)) + self._assert_ipmi(remote) + remote.console.power_on() + self.make_admin_daemon_dir(remote) + self.ctx.daemons.get_daemon('mon', mon, self.cluster).restart() + + def revive_mgr(self, mgr): + """ + Restart by either power cycling (if the config says so), + or by doing a normal restart. + """ + if self.config.get('powercycle'): + remote = self.find_remote('mgr', mgr) + self.log('revive_mgr on mgr.{m} doing powercycle of {s}'. + format(m=mgr, s=remote.name)) + self._assert_ipmi(remote) + remote.console.power_on() + self.make_admin_daemon_dir(remote) + self.ctx.daemons.get_daemon('mgr', mgr, self.cluster).restart() + + def get_mon_status(self, mon): + """ + Extract all the monitor status information from the cluster + """ + addr = self.ctx.ceph[self.cluster].mons['mon.%s' % mon] + out = self.raw_cluster_cmd('-m', addr, 'mon_status') + return json.loads(out) + + def get_mon_quorum(self): + """ + Extract monitor quorum information from the cluster + """ + out = self.raw_cluster_cmd('quorum_status') + j = json.loads(out) + self.log('quorum_status is %s' % out) + return j['quorum'] + + def wait_for_mon_quorum_size(self, size, timeout=300): + """ + Loop until quorum size is reached. + """ + self.log('waiting for quorum size %d' % size) + start = time.time() + while not len(self.get_mon_quorum()) == size: + if timeout is not None: + assert time.time() - start < timeout, \ + ('failed to reach quorum size %d ' + 'before timeout expired' % size) + time.sleep(3) + self.log("quorum is size %d" % size) + + def get_mon_health(self, debug=False): + """ + Extract all the monitor health information. + """ + out = self.raw_cluster_cmd('health', '--format=json') + if debug: + self.log('health:\n{h}'.format(h=out)) + return json.loads(out) + + def get_filepath(self): + """ + Return path to osd data with {id} needing to be replaced + """ + return '/var/lib/ceph/osd/' + self.cluster + '-{id}' + + def make_admin_daemon_dir(self, remote): + """ + Create /var/run/ceph directory on remote site. + + :param ctx: Context + :param remote: Remote site + """ + remote.run(args=['sudo', + 'install', '-d', '-m0777', '--', '/var/run/ceph', ], ) + + def get_service_task_status(self, service, status_key): + """ + Return daemon task status for a given ceph service. + + :param service: ceph service (mds, osd, etc...) 
+ :param status_key: matching task status key + """ + task_status = {} + status = self.raw_cluster_status() + try: + for k,v in status['servicemap']['services'][service]['daemons'].items(): + ts = dict(v).get('task_status', None) + if ts: + task_status[k] = ts[status_key] + except KeyError: # catches missing service and status key + return {} + self.log(task_status) + return task_status + +def utility_task(name): + """ + Generate ceph_manager subtask corresponding to ceph_manager + method name + """ + def task(ctx, config): + if config is None: + config = {} + args = config.get('args', []) + kwargs = config.get('kwargs', {}) + cluster = config.get('cluster', 'ceph') + fn = getattr(ctx.managers[cluster], name) + fn(*args, **kwargs) + return task + +revive_osd = utility_task("revive_osd") +revive_mon = utility_task("revive_mon") +kill_osd = utility_task("kill_osd") +kill_mon = utility_task("kill_mon") +create_pool = utility_task("create_pool") +remove_pool = utility_task("remove_pool") +wait_for_clean = utility_task("wait_for_clean") +flush_all_pg_stats = utility_task("flush_all_pg_stats") +set_pool_property = utility_task("set_pool_property") +do_pg_scrub = utility_task("do_pg_scrub") +wait_for_pool = utility_task("wait_for_pool") +wait_for_pools = utility_task("wait_for_pools") diff --git a/qa/tasks/ceph_objectstore_tool.py b/qa/tasks/ceph_objectstore_tool.py new file mode 100644 index 00000000..2199266e --- /dev/null +++ b/qa/tasks/ceph_objectstore_tool.py @@ -0,0 +1,663 @@ +""" +ceph_objectstore_tool - Simple test of ceph-objectstore-tool utility +""" +from io import BytesIO + +import contextlib +import json +import logging +import os +import six +import sys +import tempfile +import time +from tasks import ceph_manager +from tasks.util.rados import (rados, create_replicated_pool, create_ec_pool) +from teuthology import misc as teuthology +from teuthology.orchestra import run + +from teuthology.exceptions import CommandFailedError + +# from util.rados import (rados, create_ec_pool, +# create_replicated_pool, +# create_cache_pool) + +log = logging.getLogger(__name__) + +# Should get cluster name "ceph" from somewhere +# and normal path from osd_data and osd_journal in conf +FSPATH = "/var/lib/ceph/osd/ceph-{id}" +JPATH = "/var/lib/ceph/osd/ceph-{id}/journal" + + +def cod_setup_local_data(log, ctx, NUM_OBJECTS, DATADIR, + BASE_NAME, DATALINECOUNT): + objects = range(1, NUM_OBJECTS + 1) + for i in objects: + NAME = BASE_NAME + "{num}".format(num=i) + LOCALNAME = os.path.join(DATADIR, NAME) + + dataline = range(DATALINECOUNT) + fd = open(LOCALNAME, "w") + data = "This is the data for " + NAME + "\n" + for _ in dataline: + fd.write(data) + fd.close() + + +def cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR, + BASE_NAME, DATALINECOUNT): + + objects = range(1, NUM_OBJECTS + 1) + for i in objects: + NAME = BASE_NAME + "{num}".format(num=i) + DDNAME = os.path.join(DATADIR, NAME) + + remote.run(args=['rm', '-f', DDNAME]) + + dataline = range(DATALINECOUNT) + data = "This is the data for " + NAME + "\n" + DATA = "" + for _ in dataline: + DATA += data + teuthology.write_file(remote, DDNAME, DATA) + + +def cod_setup(log, ctx, remote, NUM_OBJECTS, DATADIR, + BASE_NAME, DATALINECOUNT, POOL, db, ec): + ERRORS = 0 + log.info("Creating {objs} objects in pool".format(objs=NUM_OBJECTS)) + + objects = range(1, NUM_OBJECTS + 1) + for i in objects: + NAME = BASE_NAME + "{num}".format(num=i) + DDNAME = os.path.join(DATADIR, NAME) + + proc = rados(ctx, remote, ['-p', POOL, 'put', NAME, DDNAME], + 
wait=False) + # proc = remote.run(args=['rados', '-p', POOL, 'put', NAME, DDNAME]) + ret = proc.wait() + if ret != 0: + log.critical("Rados put failed with status {ret}". + format(ret=proc.exitstatus)) + sys.exit(1) + + db[NAME] = {} + + keys = range(i) + db[NAME]["xattr"] = {} + for k in keys: + if k == 0: + continue + mykey = "key{i}-{k}".format(i=i, k=k) + myval = "val{i}-{k}".format(i=i, k=k) + proc = remote.run(args=['rados', '-p', POOL, 'setxattr', + NAME, mykey, myval]) + ret = proc.wait() + if ret != 0: + log.error("setxattr failed with {ret}".format(ret=ret)) + ERRORS += 1 + db[NAME]["xattr"][mykey] = myval + + # Erasure coded pools don't support omap + if ec: + continue + + # Create omap header in all objects but REPobject1 + if i != 1: + myhdr = "hdr{i}".format(i=i) + proc = remote.run(args=['rados', '-p', POOL, 'setomapheader', + NAME, myhdr]) + ret = proc.wait() + if ret != 0: + log.critical("setomapheader failed with {ret}".format(ret=ret)) + ERRORS += 1 + db[NAME]["omapheader"] = myhdr + + db[NAME]["omap"] = {} + for k in keys: + if k == 0: + continue + mykey = "okey{i}-{k}".format(i=i, k=k) + myval = "oval{i}-{k}".format(i=i, k=k) + proc = remote.run(args=['rados', '-p', POOL, 'setomapval', + NAME, mykey, myval]) + ret = proc.wait() + if ret != 0: + log.critical("setomapval failed with {ret}".format(ret=ret)) + db[NAME]["omap"][mykey] = myval + + return ERRORS + + +def get_lines(filename): + tmpfd = open(filename, "r") + line = True + lines = [] + while line: + line = tmpfd.readline().rstrip('\n') + if line: + lines += [line] + tmpfd.close() + os.unlink(filename) + return lines + + +@contextlib.contextmanager +def task(ctx, config): + """ + Run ceph_objectstore_tool test + + The config should be as follows:: + + ceph_objectstore_tool: + objects: 20 # + pgnum: 12 + """ + + if config is None: + config = {} + assert isinstance(config, dict), \ + 'ceph_objectstore_tool task only accepts a dict for configuration' + + log.info('Beginning ceph_objectstore_tool...') + + log.debug(config) + log.debug(ctx) + clients = ctx.cluster.only(teuthology.is_type('client')) + assert len(clients.remotes) > 0, 'Must specify at least 1 client' + (cli_remote, _) = clients.remotes.popitem() + log.debug(cli_remote) + + # clients = dict(teuthology.get_clients(ctx=ctx, roles=config.keys())) + # client = clients.popitem() + # log.info(client) + osds = ctx.cluster.only(teuthology.is_type('osd')) + log.info("OSDS") + log.info(osds) + log.info(osds.remotes) + + manager = ctx.managers['ceph'] + while (len(manager.get_osd_status()['up']) != + len(manager.get_osd_status()['raw'])): + time.sleep(10) + while (len(manager.get_osd_status()['in']) != + len(manager.get_osd_status()['up'])): + time.sleep(10) + manager.raw_cluster_cmd('osd', 'set', 'noout') + manager.raw_cluster_cmd('osd', 'set', 'nodown') + + PGNUM = config.get('pgnum', 12) + log.info("pgnum: {num}".format(num=PGNUM)) + + ERRORS = 0 + + REP_POOL = "rep_pool" + REP_NAME = "REPobject" + create_replicated_pool(cli_remote, REP_POOL, PGNUM) + ERRORS += test_objectstore(ctx, config, cli_remote, REP_POOL, REP_NAME) + + EC_POOL = "ec_pool" + EC_NAME = "ECobject" + create_ec_pool(cli_remote, EC_POOL, 'default', PGNUM) + ERRORS += test_objectstore(ctx, config, cli_remote, + EC_POOL, EC_NAME, ec=True) + + if ERRORS == 0: + log.info("TEST PASSED") + else: + log.error("TEST FAILED WITH {errcount} ERRORS".format(errcount=ERRORS)) + + assert ERRORS == 0 + + try: + yield + finally: + log.info('Ending ceph_objectstore_tool') + + +def test_objectstore(ctx, config, 
cli_remote, REP_POOL, REP_NAME, ec=False): + manager = ctx.managers['ceph'] + + osds = ctx.cluster.only(teuthology.is_type('osd')) + + TEUTHDIR = teuthology.get_testdir(ctx) + DATADIR = os.path.join(TEUTHDIR, "ceph.data") + DATALINECOUNT = 10000 + ERRORS = 0 + NUM_OBJECTS = config.get('objects', 10) + log.info("objects: {num}".format(num=NUM_OBJECTS)) + + pool_dump = manager.get_pool_dump(REP_POOL) + REPID = pool_dump['pool'] + + log.debug("repid={num}".format(num=REPID)) + + db = {} + + LOCALDIR = tempfile.mkdtemp("cod") + + cod_setup_local_data(log, ctx, NUM_OBJECTS, LOCALDIR, + REP_NAME, DATALINECOUNT) + allremote = [] + allremote.append(cli_remote) + allremote += list(osds.remotes.keys()) + allremote = list(set(allremote)) + for remote in allremote: + cod_setup_remote_data(log, ctx, remote, NUM_OBJECTS, DATADIR, + REP_NAME, DATALINECOUNT) + + ERRORS += cod_setup(log, ctx, cli_remote, NUM_OBJECTS, DATADIR, + REP_NAME, DATALINECOUNT, REP_POOL, db, ec) + + pgs = {} + for stats in manager.get_pg_stats(): + if stats["pgid"].find(str(REPID) + ".") != 0: + continue + if pool_dump["type"] == ceph_manager.CephManager.REPLICATED_POOL: + for osd in stats["acting"]: + pgs.setdefault(osd, []).append(stats["pgid"]) + elif pool_dump["type"] == ceph_manager.CephManager.ERASURE_CODED_POOL: + shard = 0 + for osd in stats["acting"]: + pgs.setdefault(osd, []).append("{pgid}s{shard}". + format(pgid=stats["pgid"], + shard=shard)) + shard += 1 + else: + raise Exception("{pool} has an unexpected type {type}". + format(pool=REP_POOL, type=pool_dump["type"])) + + log.info(pgs) + log.info(db) + + for osd in manager.get_osd_status()['up']: + manager.kill_osd(osd) + time.sleep(5) + + pgswithobjects = set() + objsinpg = {} + + # Test --op list and generate json for all objects + log.info("Test --op list by generating json for all objects") + prefix = ("sudo ceph-objectstore-tool " + "--data-path {fpath} " + "--journal-path {jpath} ").format(fpath=FSPATH, jpath=JPATH) + for remote in osds.remotes.keys(): + log.debug(remote) + log.debug(osds.remotes[remote]) + for role in osds.remotes[remote]: + if not role.startswith("osd."): + continue + osdid = int(role.split('.')[1]) + log.info("process osd.{id} on {remote}". + format(id=osdid, remote=remote)) + cmd = (prefix + "--op list").format(id=osdid) + try: + lines = remote.sh(cmd, check_status=False).splitlines() + for pgline in lines: + if not pgline: + continue + (pg, obj) = json.loads(pgline) + name = obj['oid'] + if name in db: + pgswithobjects.add(pg) + objsinpg.setdefault(pg, []).append(name) + db[name].setdefault("pg2json", + {})[pg] = json.dumps(obj) + except CommandFailedError as e: + log.error("Bad exit status {ret} from --op list request". + format(ret=e.exitstatus)) + ERRORS += 1 + + log.info(db) + log.info(pgswithobjects) + log.info(objsinpg) + + if pool_dump["type"] == ceph_manager.CephManager.REPLICATED_POOL: + # Test get-bytes + log.info("Test get-bytes and set-bytes") + for basename in db.keys(): + file = os.path.join(DATADIR, basename) + GETNAME = os.path.join(DATADIR, "get") + SETNAME = os.path.join(DATADIR, "set") + + for remote in osds.remotes.keys(): + for role in osds.remotes[remote]: + if not role.startswith("osd."): + continue + osdid = int(role.split('.')[1]) + if osdid not in pgs: + continue + + for pg, JSON in db[basename]["pg2json"].items(): + if pg in pgs[osdid]: + cmd = ((prefix + "--pgid {pg}"). + format(id=osdid, pg=pg).split()) + cmd.append(run.Raw("'{json}'".format(json=JSON))) + cmd += ("get-bytes {fname}". 
+ format(fname=GETNAME).split()) + proc = remote.run(args=cmd, check_status=False) + if proc.exitstatus != 0: + remote.run(args="rm -f {getfile}". + format(getfile=GETNAME).split()) + log.error("Bad exit status {ret}". + format(ret=proc.exitstatus)) + ERRORS += 1 + continue + cmd = ("diff -q {file} {getfile}". + format(file=file, getfile=GETNAME)) + proc = remote.run(args=cmd.split()) + if proc.exitstatus != 0: + log.error("Data from get-bytes differ") + # log.debug("Got:") + # cat_file(logging.DEBUG, GETNAME) + # log.debug("Expected:") + # cat_file(logging.DEBUG, file) + ERRORS += 1 + remote.run(args="rm -f {getfile}". + format(getfile=GETNAME).split()) + + data = ("put-bytes going into {file}\n". + format(file=file)) + teuthology.write_file(remote, SETNAME, data) + cmd = ((prefix + "--pgid {pg}"). + format(id=osdid, pg=pg).split()) + cmd.append(run.Raw("'{json}'".format(json=JSON))) + cmd += ("set-bytes {fname}". + format(fname=SETNAME).split()) + proc = remote.run(args=cmd, check_status=False) + proc.wait() + if proc.exitstatus != 0: + log.info("set-bytes failed for object {obj} " + "in pg {pg} osd.{id} ret={ret}". + format(obj=basename, pg=pg, + id=osdid, ret=proc.exitstatus)) + ERRORS += 1 + + cmd = ((prefix + "--pgid {pg}"). + format(id=osdid, pg=pg).split()) + cmd.append(run.Raw("'{json}'".format(json=JSON))) + cmd += "get-bytes -".split() + try: + output = remote.sh(cmd, wait=True) + if data != output: + log.error("Data inconsistent after " + "set-bytes, got:") + log.error(output) + ERRORS += 1 + except CommandFailedError as e: + log.error("get-bytes after " + "set-bytes ret={ret}". + format(ret=e.exitstatus)) + ERRORS += 1 + + cmd = ((prefix + "--pgid {pg}"). + format(id=osdid, pg=pg).split()) + cmd.append(run.Raw("'{json}'".format(json=JSON))) + cmd += ("set-bytes {fname}". + format(fname=file).split()) + proc = remote.run(args=cmd, check_status=False) + proc.wait() + if proc.exitstatus != 0: + log.info("set-bytes failed for object {obj} " + "in pg {pg} osd.{id} ret={ret}". + format(obj=basename, pg=pg, + id=osdid, ret=proc.exitstatus)) + ERRORS += 1 + + log.info("Test list-attrs get-attr") + for basename in db.keys(): + file = os.path.join(DATADIR, basename) + GETNAME = os.path.join(DATADIR, "get") + SETNAME = os.path.join(DATADIR, "set") + + for remote in osds.remotes.keys(): + for role in osds.remotes[remote]: + if not role.startswith("osd."): + continue + osdid = int(role.split('.')[1]) + if osdid not in pgs: + continue + + for pg, JSON in db[basename]["pg2json"].items(): + if pg in pgs[osdid]: + cmd = ((prefix + "--pgid {pg}"). + format(id=osdid, pg=pg).split()) + cmd.append(run.Raw("'{json}'".format(json=JSON))) + cmd += ["list-attrs"] + try: + keys = remote.sh(cmd, wait=True, stderr=BytesIO()).split() + except CommandFailedError as e: + log.error("Bad exit status {ret}". + format(ret=e.exitstatus)) + ERRORS += 1 + continue + values = dict(db[basename]["xattr"]) + + for key in keys: + if (key == "_" or + key == "snapset" or + key == "hinfo_key"): + continue + key = key.strip("_") + if key not in values: + log.error("The key {key} should be present". + format(key=key)) + ERRORS += 1 + continue + exp = values.pop(key) + cmd = ((prefix + "--pgid {pg}"). + format(id=osdid, pg=pg).split()) + cmd.append(run.Raw("'{json}'".format(json=JSON))) + cmd += ("get-attr {key}". + format(key="_" + key).split()) + try: + val = remote.sh(cmd, wait=True) + except CommandFailedError as e: + log.error("get-attr failed with {ret}". 
+ format(ret=e.exitstatus)) + ERRORS += 1 + continue + if exp != val: + log.error("For key {key} got value {got} " + "instead of {expected}". + format(key=key, got=val, + expected=exp)) + ERRORS += 1 + if "hinfo_key" in keys: + cmd_prefix = prefix.format(id=osdid) + cmd = """ + expected=$({prefix} --pgid {pg} '{json}' get-attr {key} | base64) + echo placeholder | {prefix} --pgid {pg} '{json}' set-attr {key} - + test $({prefix} --pgid {pg} '{json}' get-attr {key}) = placeholder + echo $expected | base64 --decode | \ + {prefix} --pgid {pg} '{json}' set-attr {key} - + test $({prefix} --pgid {pg} '{json}' get-attr {key} | base64) = $expected + """.format(prefix=cmd_prefix, pg=pg, json=JSON, + key="hinfo_key") + log.debug(cmd) + proc = remote.run(args=['bash', '-e', '-x', + '-c', cmd], + check_status=False, + stdout=BytesIO(), + stderr=BytesIO()) + proc.wait() + if proc.exitstatus != 0: + log.error("failed with " + + str(proc.exitstatus)) + log.error(" ".join([ + six.ensure_str(proc.stdout.getvalue()), + six.ensure_str(proc.stderr.getvalue()), + ])) + ERRORS += 1 + + if len(values) != 0: + log.error("Not all keys found, remaining keys:") + log.error(values) + + log.info("Test pg info") + for remote in osds.remotes.keys(): + for role in osds.remotes[remote]: + if not role.startswith("osd."): + continue + osdid = int(role.split('.')[1]) + if osdid not in pgs: + continue + + for pg in pgs[osdid]: + cmd = ((prefix + "--op info --pgid {pg}"). + format(id=osdid, pg=pg).split()) + try: + info = remote.sh(cmd, wait=True) + except CommandFailedError as e: + log.error("Failure of --op info command with {ret}". + format(e.exitstatus)) + ERRORS += 1 + continue + if not str(pg) in info: + log.error("Bad data from info: {info}".format(info=info)) + ERRORS += 1 + + log.info("Test pg logging") + for remote in osds.remotes.keys(): + for role in osds.remotes[remote]: + if not role.startswith("osd."): + continue + osdid = int(role.split('.')[1]) + if osdid not in pgs: + continue + + for pg in pgs[osdid]: + cmd = ((prefix + "--op log --pgid {pg}"). + format(id=osdid, pg=pg).split()) + try: + output = remote.sh(cmd, wait=True) + except CommandFailedError as e: + log.error("Getting log failed for pg {pg} " + "from osd.{id} with {ret}". + format(pg=pg, id=osdid, ret=e.exitstatus)) + ERRORS += 1 + continue + HASOBJ = pg in pgswithobjects + MODOBJ = "modify" in output + if HASOBJ != MODOBJ: + log.error("Bad log for pg {pg} from osd.{id}". + format(pg=pg, id=osdid)) + MSG = (HASOBJ and [""] or ["NOT "])[0] + log.error("Log should {msg}have a modify entry". + format(msg=MSG)) + ERRORS += 1 + + log.info("Test pg export") + EXP_ERRORS = 0 + for remote in osds.remotes.keys(): + for role in osds.remotes[remote]: + if not role.startswith("osd."): + continue + osdid = int(role.split('.')[1]) + if osdid not in pgs: + continue + + for pg in pgs[osdid]: + fpath = os.path.join(DATADIR, "osd{id}.{pg}". + format(id=osdid, pg=pg)) + + cmd = ((prefix + "--op export --pgid {pg} --file {file}"). + format(id=osdid, pg=pg, file=fpath)) + try: + remote.sh(cmd, wait=True) + except CommandFailedError as e: + log.error("Exporting failed for pg {pg} " + "on osd.{id} with {ret}". 
+ format(pg=pg, id=osdid, ret=e.exitstatus)) + EXP_ERRORS += 1 + + ERRORS += EXP_ERRORS + + log.info("Test pg removal") + RM_ERRORS = 0 + for remote in osds.remotes.keys(): + for role in osds.remotes[remote]: + if not role.startswith("osd."): + continue + osdid = int(role.split('.')[1]) + if osdid not in pgs: + continue + + for pg in pgs[osdid]: + cmd = ((prefix + "--force --op remove --pgid {pg}"). + format(pg=pg, id=osdid)) + try: + remote.sh(cmd, wait=True) + except CommandFailedError as e: + log.error("Removing failed for pg {pg} " + "on osd.{id} with {ret}". + format(pg=pg, id=osdid, ret=e.exitstatus)) + RM_ERRORS += 1 + + ERRORS += RM_ERRORS + + IMP_ERRORS = 0 + if EXP_ERRORS == 0 and RM_ERRORS == 0: + log.info("Test pg import") + + for remote in osds.remotes.keys(): + for role in osds.remotes[remote]: + if not role.startswith("osd."): + continue + osdid = int(role.split('.')[1]) + if osdid not in pgs: + continue + + for pg in pgs[osdid]: + fpath = os.path.join(DATADIR, "osd{id}.{pg}". + format(id=osdid, pg=pg)) + + cmd = ((prefix + "--op import --file {file}"). + format(id=osdid, file=fpath)) + try: + remote.sh(cmd, wait=True) + except CommandFailedError as e: + log.error("Import failed from {file} with {ret}". + format(file=fpath, ret=e.exitstatus)) + IMP_ERRORS += 1 + else: + log.warning("SKIPPING IMPORT TESTS DUE TO PREVIOUS FAILURES") + + ERRORS += IMP_ERRORS + + if EXP_ERRORS == 0 and RM_ERRORS == 0 and IMP_ERRORS == 0: + log.info("Restarting OSDs....") + # They are still look to be up because of setting nodown + for osd in manager.get_osd_status()['up']: + manager.revive_osd(osd) + # Wait for health? + time.sleep(5) + # Let scrub after test runs verify consistency of all copies + log.info("Verify replicated import data") + objects = range(1, NUM_OBJECTS + 1) + for i in objects: + NAME = REP_NAME + "{num}".format(num=i) + TESTNAME = os.path.join(DATADIR, "gettest") + REFNAME = os.path.join(DATADIR, NAME) + + proc = rados(ctx, cli_remote, + ['-p', REP_POOL, 'get', NAME, TESTNAME], wait=False) + + ret = proc.wait() + if ret != 0: + log.error("After import, rados get failed with {ret}". + format(ret=proc.exitstatus)) + ERRORS += 1 + continue + + cmd = "diff -q {gettest} {ref}".format(gettest=TESTNAME, + ref=REFNAME) + proc = cli_remote.run(args=cmd, check_status=False) + proc.wait() + if proc.exitstatus != 0: + log.error("Data comparison failed for {obj}".format(obj=NAME)) + ERRORS += 1 + + return ERRORS diff --git a/qa/tasks/ceph_test_case.py b/qa/tasks/ceph_test_case.py new file mode 100644 index 00000000..9e26439e --- /dev/null +++ b/qa/tasks/ceph_test_case.py @@ -0,0 +1,203 @@ +import unittest +from unittest import case +import time +import logging + +from teuthology.orchestra.run import CommandFailedError + +log = logging.getLogger(__name__) + +class TestTimeoutError(RuntimeError): + pass + +class CephTestCase(unittest.TestCase): + """ + For test tasks that want to define a structured set of + tests implemented in python. Subclass this with appropriate + helpers for the subsystem you're testing. + """ + + # Environment references + mounts = None + fs = None + recovery_fs = None + ceph_cluster = None + mds_cluster = None + mgr_cluster = None + ctx = None + + mon_manager = None + + # Declarative test requirements: subclasses should override these to indicate + # their special needs. If not met, tests will be skipped. 
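+    # CephTestCase itself only checks REQUIRE_MEMSTORE (see setUp below);
+    # subclasses such as CephFSTestCase layer further requirements on top,
+    # e.g. CLIENTS_REQUIRED and MDSS_REQUIRED.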
+ REQUIRE_MEMSTORE = False + + def setUp(self): + self._mon_configs_set = set() + + self.ceph_cluster.mon_manager.raw_cluster_cmd("log", + "Starting test {0}".format(self.id())) + + if self.REQUIRE_MEMSTORE: + objectstore = self.ceph_cluster.get_config("osd_objectstore", "osd") + if objectstore != "memstore": + # You certainly *could* run this on a real OSD, but you don't want to sit + # here for hours waiting for the test to fill up a 1TB drive! + raise case.SkipTest("Require `memstore` OSD backend (test " \ + "would take too long on full sized OSDs") + + def tearDown(self): + self.config_clear() + + self.ceph_cluster.mon_manager.raw_cluster_cmd("log", + "Ended test {0}".format(self.id())) + + def config_clear(self): + for section, key in self._mon_configs_set: + self.config_rm(section, key) + self._mon_configs_set.clear() + + def _fix_key(self, key): + return str(key).replace(' ', '_') + + def config_get(self, section, key): + key = self._fix_key(key) + return self.ceph_cluster.mon_manager.raw_cluster_cmd("config", "get", section, key).strip() + + def config_show(self, entity, key): + key = self._fix_key(key) + return self.ceph_cluster.mon_manager.raw_cluster_cmd("config", "show", entity, key).strip() + + def config_minimal(self): + return self.ceph_cluster.mon_manager.raw_cluster_cmd("config", "generate-minimal-conf").strip() + + def config_rm(self, section, key): + key = self._fix_key(key) + self.ceph_cluster.mon_manager.raw_cluster_cmd("config", "rm", section, key) + # simplification: skip removing from _mon_configs_set; + # let tearDown clear everything again + + def config_set(self, section, key, value): + key = self._fix_key(key) + self._mon_configs_set.add((section, key)) + self.ceph_cluster.mon_manager.raw_cluster_cmd("config", "set", section, key, str(value)) + + def assert_cluster_log(self, expected_pattern, invert_match=False, + timeout=10, watch_channel=None): + """ + Context manager. Assert that during execution, or up to 5 seconds later, + the Ceph cluster log emits a message matching the expected pattern. + + :param expected_pattern: A string that you expect to see in the log output + :type expected_pattern: str + :param watch_channel: Specifies the channel to be watched. This can be + 'cluster', 'audit', ... 
+ :type watch_channel: str + """ + + ceph_manager = self.ceph_cluster.mon_manager + + class ContextManager(object): + def match(self): + found = expected_pattern in self.watcher_process.stdout.getvalue() + if invert_match: + return not found + + return found + + def __enter__(self): + self.watcher_process = ceph_manager.run_ceph_w(watch_channel) + + def __exit__(self, exc_type, exc_val, exc_tb): + if not self.watcher_process.finished: + # Check if we got an early match, wait a bit if we didn't + if self.match(): + return + else: + log.debug("No log hits yet, waiting...") + # Default monc tick interval is 10s, so wait that long and + # then some grace + time.sleep(5 + timeout) + + self.watcher_process.stdin.close() + try: + self.watcher_process.wait() + except CommandFailedError: + pass + + if not self.match(): + log.error("Log output: \n{0}\n".format(self.watcher_process.stdout.getvalue())) + raise AssertionError("Expected log message not found: '{0}'".format(expected_pattern)) + + return ContextManager() + + def wait_for_health(self, pattern, timeout): + """ + Wait until 'ceph health' contains messages matching the pattern + """ + def seen_health_warning(): + health = self.ceph_cluster.mon_manager.get_mon_health() + codes = [s for s in health['checks']] + summary_strings = [s[1]['summary']['message'] for s in health['checks'].items()] + if len(summary_strings) == 0: + log.debug("Not expected number of summary strings ({0})".format(summary_strings)) + return False + else: + for ss in summary_strings: + if pattern in ss: + return True + if pattern in codes: + return True + + log.debug("Not found expected summary strings yet ({0})".format(summary_strings)) + return False + + self.wait_until_true(seen_health_warning, timeout) + + def wait_for_health_clear(self, timeout): + """ + Wait until `ceph health` returns no messages + """ + def is_clear(): + health = self.ceph_cluster.mon_manager.get_mon_health() + return len(health['checks']) == 0 + + self.wait_until_true(is_clear, timeout) + + def wait_until_equal(self, get_fn, expect_val, timeout, reject_fn=None): + period = 5 + elapsed = 0 + while True: + val = get_fn() + if val == expect_val: + return + elif reject_fn and reject_fn(val): + raise RuntimeError("wait_until_equal: forbidden value {0} seen".format(val)) + else: + if elapsed >= timeout: + raise TestTimeoutError("Timed out after {0} seconds waiting for {1} (currently {2})".format( + elapsed, expect_val, val + )) + else: + log.debug("wait_until_equal: {0} != {1}, waiting...".format(val, expect_val)) + time.sleep(period) + elapsed += period + + log.debug("wait_until_equal: success") + + @classmethod + def wait_until_true(cls, condition, timeout, period=5): + elapsed = 0 + while True: + if condition(): + log.debug("wait_until_true: success in {0}s".format(elapsed)) + return + else: + if elapsed >= timeout: + raise TestTimeoutError("Timed out after {0}s".format(elapsed)) + else: + log.debug("wait_until_true: waiting...") + time.sleep(period) + elapsed += period + + diff --git a/qa/tasks/cephfs/__init__.py b/qa/tasks/cephfs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py new file mode 100644 index 00000000..f901f44b --- /dev/null +++ b/qa/tasks/cephfs/cephfs_test_case.py @@ -0,0 +1,324 @@ +import time +import json +import logging +from unittest import case +from tasks.ceph_test_case import CephTestCase +import os +import re + +from tasks.cephfs.fuse_mount import FuseMount + +from 
teuthology.orchestra import run +from teuthology.orchestra.run import CommandFailedError +from teuthology.contextutil import safe_while + + +log = logging.getLogger(__name__) + + +def for_teuthology(f): + """ + Decorator that adds an "is_for_teuthology" attribute to the wrapped function + """ + f.is_for_teuthology = True + return f + + +def needs_trimming(f): + """ + Mark fn as requiring a client capable of trimming its cache (i.e. for ceph-fuse + this means it needs to be able to run as root, currently) + """ + f.needs_trimming = True + return f + + +class CephFSTestCase(CephTestCase): + """ + Test case for Ceph FS, requires caller to populate Filesystem and Mounts, + into the fs, mount_a, mount_b class attributes (setting mount_b is optional) + + Handles resetting the cluster under test between tests. + """ + + # FIXME weird explicit naming + mount_a = None + mount_b = None + recovery_mount = None + + # Declarative test requirements: subclasses should override these to indicate + # their special needs. If not met, tests will be skipped. + CLIENTS_REQUIRED = 1 + MDSS_REQUIRED = 1 + REQUIRE_KCLIENT_REMOTE = False + REQUIRE_ONE_CLIENT_REMOTE = False + + # Whether to create the default filesystem during setUp + REQUIRE_FILESYSTEM = True + + # requires REQUIRE_FILESYSTEM = True + REQUIRE_RECOVERY_FILESYSTEM = False + + LOAD_SETTINGS = [] + + def setUp(self): + super(CephFSTestCase, self).setUp() + + self.config_set('mon', 'mon_allow_pool_delete', True) + + if len(self.mds_cluster.mds_ids) < self.MDSS_REQUIRED: + raise case.SkipTest("Only have {0} MDSs, require {1}".format( + len(self.mds_cluster.mds_ids), self.MDSS_REQUIRED + )) + + if len(self.mounts) < self.CLIENTS_REQUIRED: + raise case.SkipTest("Only have {0} clients, require {1}".format( + len(self.mounts), self.CLIENTS_REQUIRED + )) + + if self.REQUIRE_KCLIENT_REMOTE: + if not isinstance(self.mounts[0], FuseMount) or not isinstance(self.mounts[1], FuseMount): + # kclient kill() power cycles nodes, so requires clients to each be on + # their own node + if self.mounts[0].client_remote.hostname == self.mounts[1].client_remote.hostname: + raise case.SkipTest("kclient clients must be on separate nodes") + + if self.REQUIRE_ONE_CLIENT_REMOTE: + if self.mounts[0].client_remote.hostname in self.mds_cluster.get_mds_hostnames(): + raise case.SkipTest("Require first client to be on separate server from MDSs") + + # Create friendly mount_a, mount_b attrs + for i in range(0, self.CLIENTS_REQUIRED): + setattr(self, "mount_{0}".format(chr(ord('a') + i)), self.mounts[i]) + + self.mds_cluster.clear_firewall() + + # Unmount all clients, we are about to blow away the filesystem + for mount in self.mounts: + if mount.is_mounted(): + mount.umount_wait(force=True) + + # To avoid any issues with e.g. unlink bugs, we destroy and recreate + # the filesystem rather than just doing a rm -rf of files + self.mds_cluster.delete_all_filesystems() + self.mds_cluster.mds_restart() # to reset any run-time configs, etc. + self.fs = None # is now invalid! + self.recovery_fs = None + + # In case anything is in the OSD blacklist list, clear it out. This is to avoid + # the OSD map changing in the background (due to blacklist expiry) while tests run. 
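+        # "osd blacklist clear" may not be understood by older monitors; the
+        # except branch below falls back to dumping the osdmap and removing
+        # blacklist entries one at a time.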
+ try: + self.mds_cluster.mon_manager.raw_cluster_cmd("osd", "blacklist", "clear") + except CommandFailedError: + # Fallback for older Ceph cluster + blacklist = json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd("osd", + "dump", "--format=json-pretty"))['blacklist'] + log.info("Removing {0} blacklist entries".format(len(blacklist))) + for addr, blacklisted_at in blacklist.items(): + self.mds_cluster.mon_manager.raw_cluster_cmd("osd", "blacklist", "rm", addr) + + client_mount_ids = [m.client_id for m in self.mounts] + # In case the test changes the IDs of clients, stash them so that we can + # reset in tearDown + self._original_client_ids = client_mount_ids + log.info(client_mount_ids) + + # In case there were any extra auth identities around from a previous + # test, delete them + for entry in self.auth_list(): + ent_type, ent_id = entry['entity'].split(".") + if ent_type == "client" and ent_id not in client_mount_ids and ent_id != "admin": + self.mds_cluster.mon_manager.raw_cluster_cmd("auth", "del", entry['entity']) + + if self.REQUIRE_FILESYSTEM: + self.fs = self.mds_cluster.newfs(create=True) + + # In case some test messed with auth caps, reset them + for client_id in client_mount_ids: + self.mds_cluster.mon_manager.raw_cluster_cmd_result( + 'auth', 'caps', "client.{0}".format(client_id), + 'mds', 'allow', + 'mon', 'allow r', + 'osd', 'allow rw pool={0}'.format(self.fs.get_data_pool_name())) + + # wait for ranks to become active + self.fs.wait_for_daemons() + + # Mount the requested number of clients + for i in range(0, self.CLIENTS_REQUIRED): + self.mounts[i].mount() + self.mounts[i].wait_until_mounted() + + if self.REQUIRE_RECOVERY_FILESYSTEM: + if not self.REQUIRE_FILESYSTEM: + raise case.SkipTest("Recovery filesystem requires a primary filesystem as well") + self.fs.mon_manager.raw_cluster_cmd('fs', 'flag', 'set', + 'enable_multiple', 'true', + '--yes-i-really-mean-it') + self.recovery_fs = self.mds_cluster.newfs(name="recovery_fs", create=False) + self.recovery_fs.set_metadata_overlay(True) + self.recovery_fs.set_data_pool_name(self.fs.get_data_pool_name()) + self.recovery_fs.create() + self.recovery_fs.getinfo(refresh=True) + self.recovery_fs.mds_restart() + self.recovery_fs.wait_for_daemons() + + # Load an config settings of interest + for setting in self.LOAD_SETTINGS: + setattr(self, setting, float(self.fs.mds_asok( + ['config', 'get', setting], list(self.mds_cluster.mds_ids)[0] + )[setting])) + + self.configs_set = set() + + def tearDown(self): + self.mds_cluster.clear_firewall() + for m in self.mounts: + m.teardown() + + for i, m in enumerate(self.mounts): + m.client_id = self._original_client_ids[i] + + for subsys, key in self.configs_set: + self.mds_cluster.clear_ceph_conf(subsys, key) + + return super(CephFSTestCase, self).tearDown() + + def set_conf(self, subsys, key, value): + self.configs_set.add((subsys, key)) + self.mds_cluster.set_ceph_conf(subsys, key, value) + + def auth_list(self): + """ + Convenience wrapper on "ceph auth ls" + """ + return json.loads(self.mds_cluster.mon_manager.raw_cluster_cmd( + "auth", "ls", "--format=json-pretty" + ))['auth_dump'] + + def assert_session_count(self, expected, ls_data=None, mds_id=None): + if ls_data is None: + ls_data = self.fs.mds_asok(['session', 'ls'], mds_id=mds_id) + + alive_count = len([s for s in ls_data if s['state'] != 'killing']) + + self.assertEqual(expected, alive_count, "Expected {0} sessions, found {1}".format( + expected, alive_count + )) + + def assert_session_state(self, client_id, expected_state): + 
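+        """
+        Assert that the session belonging to ``client_id`` is in
+        ``expected_state`` (e.g. 'open'), as reported by 'session ls' on the
+        MDS admin socket.
+        """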
self.assertEqual( + self._session_by_id( + self.fs.mds_asok(['session', 'ls'])).get(client_id, {'state': None})['state'], + expected_state) + + def get_session_data(self, client_id): + return self._session_by_id(client_id) + + def _session_list(self): + ls_data = self.fs.mds_asok(['session', 'ls']) + ls_data = [s for s in ls_data if s['state'] not in ['stale', 'closed']] + return ls_data + + def get_session(self, client_id, session_ls=None): + if session_ls is None: + session_ls = self.fs.mds_asok(['session', 'ls']) + + return self._session_by_id(session_ls)[client_id] + + def _session_by_id(self, session_ls): + return dict([(s['id'], s) for s in session_ls]) + + def perf_dump(self, rank=0, status=None): + return self.fs.rank_asok(['perf', 'dump'], rank=rank, status=status) + + def wait_until_evicted(self, client_id, timeout=30): + def is_client_evicted(): + ls = self._session_list() + for s in ls: + if s['id'] == client_id: + return False + return True + self.wait_until_true(is_client_evicted, timeout) + + def wait_for_daemon_start(self, daemon_ids=None): + """ + Wait until all the daemons appear in the FSMap, either assigned + MDS ranks or in the list of standbys + """ + def get_daemon_names(): + return [info['name'] for info in self.mds_cluster.status().get_all()] + + if daemon_ids is None: + daemon_ids = self.mds_cluster.mds_ids + + try: + self.wait_until_true( + lambda: set(daemon_ids) & set(get_daemon_names()) == set(daemon_ids), + timeout=30 + ) + except RuntimeError: + log.warning("Timeout waiting for daemons {0}, while we have {1}".format( + daemon_ids, get_daemon_names() + )) + raise + + def delete_mds_coredump(self, daemon_id): + # delete coredump file, otherwise teuthology.internal.coredump will + # catch it later and treat it as a failure. 
+ core_pattern = self.mds_cluster.mds_daemons[daemon_id].remote.sh( + "sudo sysctl -n kernel.core_pattern") + core_dir = os.path.dirname(core_pattern.strip()) + if core_dir: # Non-default core_pattern with a directory in it + # We have seen a core_pattern that looks like it's from teuthology's coredump + # task, so proceed to clear out the core file + if core_dir[0] == '|': + log.info("Piped core dumps to program {0}, skip cleaning".format(core_dir[1:])) + return; + + log.info("Clearing core from directory: {0}".format(core_dir)) + + # Verify that we see the expected single coredump + ls_output = self.mds_cluster.mds_daemons[daemon_id].remote.sh([ + "cd", core_dir, run.Raw('&&'), + "sudo", "ls", run.Raw('|'), "sudo", "xargs", "file" + ]) + cores = [l.partition(":")[0] + for l in ls_output.strip().split("\n") + if re.match(r'.*ceph-mds.* -i +{0}'.format(daemon_id), l)] + + log.info("Enumerated cores: {0}".format(cores)) + self.assertEqual(len(cores), 1) + + log.info("Found core file {0}, deleting it".format(cores[0])) + + self.mds_cluster.mds_daemons[daemon_id].remote.run(args=[ + "cd", core_dir, run.Raw('&&'), "sudo", "rm", "-f", cores[0] + ]) + else: + log.info("No core_pattern directory set, nothing to clear (internal.coredump not enabled?)") + + def _wait_subtrees(self, status, rank, test): + timeout = 30 + pause = 2 + test = sorted(test) + for i in range(timeout // pause): + subtrees = self.fs.mds_asok(["get", "subtrees"], mds_id=status.get_rank(self.fs.id, rank)['name']) + subtrees = filter(lambda s: s['dir']['path'].startswith('/'), subtrees) + filtered = sorted([(s['dir']['path'], s['auth_first']) for s in subtrees]) + log.info("%s =?= %s", filtered, test) + if filtered == test: + # Confirm export_pin in output is correct: + for s in subtrees: + self.assertTrue(s['export_pin'] == s['auth_first']) + return subtrees + time.sleep(pause) + raise RuntimeError("rank {0} failed to reach desired subtree state", rank) + + def _wait_until_scrub_complete(self, path="/", recursive=True): + out_json = self.fs.rank_tell(["scrub", "start", path] + ["recursive"] if recursive else []) + with safe_while(sleep=10, tries=10) as proceed: + while proceed(): + out_json = self.fs.rank_tell(["scrub", "status"]) + if out_json['status'] == "no active scrubs running": + break; diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py new file mode 100644 index 00000000..c5531f94 --- /dev/null +++ b/qa/tasks/cephfs/filesystem.py @@ -0,0 +1,1386 @@ + +import json +import logging +from gevent import Greenlet +import os +import time +import datetime +import re +import errno +import random +import traceback + +from io import BytesIO +from io import StringIO + +from teuthology.exceptions import CommandFailedError +from teuthology import misc +from teuthology.nuke import clear_firewall +from teuthology.parallel import parallel +from tasks.ceph_manager import write_conf +from tasks import ceph_manager + + +log = logging.getLogger(__name__) + + +DAEMON_WAIT_TIMEOUT = 120 +ROOT_INO = 1 + +class FileLayout(object): + def __init__(self, pool=None, pool_namespace=None, stripe_unit=None, stripe_count=None, object_size=None): + self.pool = pool + self.pool_namespace = pool_namespace + self.stripe_unit = stripe_unit + self.stripe_count = stripe_count + self.object_size = object_size + + @classmethod + def load_from_ceph(layout_str): + # TODO + pass + + def items(self): + if self.pool is not None: + yield ("pool", self.pool) + if self.pool_namespace: + yield ("pool_namespace", self.pool_namespace) + if 
self.stripe_unit is not None: + yield ("stripe_unit", self.stripe_unit) + if self.stripe_count is not None: + yield ("stripe_count", self.stripe_count) + if self.object_size is not None: + yield ("object_size", self.stripe_size) + +class ObjectNotFound(Exception): + def __init__(self, object_name): + self._object_name = object_name + + def __str__(self): + return "Object not found: '{0}'".format(self._object_name) + +class FSStatus(object): + """ + Operations on a snapshot of the FSMap. + """ + def __init__(self, mon_manager): + self.mon = mon_manager + self.map = json.loads(self.mon.raw_cluster_cmd("fs", "dump", "--format=json")) + + def __str__(self): + return json.dumps(self.map, indent = 2, sort_keys = True) + + # Expose the fsmap for manual inspection. + def __getitem__(self, key): + """ + Get a field from the fsmap. + """ + return self.map[key] + + def get_filesystems(self): + """ + Iterator for all filesystems. + """ + for fs in self.map['filesystems']: + yield fs + + def get_all(self): + """ + Iterator for all the mds_info components in the FSMap. + """ + for info in self.get_standbys(): + yield info + for fs in self.map['filesystems']: + for info in fs['mdsmap']['info'].values(): + yield info + + def get_standbys(self): + """ + Iterator for all standbys. + """ + for info in self.map['standbys']: + yield info + + def get_fsmap(self, fscid): + """ + Get the fsmap for the given FSCID. + """ + for fs in self.map['filesystems']: + if fscid is None or fs['id'] == fscid: + return fs + raise RuntimeError("FSCID {0} not in map".format(fscid)) + + def get_fsmap_byname(self, name): + """ + Get the fsmap for the given file system name. + """ + for fs in self.map['filesystems']: + if name is None or fs['mdsmap']['fs_name'] == name: + return fs + raise RuntimeError("FS {0} not in map".format(name)) + + def get_replays(self, fscid): + """ + Get the standby:replay MDS for the given FSCID. + """ + fs = self.get_fsmap(fscid) + for info in fs['mdsmap']['info'].values(): + if info['state'] == 'up:standby-replay': + yield info + + def get_ranks(self, fscid): + """ + Get the ranks for the given FSCID. + """ + fs = self.get_fsmap(fscid) + for info in fs['mdsmap']['info'].values(): + if info['rank'] >= 0 and info['state'] != 'up:standby-replay': + yield info + + def get_rank(self, fscid, rank): + """ + Get the rank for the given FSCID. + """ + for info in self.get_ranks(fscid): + if info['rank'] == rank: + return info + raise RuntimeError("FSCID {0} has no rank {1}".format(fscid, rank)) + + def get_mds(self, name): + """ + Get the info for the given MDS name. 
+ """ + for info in self.get_all(): + if info['name'] == name: + return info + return None + + def get_mds_addr(self, name): + """ + Return the instance addr as a string, like "10.214.133.138:6807\/10825" + """ + info = self.get_mds(name) + if info: + return info['addr'] + else: + log.warning(json.dumps(list(self.get_all()), indent=2)) # dump for debugging + raise RuntimeError("MDS id '{0}' not found in map".format(name)) + +class CephCluster(object): + @property + def admin_remote(self): + first_mon = misc.get_first_mon(self._ctx, None) + (result,) = self._ctx.cluster.only(first_mon).remotes.keys() + return result + + def __init__(self, ctx): + self._ctx = ctx + self.mon_manager = ceph_manager.CephManager(self.admin_remote, ctx=ctx, logger=log.getChild('ceph_manager')) + + def get_config(self, key, service_type=None): + """ + Get config from mon by default, or a specific service if caller asks for it + """ + if service_type is None: + service_type = 'mon' + + service_id = sorted(misc.all_roles_of_type(self._ctx.cluster, service_type))[0] + return self.json_asok(['config', 'get', key], service_type, service_id)[key] + + def set_ceph_conf(self, subsys, key, value): + if subsys not in self._ctx.ceph['ceph'].conf: + self._ctx.ceph['ceph'].conf[subsys] = {} + self._ctx.ceph['ceph'].conf[subsys][key] = value + write_conf(self._ctx) # XXX because we don't have the ceph task's config object, if they + # used a different config path this won't work. + + def clear_ceph_conf(self, subsys, key): + del self._ctx.ceph['ceph'].conf[subsys][key] + write_conf(self._ctx) + + def json_asok(self, command, service_type, service_id, timeout=None): + if timeout is None: + timeout = 15*60 + proc = self.mon_manager.admin_socket(service_type, service_id, command, timeout=timeout) + response_data = proc.stdout.getvalue() + log.info("_json_asok output: {0}".format(response_data)) + if response_data.strip(): + return json.loads(response_data) + else: + return None + + +class MDSCluster(CephCluster): + """ + Collective operations on all the MDS daemons in the Ceph cluster. These + daemons may be in use by various Filesystems. + + For the benefit of pre-multi-filesystem tests, this class is also + a parent of Filesystem. The correct way to use MDSCluster going forward is + as a separate instance outside of your (multiple) Filesystem instances. + """ + def __init__(self, ctx): + super(MDSCluster, self).__init__(ctx) + + self.mds_ids = list(misc.all_roles_of_type(ctx.cluster, 'mds')) + + if len(self.mds_ids) == 0: + raise RuntimeError("This task requires at least one MDS") + + if hasattr(self._ctx, "daemons"): + # Presence of 'daemons' attribute implies ceph task rather than ceph_deploy task + self.mds_daemons = dict([(mds_id, self._ctx.daemons.get_daemon('mds', mds_id)) for mds_id in self.mds_ids]) + + def _one_or_all(self, mds_id, cb, in_parallel=True): + """ + Call a callback for a single named MDS, or for all. + + Note that the parallelism here isn't for performance, it's to avoid being overly kind + to the cluster by waiting a graceful ssh-latency of time between doing things, and to + avoid being overly kind by executing them in a particular order. 
However, some actions + don't cope with being done in parallel, so it's optional (`in_parallel`) + + :param mds_id: MDS daemon name, or None + :param cb: Callback taking single argument of MDS daemon name + :param in_parallel: whether to invoke callbacks concurrently (else one after the other) + """ + if mds_id is None: + if in_parallel: + with parallel() as p: + for mds_id in self.mds_ids: + p.spawn(cb, mds_id) + else: + for mds_id in self.mds_ids: + cb(mds_id) + else: + cb(mds_id) + + def get_config(self, key, service_type=None): + """ + get_config specialization of service_type="mds" + """ + if service_type != "mds": + return super(MDSCluster, self).get_config(key, service_type) + + # Some tests stop MDS daemons, don't send commands to a dead one: + running_daemons = [i for i, mds in self.mds_daemons.items() if mds.running()] + service_id = random.sample(running_daemons, 1)[0] + return self.json_asok(['config', 'get', key], service_type, service_id)[key] + + def mds_stop(self, mds_id=None): + """ + Stop the MDS daemon process(se). If it held a rank, that rank + will eventually go laggy. + """ + self._one_or_all(mds_id, lambda id_: self.mds_daemons[id_].stop()) + + def mds_fail(self, mds_id=None): + """ + Inform MDSMonitor of the death of the daemon process(es). If it held + a rank, that rank will be relinquished. + """ + self._one_or_all(mds_id, lambda id_: self.mon_manager.raw_cluster_cmd("mds", "fail", id_)) + + def mds_restart(self, mds_id=None): + self._one_or_all(mds_id, lambda id_: self.mds_daemons[id_].restart()) + + def mds_fail_restart(self, mds_id=None): + """ + Variation on restart that includes marking MDSs as failed, so that doing this + operation followed by waiting for healthy daemon states guarantees that they + have gone down and come up, rather than potentially seeing the healthy states + that existed before the restart. + """ + def _fail_restart(id_): + self.mds_daemons[id_].stop() + self.mon_manager.raw_cluster_cmd("mds", "fail", id_) + self.mds_daemons[id_].restart() + + self._one_or_all(mds_id, _fail_restart) + + def mds_signal(self, mds_id, sig, silent=False): + """ + signal a MDS daemon + """ + self.mds_daemons[mds_id].signal(sig, silent); + + def newfs(self, name='cephfs', create=True): + return Filesystem(self._ctx, name=name, create=create) + + def status(self): + return FSStatus(self.mon_manager) + + def delete_all_filesystems(self): + """ + Remove all filesystems that exist, and any pools in use by them. 
+ """ + pools = json.loads(self.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools'] + pool_id_name = {} + for pool in pools: + pool_id_name[pool['pool']] = pool['pool_name'] + + # mark cluster down for each fs to prevent churn during deletion + status = self.status() + for fs in status.get_filesystems(): + self.mon_manager.raw_cluster_cmd("fs", "fail", str(fs['mdsmap']['fs_name'])) + + # get a new copy as actives may have since changed + status = self.status() + for fs in status.get_filesystems(): + mdsmap = fs['mdsmap'] + metadata_pool = pool_id_name[mdsmap['metadata_pool']] + + self.mon_manager.raw_cluster_cmd('fs', 'rm', mdsmap['fs_name'], '--yes-i-really-mean-it') + self.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete', + metadata_pool, metadata_pool, + '--yes-i-really-really-mean-it') + for data_pool in mdsmap['data_pools']: + data_pool = pool_id_name[data_pool] + try: + self.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete', + data_pool, data_pool, + '--yes-i-really-really-mean-it') + except CommandFailedError as e: + if e.exitstatus == 16: # EBUSY, this data pool is used + pass # by two metadata pools, let the 2nd + else: # pass delete it + raise + + def get_standby_daemons(self): + return set([s['name'] for s in self.status().get_standbys()]) + + def get_mds_hostnames(self): + result = set() + for mds_id in self.mds_ids: + mds_remote = self.mon_manager.find_remote('mds', mds_id) + result.add(mds_remote.hostname) + + return list(result) + + def set_clients_block(self, blocked, mds_id=None): + """ + Block (using iptables) client communications to this MDS. Be careful: if + other services are running on this MDS, or other MDSs try to talk to this + MDS, their communications may also be blocked as collatoral damage. + + :param mds_id: Optional ID of MDS to block, default to all + :return: + """ + da_flag = "-A" if blocked else "-D" + + def set_block(_mds_id): + remote = self.mon_manager.find_remote('mds', _mds_id) + status = self.status() + + addr = status.get_mds_addr(_mds_id) + ip_str, port_str, inst_str = re.match("(.+):(.+)/(.+)", addr).groups() + + remote.run( + args=["sudo", "iptables", da_flag, "OUTPUT", "-p", "tcp", "--sport", port_str, "-j", "REJECT", "-m", + "comment", "--comment", "teuthology"]) + remote.run( + args=["sudo", "iptables", da_flag, "INPUT", "-p", "tcp", "--dport", port_str, "-j", "REJECT", "-m", + "comment", "--comment", "teuthology"]) + + self._one_or_all(mds_id, set_block, in_parallel=False) + + def clear_firewall(self): + clear_firewall(self._ctx) + + def get_mds_info(self, mds_id): + return FSStatus(self.mon_manager).get_mds(mds_id) + + def is_pool_full(self, pool_name): + pools = json.loads(self.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools'] + for pool in pools: + if pool['pool_name'] == pool_name: + return 'full' in pool['flags_names'].split(",") + + raise RuntimeError("Pool not found '{0}'".format(pool_name)) + +class Filesystem(MDSCluster): + """ + This object is for driving a CephFS filesystem. The MDS daemons driven by + MDSCluster may be shared with other Filesystems. 
+ """ + def __init__(self, ctx, fs_config=None, fscid=None, name=None, create=False, + ec_profile=None): + super(Filesystem, self).__init__(ctx) + + self.name = name + self.ec_profile = ec_profile + self.id = None + self.metadata_pool_name = None + self.metadata_overlay = False + self.data_pool_name = None + self.data_pools = None + self.fs_config = fs_config + + client_list = list(misc.all_roles_of_type(self._ctx.cluster, 'client')) + self.client_id = client_list[0] + self.client_remote = list(misc.get_clients(ctx=ctx, roles=["client.{0}".format(self.client_id)]))[0][1] + + if name is not None: + if fscid is not None: + raise RuntimeError("cannot specify fscid when creating fs") + if create and not self.legacy_configured(): + self.create() + else: + if fscid is not None: + self.id = fscid + self.getinfo(refresh = True) + + # Stash a reference to the first created filesystem on ctx, so + # that if someone drops to the interactive shell they can easily + # poke our methods. + if not hasattr(self._ctx, "filesystem"): + self._ctx.filesystem = self + + def get_task_status(self, status_key): + return self.mon_manager.get_service_task_status("mds", status_key) + + def getinfo(self, refresh = False): + status = self.status() + if self.id is not None: + fsmap = status.get_fsmap(self.id) + elif self.name is not None: + fsmap = status.get_fsmap_byname(self.name) + else: + fss = [fs for fs in status.get_filesystems()] + if len(fss) == 1: + fsmap = fss[0] + elif len(fss) == 0: + raise RuntimeError("no file system available") + else: + raise RuntimeError("more than one file system available") + self.id = fsmap['id'] + self.name = fsmap['mdsmap']['fs_name'] + self.get_pool_names(status = status, refresh = refresh) + return status + + def set_metadata_overlay(self, overlay): + if self.id is not None: + raise RuntimeError("cannot specify fscid when configuring overlay") + self.metadata_overlay = overlay + + def deactivate(self, rank): + if rank < 0: + raise RuntimeError("invalid rank") + elif rank == 0: + raise RuntimeError("cannot deactivate rank 0") + self.mon_manager.raw_cluster_cmd("mds", "deactivate", "%d:%d" % (self.id, rank)) + + def reach_max_mds(self): + # Try to reach rank count == max_mds, up or down (UPGRADE SENSITIVE!) 
+ status = self.getinfo() + mds_map = self.get_mds_map(status=status) + max_mds = mds_map['max_mds'] + + count = len(list(self.get_ranks(status=status))) + if count > max_mds: + try: + # deactivate mds in decending order + status = self.wait_for_daemons(status=status, skip_max_mds_check=True) + while count > max_mds: + targets = sorted(self.get_ranks(status=status), key=lambda r: r['rank'], reverse=True) + target = targets[0] + log.debug("deactivating rank %d" % target['rank']) + self.deactivate(target['rank']) + status = self.wait_for_daemons(skip_max_mds_check=True) + count = len(list(self.get_ranks(status=status))) + except: + # In Mimic, deactivation is done automatically: + log.info("Error:\n{}".format(traceback.format_exc())) + status = self.wait_for_daemons() + else: + status = self.wait_for_daemons() + + mds_map = self.get_mds_map(status=status) + assert(mds_map['max_mds'] == max_mds) + assert(mds_map['in'] == list(range(0, max_mds))) + + def fail(self): + self.mon_manager.raw_cluster_cmd("fs", "fail", str(self.name)) + + def set_var(self, var, *args): + a = map(str, args) + self.mon_manager.raw_cluster_cmd("fs", "set", self.name, var, *a) + + def set_down(self, down=True): + self.set_var("down", str(down).lower()) + + def set_joinable(self, joinable=True): + self.set_var("joinable", str(joinable).lower()) + + def set_max_mds(self, max_mds): + self.set_var("max_mds", "%d" % max_mds) + + def set_session_timeout(self, timeout): + self.set_var("session_timeout", "%d" % timeout) + + def set_allow_standby_replay(self, yes): + self.set_var("allow_standby_replay", str(yes).lower()) + + def set_allow_new_snaps(self, yes): + self.set_var("allow_new_snaps", str(yes).lower(), '--yes-i-really-mean-it') + + # In Octopus+, the PG count can be omitted to use the default. We keep the + # hard-coded value for deployments of Mimic/Nautilus. + pgs_per_fs_pool = 8 + + def create(self): + if self.name is None: + self.name = "cephfs" + if self.metadata_pool_name is None: + self.metadata_pool_name = "{0}_metadata".format(self.name) + if self.data_pool_name is None: + data_pool_name = "{0}_data".format(self.name) + else: + data_pool_name = self.data_pool_name + + log.debug("Creating filesystem '{0}'".format(self.name)) + + self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', + self.metadata_pool_name, self.pgs_per_fs_pool.__str__()) + if self.metadata_overlay: + self.mon_manager.raw_cluster_cmd('fs', 'new', + self.name, self.metadata_pool_name, data_pool_name, + '--allow-dangerous-metadata-overlay') + else: + if self.ec_profile and 'disabled' not in self.ec_profile: + log.debug("EC profile is %s", self.ec_profile) + cmd = ['osd', 'erasure-code-profile', 'set', data_pool_name] + cmd.extend(self.ec_profile) + self.mon_manager.raw_cluster_cmd(*cmd) + self.mon_manager.raw_cluster_cmd( + 'osd', 'pool', 'create', + data_pool_name, self.pgs_per_fs_pool.__str__(), 'erasure', + data_pool_name) + self.mon_manager.raw_cluster_cmd( + 'osd', 'pool', 'set', + data_pool_name, 'allow_ec_overwrites', 'true') + else: + self.mon_manager.raw_cluster_cmd( + 'osd', 'pool', 'create', + data_pool_name, self.pgs_per_fs_pool.__str__()) + self.mon_manager.raw_cluster_cmd('fs', 'new', + self.name, + self.metadata_pool_name, + data_pool_name, + "--force") + self.check_pool_application(self.metadata_pool_name) + self.check_pool_application(data_pool_name) + # Turn off spurious standby count warnings from modifying max_mds in tests. 
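+ # standby_count_wanted is the number of standby daemons this filesystem would like to have available; when fewer are present the cluster raises an MDS_INSUFFICIENT_STANDBY health warning, so 0 effectively disables that check.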
+ try: + self.mon_manager.raw_cluster_cmd('fs', 'set', self.name, 'standby_count_wanted', '0') + except CommandFailedError as e: + if e.exitstatus == 22: + # standby_count_wanted not available prior to luminous (upgrade tests would fail otherwise) + pass + else: + raise + + if self.fs_config is not None: + max_mds = self.fs_config.get('max_mds', 1) + if max_mds > 1: + self.set_max_mds(max_mds) + + # If absent, the default value (60 seconds) is used + session_timeout = self.fs_config.get('session_timeout', 60) + if session_timeout != 60: + self.set_session_timeout(session_timeout) + + self.getinfo(refresh = True) + + + def check_pool_application(self, pool_name): + osd_map = self.mon_manager.get_osd_dump_json() + for pool in osd_map['pools']: + if pool['pool_name'] == pool_name: + if "application_metadata" in pool: + if "cephfs" not in pool['application_metadata']: + raise RuntimeError("Pool {0} does not name cephfs as application!".format(pool_name)) + + + def __del__(self): + if getattr(self._ctx, "filesystem", None) == self: + delattr(self._ctx, "filesystem") + + def exists(self): + """ + Whether a filesystem exists in the mon's filesystem list + """ + fs_list = json.loads(self.mon_manager.raw_cluster_cmd('fs', 'ls', '--format=json-pretty')) + return self.name in [fs['name'] for fs in fs_list] + + def legacy_configured(self): + """ + Check if a legacy (i.e. pre "fs new") filesystem configuration is present. If this is + the case, the caller should avoid using Filesystem.create + """ + try: + out_text = self.mon_manager.raw_cluster_cmd('--format=json-pretty', 'osd', 'lspools') + pools = json.loads(out_text) + metadata_pool_exists = 'metadata' in [p['poolname'] for p in pools] + if metadata_pool_exists: + self.metadata_pool_name = 'metadata' + except CommandFailedError as e: + # For use in upgrade tests, Ceph cuttlefish and earlier don't support + # structured output (--format) from the CLI.
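+ # Exit status 22 corresponds to EINVAL: the mon rejected the structured-output form of the command, which is taken to imply a legacy cluster where the default 'metadata' pool exists.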
+ if e.exitstatus == 22: + metadata_pool_exists = True + else: + raise + + return metadata_pool_exists + + def _df(self): + return json.loads(self.mon_manager.raw_cluster_cmd("df", "--format=json-pretty")) + + def get_mds_map(self, status=None): + if status is None: + status = self.status() + return status.get_fsmap(self.id)['mdsmap'] + + def get_var(self, var, status=None): + return self.get_mds_map(status=status)[var] + + def set_dir_layout(self, mount, path, layout): + for name, value in layout.items(): + mount.run_shell(args=["setfattr", "-n", "ceph.dir.layout."+name, "-v", str(value), path]) + + def add_data_pool(self, name, create=True): + if create: + self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', name, self.pgs_per_fs_pool.__str__()) + self.mon_manager.raw_cluster_cmd('fs', 'add_data_pool', self.name, name) + self.get_pool_names(refresh = True) + for poolid, fs_name in self.data_pools.items(): + if name == fs_name: + return poolid + raise RuntimeError("could not get just created pool '{0}'".format(name)) + + def get_pool_names(self, refresh = False, status = None): + if refresh or self.metadata_pool_name is None or self.data_pools is None: + if status is None: + status = self.status() + fsmap = status.get_fsmap(self.id) + + osd_map = self.mon_manager.get_osd_dump_json() + id_to_name = {} + for p in osd_map['pools']: + id_to_name[p['pool']] = p['pool_name'] + + self.metadata_pool_name = id_to_name[fsmap['mdsmap']['metadata_pool']] + self.data_pools = {} + for data_pool in fsmap['mdsmap']['data_pools']: + self.data_pools[data_pool] = id_to_name[data_pool] + + def get_data_pool_name(self, refresh = False): + if refresh or self.data_pools is None: + self.get_pool_names(refresh = True) + assert(len(self.data_pools) == 1) + return next(iter(self.data_pools.values())) + + def get_data_pool_id(self, refresh = False): + """ + Don't call this if you have multiple data pools + :return: integer + """ + if refresh or self.data_pools is None: + self.get_pool_names(refresh = True) + assert(len(self.data_pools) == 1) + return next(iter(self.data_pools.keys())) + + def get_data_pool_names(self, refresh = False): + if refresh or self.data_pools is None: + self.get_pool_names(refresh = True) + return list(self.data_pools.values()) + + def get_metadata_pool_name(self): + return self.metadata_pool_name + + def set_data_pool_name(self, name): + if self.id is not None: + raise RuntimeError("can't set filesystem name if its fscid is set") + self.data_pool_name = name + + def get_namespace_id(self): + return self.id + + def get_pool_df(self, pool_name): + """ + Return a dict like: + {u'bytes_used': 0, u'max_avail': 83848701, u'objects': 0, u'kb_used': 0} + """ + for pool_df in self._df()['pools']: + if pool_df['name'] == pool_name: + return pool_df['stats'] + + raise RuntimeError("Pool name '{0}' not found".format(pool_name)) + + def get_usage(self): + return self._df()['stats']['total_used_bytes'] + + def are_daemons_healthy(self, status=None, skip_max_mds_check=False): + """ + Return true if all daemons are in one of active, standby, standby-replay, and + at least max_mds daemons are in 'active'. + + Unlike most of Filesystem, this function is tolerant of new-style `fs` + commands being missing, because we are part of the ceph installation + process during upgrade suites, so must fall back to old style commands + when we get an EINVAL on a new style command. 
+ + :return: + """ + # First, check to see that processes haven't exited with an error code + for mds in self._ctx.daemons.iter_daemons_of_role('mds'): + mds.check_status() + + active_count = 0 + try: + mds_map = self.get_mds_map(status=status) + except CommandFailedError as cfe: + # Old version, fall back to non-multi-fs commands + if cfe.exitstatus == errno.EINVAL: + mds_map = json.loads( + self.mon_manager.raw_cluster_cmd('mds', 'dump', '--format=json')) + else: + raise + + log.debug("are_daemons_healthy: mds map: {0}".format(mds_map)) + + for mds_id, mds_status in mds_map['info'].items(): + if mds_status['state'] not in ["up:active", "up:standby", "up:standby-replay"]: + log.warning("Unhealthy mds state {0}:{1}".format(mds_id, mds_status['state'])) + return False + elif mds_status['state'] == 'up:active': + active_count += 1 + + log.debug("are_daemons_healthy: {0}/{1}".format( + active_count, mds_map['max_mds'] + )) + + if not skip_max_mds_check: + if active_count > mds_map['max_mds']: + log.debug("are_daemons_healthy: number of actives is greater than max_mds: {0}".format(mds_map)) + return False + elif active_count == mds_map['max_mds']: + # The MDSMap says these guys are active, but let's check they really are + for mds_id, mds_status in mds_map['info'].items(): + if mds_status['state'] == 'up:active': + try: + daemon_status = self.mds_asok(["status"], mds_id=mds_status['name']) + except CommandFailedError as cfe: + if cfe.exitstatus == errno.EINVAL: + # Old version, can't do this check + continue + else: + # MDS not even running + return False + + if daemon_status['state'] != 'up:active': + # MDS hasn't taken the latest map yet + return False + + return True + else: + return False + else: + log.debug("are_daemons_healthy: skipping max_mds check") + return True + + def get_daemon_names(self, state=None, status=None): + """ + Return MDS daemon names of those daemons in the given state + :param state: + :return: + """ + mdsmap = self.get_mds_map(status) + result = [] + for mds_status in sorted(mdsmap['info'].values(), + key=lambda _: _['rank']): + if mds_status['state'] == state or state is None: + result.append(mds_status['name']) + + return result + + def get_active_names(self): + """ + Return MDS daemon names of those daemons holding ranks + in state up:active + + :return: list of strings like ['a', 'b'], sorted by rank + """ + return self.get_daemon_names("up:active") + + def get_all_mds_rank(self, status=None): + mdsmap = self.get_mds_map(status) + result = [] + for mds_status in sorted(mdsmap['info'].values(), + key=lambda _: _['rank']): + if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay': + result.append(mds_status['rank']) + + return result + + def get_rank(self, rank=0, status=None): + if status is None: + status = self.getinfo() + return status.get_rank(self.id, rank) + + def rank_restart(self, rank=0, status=None): + name = self.get_rank(rank=rank, status=status)['name'] + self.mds_restart(mds_id=name) + + def rank_signal(self, signal, rank=0, status=None): + name = self.get_rank(rank=rank, status=status)['name'] + self.mds_signal(name, signal) + + def rank_freeze(self, yes, rank=0): + self.mon_manager.raw_cluster_cmd("mds", "freeze", "{}:{}".format(self.id, rank), str(yes).lower()) + + def rank_fail(self, rank=0): + self.mon_manager.raw_cluster_cmd("mds", "fail", "{}:{}".format(self.id, rank)) + + def get_ranks(self, status=None): + if status is None: + status = self.getinfo() + return status.get_ranks(self.id) + + def get_replays(self, 
status=None): + if status is None: + status = self.getinfo() + return status.get_replays(self.id) + + def get_replay(self, rank=0, status=None): + for replay in self.get_replays(status=status): + if replay['rank'] == rank: + return replay + return None + + def get_rank_names(self, status=None): + """ + Return MDS daemon names of those daemons holding a rank, + sorted by rank. This includes e.g. up:replay/reconnect + as well as active, but does not include standby or + standby-replay. + """ + mdsmap = self.get_mds_map(status) + result = [] + for mds_status in sorted(mdsmap['info'].values(), + key=lambda _: _['rank']): + if mds_status['rank'] != -1 and mds_status['state'] != 'up:standby-replay': + result.append(mds_status['name']) + + return result + + def wait_for_daemons(self, timeout=None, skip_max_mds_check=False, status=None): + """ + Wait until all daemons are healthy + :return: + """ + + if timeout is None: + timeout = DAEMON_WAIT_TIMEOUT + + if status is None: + status = self.status() + + elapsed = 0 + while True: + if self.are_daemons_healthy(status=status, skip_max_mds_check=skip_max_mds_check): + return status + else: + time.sleep(1) + elapsed += 1 + + if elapsed > timeout: + log.debug("status = {0}".format(status)) + raise RuntimeError("Timed out waiting for MDS daemons to become healthy") + + status = self.status() + + def get_lone_mds_id(self): + """ + Get a single MDS ID: the only one if there is only one + configured, else the only one currently holding a rank, + else raise an error. + """ + if len(self.mds_ids) != 1: + alive = self.get_rank_names() + if len(alive) == 1: + return alive[0] + else: + raise ValueError("Explicit MDS argument required when multiple MDSs in use") + else: + return self.mds_ids[0] + + def recreate(self): + log.info("Creating new filesystem") + self.delete_all_filesystems() + self.id = None + self.create() + + def put_metadata_object_raw(self, object_id, infile): + """ + Save an object to the metadata pool + """ + temp_bin_path = infile + self.client_remote.run(args=[ + 'sudo', os.path.join(self._prefix, 'rados'), '-p', self.metadata_pool_name, 'put', object_id, temp_bin_path + ]) + + def get_metadata_object_raw(self, object_id): + """ + Retrieve an object from the metadata pool and store it in a file. + """ + temp_bin_path = '/tmp/' + object_id + '.bin' + + self.client_remote.run(args=[ + 'sudo', os.path.join(self._prefix, 'rados'), '-p', self.metadata_pool_name, 'get', object_id, temp_bin_path + ]) + + return temp_bin_path + + def get_metadata_object(self, object_type, object_id): + """ + Retrieve an object from the metadata pool, pass it through + ceph-dencoder to dump it to JSON, and return the decoded object. + """ + temp_bin_path = '/tmp/out.bin' + + self.client_remote.run(args=[ + 'sudo', os.path.join(self._prefix, 'rados'), '-p', self.metadata_pool_name, 'get', object_id, temp_bin_path + ]) + + dump_json = self.client_remote.sh([ + 'sudo', os.path.join(self._prefix, 'ceph-dencoder'), 'type', object_type, 'import', temp_bin_path, 'decode', 'dump_json' + ]).strip() + try: + dump = json.loads(dump_json) + except (TypeError, ValueError): + log.error("Failed to decode JSON: '{0}'".format(dump_json)) + raise + + return dump + + def get_journal_version(self): + """ + Read the JournalPointer and Journal::Header objects to learn the version of + encoding in use. 
+ """ + journal_pointer_object = '400.00000000' + journal_pointer_dump = self.get_metadata_object("JournalPointer", journal_pointer_object) + journal_ino = journal_pointer_dump['journal_pointer']['front'] + + journal_header_object = "{0:x}.00000000".format(journal_ino) + journal_header_dump = self.get_metadata_object('Journaler::Header', journal_header_object) + + version = journal_header_dump['journal_header']['stream_format'] + log.debug("Read journal version {0}".format(version)) + + return version + + def mds_asok(self, command, mds_id=None, timeout=None): + if mds_id is None: + mds_id = self.get_lone_mds_id() + + return self.json_asok(command, 'mds', mds_id, timeout=timeout) + + def rank_asok(self, command, rank=0, status=None, timeout=None): + info = self.get_rank(rank=rank, status=status) + return self.json_asok(command, 'mds', info['name'], timeout=timeout) + + def rank_tell(self, command, rank=0, status=None): + info = self.get_rank(rank=rank, status=status) + return json.loads(self.mon_manager.raw_cluster_cmd("tell", 'mds.{0}'.format(info['name']), *command)) + + def read_cache(self, path, depth=None): + cmd = ["dump", "tree", path] + if depth is not None: + cmd.append(depth.__str__()) + result = self.mds_asok(cmd) + if len(result) == 0: + raise RuntimeError("Path not found in cache: {0}".format(path)) + + return result + + def wait_for_state(self, goal_state, reject=None, timeout=None, mds_id=None, rank=None): + """ + Block until the MDS reaches a particular state, or a failure condition + is met. + + When there are multiple MDSs, succeed when exaclty one MDS is in the + goal state, or fail when any MDS is in the reject state. + + :param goal_state: Return once the MDS is in this state + :param reject: Fail if the MDS enters this state before the goal state + :param timeout: Fail if this many seconds pass before reaching goal + :return: number of seconds waited, rounded down to integer + """ + + started_at = time.time() + while True: + status = self.status() + if rank is not None: + try: + mds_info = status.get_rank(self.id, rank) + current_state = mds_info['state'] if mds_info else None + log.debug("Looked up MDS state for mds.{0}: {1}".format(rank, current_state)) + except: + mdsmap = self.get_mds_map(status=status) + if rank in mdsmap['failed']: + log.debug("Waiting for rank {0} to come back.".format(rank)) + current_state = None + else: + raise + elif mds_id is not None: + # mds_info is None if no daemon with this ID exists in the map + mds_info = status.get_mds(mds_id) + current_state = mds_info['state'] if mds_info else None + log.debug("Looked up MDS state for {0}: {1}".format(mds_id, current_state)) + else: + # In general, look for a single MDS + states = [m['state'] for m in status.get_ranks(self.id)] + if [s for s in states if s == goal_state] == [goal_state]: + current_state = goal_state + elif reject in states: + current_state = reject + else: + current_state = None + log.debug("mapped states {0} to {1}".format(states, current_state)) + + elapsed = time.time() - started_at + if current_state == goal_state: + log.debug("reached state '{0}' in {1}s".format(current_state, elapsed)) + return elapsed + elif reject is not None and current_state == reject: + raise RuntimeError("MDS in reject state {0}".format(current_state)) + elif timeout is not None and elapsed > timeout: + log.error("MDS status at timeout: {0}".format(status.get_fsmap(self.id))) + raise RuntimeError( + "Reached timeout after {0} seconds waiting for state {1}, while in state {2}".format( + elapsed, 
goal_state, current_state + )) + else: + time.sleep(1) + + def _read_data_xattr(self, ino_no, xattr_name, type, pool): + mds_id = self.mds_ids[0] + remote = self.mds_daemons[mds_id].remote + if pool is None: + pool = self.get_data_pool_name() + + obj_name = "{0:x}.00000000".format(ino_no) + + args = [ + os.path.join(self._prefix, "rados"), "-p", pool, "getxattr", obj_name, xattr_name + ] + try: + proc = remote.run(args=args, stdout=BytesIO()) + except CommandFailedError as e: + log.error(e.__str__()) + raise ObjectNotFound(obj_name) + + data = proc.stdout.getvalue() + dump = remote.sh( + [os.path.join(self._prefix, "ceph-dencoder"), + "type", type, + "import", "-", + "decode", "dump_json"], + stdin=data, + stdout=StringIO() + ) + + return json.loads(dump.strip()) + + def _write_data_xattr(self, ino_no, xattr_name, data, pool=None): + """ + Write to an xattr of the 0th data object of an inode. Will + succeed whether the object and/or xattr already exist or not. + + :param ino_no: integer inode number + :param xattr_name: string name of the xattr + :param data: byte array data to write to the xattr + :param pool: name of data pool or None to use primary data pool + :return: None + """ + remote = self.mds_daemons[self.mds_ids[0]].remote + if pool is None: + pool = self.get_data_pool_name() + + obj_name = "{0:x}.00000000".format(ino_no) + args = [ + os.path.join(self._prefix, "rados"), "-p", pool, "setxattr", + obj_name, xattr_name, data + ] + remote.sh(args) + + def read_backtrace(self, ino_no, pool=None): + """ + Read the backtrace from the data pool, return a dict in the format + given by inode_backtrace_t::dump, which is something like: + + :: + + rados -p cephfs_data getxattr 10000000002.00000000 parent > out.bin + ceph-dencoder type inode_backtrace_t import out.bin decode dump_json + + { "ino": 1099511627778, + "ancestors": [ + { "dirino": 1, + "dname": "blah", + "version": 11}], + "pool": 1, + "old_pools": []} + + :param pool: name of pool to read backtrace from. If omitted, FS must have only + one data pool and that will be used. + """ + return self._read_data_xattr(ino_no, "parent", "inode_backtrace_t", pool) + + def read_layout(self, ino_no, pool=None): + """ + Read 'layout' xattr of an inode and parse the result, returning a dict like: + :: + { + "stripe_unit": 4194304, + "stripe_count": 1, + "object_size": 4194304, + "pool_id": 1, + "pool_ns": "", + } + + :param pool: name of pool to read backtrace from. If omitted, FS must have only + one data pool and that will be used. + """ + return self._read_data_xattr(ino_no, "layout", "file_layout_t", pool) + + def _enumerate_data_objects(self, ino, size): + """ + Get the list of expected data objects for a range, and the list of objects + that really exist. 
+ + :return a tuple of two lists of strings (expected, actual) + """ + stripe_size = 1024 * 1024 * 4 + + size = max(stripe_size, size) + + want_objects = [ + "{0:x}.{1:08x}".format(ino, n) + for n in range(0, ((size - 1) // stripe_size) + 1) + ] + + exist_objects = self.rados(["ls"], pool=self.get_data_pool_name()).split("\n") + + return want_objects, exist_objects + + def data_objects_present(self, ino, size): + """ + Check that *all* the expected data objects for an inode are present in the data pool + """ + + want_objects, exist_objects = self._enumerate_data_objects(ino, size) + missing = set(want_objects) - set(exist_objects) + + if missing: + log.debug("Objects missing (ino {0}, size {1}): {2}".format( + ino, size, missing + )) + return False + else: + log.debug("All objects for ino {0} size {1} found".format(ino, size)) + return True + + def data_objects_absent(self, ino, size): + want_objects, exist_objects = self._enumerate_data_objects(ino, size) + present = set(want_objects) & set(exist_objects) + + if present: + log.debug("Objects not absent (ino {0}, size {1}): {2}".format( + ino, size, present + )) + return False + else: + log.debug("All objects for ino {0} size {1} are absent".format(ino, size)) + return True + + def dirfrag_exists(self, ino, frag): + try: + self.rados(["stat", "{0:x}.{1:08x}".format(ino, frag)]) + except CommandFailedError: + return False + else: + return True + + def rados(self, args, pool=None, namespace=None, stdin_data=None, + stdin_file=None, + stdout_data=None): + """ + Call into the `rados` CLI from an MDS + """ + + if pool is None: + pool = self.get_metadata_pool_name() + + # Doesn't matter which MDS we use to run rados commands, they all + # have access to the pools + mds_id = self.mds_ids[0] + remote = self.mds_daemons[mds_id].remote + + # NB we could alternatively use librados pybindings for this, but it's a one-liner + # using the `rados` CLI + args = ([os.path.join(self._prefix, "rados"), "-p", pool] + + (["--namespace", namespace] if namespace else []) + + args) + + if stdin_file is not None: + args = ["bash", "-c", "cat " + stdin_file + " | " + " ".join(args)] + if stdout_data is None: + stdout_data = StringIO() + + p = remote.run(args=args, + stdin=stdin_data, + stdout=stdout_data) + return p.stdout.getvalue().strip() + + def list_dirfrag(self, dir_ino): + """ + Read the named object and return the list of omap keys + + :return a list of 0 or more strings + """ + + dirfrag_obj_name = "{0:x}.00000000".format(dir_ino) + + try: + key_list_str = self.rados(["listomapkeys", dirfrag_obj_name]) + except CommandFailedError as e: + log.error(e.__str__()) + raise ObjectNotFound(dirfrag_obj_name) + + return key_list_str.split("\n") if key_list_str else [] + + def erase_metadata_objects(self, prefix): + """ + For all objects in the metadata pool matching the prefix, + erase them. + + This O(N) with the number of objects in the pool, so only suitable + for use on toy test filesystems. + """ + all_objects = self.rados(["ls"]).split("\n") + matching_objects = [o for o in all_objects if o.startswith(prefix)] + for o in matching_objects: + self.rados(["rm", o]) + + def erase_mds_objects(self, rank): + """ + Erase all the per-MDS objects for a particular rank. This includes + inotable, sessiontable, journal + """ + + def obj_prefix(multiplier): + """ + MDS object naming conventions like rank 1's + journal is at 201.*** + """ + return "%x." 
% (multiplier * 0x100 + rank) + + # MDS_INO_LOG_OFFSET + self.erase_metadata_objects(obj_prefix(2)) + # MDS_INO_LOG_BACKUP_OFFSET + self.erase_metadata_objects(obj_prefix(3)) + # MDS_INO_LOG_POINTER_OFFSET + self.erase_metadata_objects(obj_prefix(4)) + # MDSTables & SessionMap + self.erase_metadata_objects("mds{rank:d}_".format(rank=rank)) + + @property + def _prefix(self): + """ + Override this to set a different + """ + return "" + + def _make_rank(self, rank): + return "{}:{}".format(self.name, rank) + + def _run_tool(self, tool, args, rank=None, quiet=False): + # Tests frequently have [client] configuration that jacks up + # the objecter log level (unlikely to be interesting here) + # and does not set the mds log level (very interesting here) + if quiet: + base_args = [os.path.join(self._prefix, tool), '--debug-mds=1', '--debug-objecter=1'] + else: + base_args = [os.path.join(self._prefix, tool), '--debug-mds=4', '--debug-objecter=1'] + + if rank is not None: + base_args.extend(["--rank", "%s" % str(rank)]) + + t1 = datetime.datetime.now() + r = self.tool_remote.sh(script=base_args + args, stdout=StringIO()).strip() + duration = datetime.datetime.now() - t1 + log.debug("Ran {0} in time {1}, result:\n{2}".format( + base_args + args, duration, r + )) + return r + + @property + def tool_remote(self): + """ + An arbitrary remote to use when invoking recovery tools. Use an MDS host because + it'll definitely have keys with perms to access cephfs metadata pool. This is public + so that tests can use this remote to go get locally written output files from the tools. + """ + mds_id = self.mds_ids[0] + return self.mds_daemons[mds_id].remote + + def journal_tool(self, args, rank, quiet=False): + """ + Invoke cephfs-journal-tool with the passed arguments for a rank, and return its stdout + """ + fs_rank = self._make_rank(rank) + return self._run_tool("cephfs-journal-tool", args, fs_rank, quiet) + + def table_tool(self, args, quiet=False): + """ + Invoke cephfs-table-tool with the passed arguments, and return its stdout + """ + return self._run_tool("cephfs-table-tool", args, None, quiet) + + def data_scan(self, args, quiet=False, worker_count=1): + """ + Invoke cephfs-data-scan with the passed arguments, and return its stdout + + :param worker_count: if greater than 1, multiple workers will be run + in parallel and the return value will be None + """ + + workers = [] + + for n in range(0, worker_count): + if worker_count > 1: + # data-scan args first token is a command, followed by args to it. + # insert worker arguments after the command. 
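+ # For example (illustrative values): with worker_count=4, args of ['scan_extents', 'cephfs_data'] become ['scan_extents', '--worker_n', '0', '--worker_m', '4', 'cephfs_data'] for the first worker.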
+ cmd = args[0] + worker_args = [cmd] + ["--worker_n", n.__str__(), "--worker_m", worker_count.__str__()] + args[1:] + else: + worker_args = args + + workers.append(Greenlet.spawn(lambda wargs=worker_args: + self._run_tool("cephfs-data-scan", wargs, None, quiet))) + + for w in workers: + w.get() + + if worker_count == 1: + return workers[0].value + else: + return None + + def is_full(self): + return self.is_pool_full(self.get_data_pool_name()) diff --git a/qa/tasks/cephfs/fuse_mount.py b/qa/tasks/cephfs/fuse_mount.py new file mode 100644 index 00000000..664de4f4 --- /dev/null +++ b/qa/tasks/cephfs/fuse_mount.py @@ -0,0 +1,502 @@ +from io import StringIO +import json +import time +import logging + +import six + +from textwrap import dedent + +from teuthology import misc +from teuthology.contextutil import MaxWhileTries +from teuthology.orchestra import run +from teuthology.orchestra.run import CommandFailedError +from tasks.cephfs.mount import CephFSMount + +log = logging.getLogger(__name__) + + +class FuseMount(CephFSMount): + def __init__(self, ctx, client_config, test_dir, client_id, client_remote): + super(FuseMount, self).__init__(ctx, test_dir, client_id, client_remote) + + self.client_config = client_config if client_config else {} + self.fuse_daemon = None + self._fuse_conn = None + self.id = None + self.inst = None + self.addr = None + + def mount(self, mount_path=None, mount_fs_name=None, mountpoint=None): + if mountpoint is not None: + self.mountpoint = mountpoint + self.setupfs(name=mount_fs_name) + + try: + return self._mount(mount_path, mount_fs_name) + except RuntimeError: + # Catch exceptions by the mount() logic (i.e. not remote command + # failures) and ensure the mount is not left half-up. + # Otherwise we might leave a zombie mount point that causes + # anyone traversing cephtest/ to get hung up on. 
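+ # A dead ceph-fuse endpoint typically makes stat() on the mountpoint hang or fail with 'Transport endpoint is not connected', which would wedge later teardown walking the test dir, so force an unmount straight away.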
+ log.warning("Trying to clean up after failed mount") + self.umount_wait(force=True) + raise + + def _mount(self, mount_path, mount_fs_name): + log.info("Client client.%s config is %s" % (self.client_id, self.client_config)) + + daemon_signal = 'kill' + if self.client_config.get('coverage') or self.client_config.get('valgrind') is not None: + daemon_signal = 'term' + + log.info('Mounting ceph-fuse client.{id} at {remote} {mnt}...'.format( + id=self.client_id, remote=self.client_remote, mnt=self.mountpoint)) + + self.client_remote.run(args=['mkdir', '-p', self.mountpoint], + timeout=(15*60), cwd=self.test_dir) + + run_cmd = [ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=self.test_dir), + 'daemon-helper', + daemon_signal, + ] + + fuse_cmd = ['ceph-fuse', "-f"] + + if mount_path is not None: + fuse_cmd += ["--client_mountpoint={0}".format(mount_path)] + + if mount_fs_name is not None: + fuse_cmd += ["--client_mds_namespace={0}".format(mount_fs_name)] + + fuse_cmd += [ + '--name', 'client.{id}'.format(id=self.client_id), + # TODO ceph-fuse doesn't understand dash dash '--', + self.mountpoint, + ] + + cwd = self.test_dir + if self.client_config.get('valgrind') is not None: + run_cmd = misc.get_valgrind_args( + self.test_dir, + 'client.{id}'.format(id=self.client_id), + run_cmd, + self.client_config.get('valgrind'), + ) + cwd = None # misc.get_valgrind_args chdir for us + + run_cmd.extend(fuse_cmd) + + def list_connections(): + from teuthology.misc import get_system_type + + conn_dir = "/sys/fs/fuse/connections" + + self.client_remote.run(args=['sudo', 'modprobe', 'fuse'], + check_status=False) + self.client_remote.run( + args=["sudo", "mount", "-t", "fusectl", conn_dir, conn_dir], + check_status=False, timeout=(30)) + + try: + ls_str = self.client_remote.sh("ls " + conn_dir, + stdout=StringIO(), + timeout=(15*60)).strip() + except CommandFailedError: + return [] + + if ls_str: + return [int(n) for n in ls_str.split("\n")] + else: + return [] + + # Before starting ceph-fuse process, note the contents of + # /sys/fs/fuse/connections + pre_mount_conns = list_connections() + log.info("Pre-mount connections: {0}".format(pre_mount_conns)) + + proc = self.client_remote.run( + args=run_cmd, + cwd=cwd, + logger=log.getChild('ceph-fuse.{id}'.format(id=self.client_id)), + stdin=run.PIPE, + wait=False, + ) + self.fuse_daemon = proc + + # Wait for the connection reference to appear in /sys + mount_wait = self.client_config.get('mount_wait', 0) + if mount_wait > 0: + log.info("Fuse mount waits {0} seconds before checking /sys/".format(mount_wait)) + time.sleep(mount_wait) + timeout = int(self.client_config.get('mount_timeout', 30)) + waited = 0 + + post_mount_conns = list_connections() + while len(post_mount_conns) <= len(pre_mount_conns): + if self.fuse_daemon.finished: + # Did mount fail? 
Raise the CommandFailedError instead of + # hitting the "failed to populate /sys/" timeout + self.fuse_daemon.wait() + time.sleep(1) + waited += 1 + if waited > timeout: + raise RuntimeError("Fuse mount failed to populate /sys/ after {0} seconds".format( + waited + )) + else: + post_mount_conns = list_connections() + + log.info("Post-mount connections: {0}".format(post_mount_conns)) + + # Record our fuse connection number so that we can use it when + # forcing an unmount + new_conns = list(set(post_mount_conns) - set(pre_mount_conns)) + if len(new_conns) == 0: + raise RuntimeError("New fuse connection directory not found ({0})".format(new_conns)) + elif len(new_conns) > 1: + raise RuntimeError("Unexpectedly numerous fuse connections {0}".format(new_conns)) + else: + self._fuse_conn = new_conns[0] + + self.gather_mount_info() + + def gather_mount_info(self): + status = self.admin_socket(['status']) + self.id = status['id'] + self.client_pid = status['metadata']['pid'] + try: + self.inst = status['inst_str'] + self.addr = status['addr_str'] + except KeyError: + sessions = self.fs.rank_asok(['session', 'ls']) + for s in sessions: + if s['id'] == self.id: + self.inst = s['inst'] + self.addr = self.inst.split()[1] + if self.inst is None: + raise RuntimeError("cannot find client session") + + def is_mounted(self): + proc = self.client_remote.run( + args=[ + 'stat', + '--file-system', + '--printf=%T\n', + '--', + self.mountpoint, + ], + cwd=self.test_dir, + stdout=StringIO(), + stderr=StringIO(), + wait=False, + timeout=(15*60) + ) + try: + proc.wait() + except CommandFailedError: + error = proc.stderr.getvalue() + if ("endpoint is not connected" in error + or "Software caused connection abort" in error): + # This happens is fuse is killed without unmount + log.warning("Found stale moutn point at {0}".format(self.mountpoint)) + return True + else: + # This happens if the mount directory doesn't exist + log.info('mount point does not exist: %s', self.mountpoint) + return False + + fstype = six.ensure_str(proc.stdout.getvalue()).rstrip('\n') + if fstype == 'fuseblk': + log.info('ceph-fuse is mounted on %s', self.mountpoint) + return True + else: + log.debug('ceph-fuse not mounted, got fs type {fstype!r}'.format( + fstype=fstype)) + return False + + def wait_until_mounted(self): + """ + Check to make sure that fuse is mounted on mountpoint. If not, + sleep for 5 seconds and check again. + """ + + while not self.is_mounted(): + # Even if it's not mounted, it should at least + # be running: catch simple failures where it has terminated. + assert not self.fuse_daemon.poll() + + time.sleep(5) + + # Now that we're mounted, set permissions so that the rest of the test will have + # unrestricted access to the filesystem mount. 
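+ # Mode 1777 is world-writable plus the sticky bit (the same mode as /tmp), so unprivileged test users can create entries but only remove their own; a read-only filesystem error is tolerated below (e.g. when a test has deliberately made the mount read-only).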
+ try: + stderr = StringIO() + self.client_remote.run(args=['sudo', 'chmod', '1777', self.mountpoint], timeout=(15*60), cwd=self.test_dir, stderr=stderr) + except run.CommandFailedError: + stderr = stderr.getvalue() + if "Read-only file system".lower() in stderr.lower(): + pass + else: + raise + + def _mountpoint_exists(self): + return self.client_remote.run(args=["ls", "-d", self.mountpoint], check_status=False, cwd=self.test_dir, timeout=(15*60)).exitstatus == 0 + + def umount(self): + if not self.is_mounted(): + return + + try: + log.info('Running fusermount -u on {name}...'.format(name=self.client_remote.name)) + self.client_remote.run( + args=[ + 'sudo', + 'fusermount', + '-u', + self.mountpoint, + ], + cwd=self.test_dir, + timeout=(30*60), + ) + except run.CommandFailedError: + log.info('Failed to unmount ceph-fuse on {name}, aborting...'.format(name=self.client_remote.name)) + + self.client_remote.run(args=[ + 'sudo', + run.Raw('PATH=/usr/sbin:$PATH'), + 'lsof', + run.Raw(';'), + 'ps', + 'auxf', + ], timeout=(60*15)) + + # abort the fuse mount, killing all hung processes + if self._fuse_conn: + self.run_python(dedent(""" + import os + path = "/sys/fs/fuse/connections/{0}/abort" + if os.path.exists(path): + open(path, "w").write("1") + """).format(self._fuse_conn)) + self._fuse_conn = None + + stderr = StringIO() + try: + # make sure its unmounted + self.client_remote.run( + args=[ + 'sudo', + 'umount', + '-l', + '-f', + self.mountpoint, + ], + stderr=stderr, + timeout=(60*15) + ) + except CommandFailedError: + if self.is_mounted(): + raise + + assert not self.is_mounted() + self._fuse_conn = None + self.id = None + self.inst = None + self.addr = None + + def umount_wait(self, force=False, require_clean=False, timeout=900): + """ + :param force: Complete cleanly even if the MDS is offline + """ + if force: + assert not require_clean # mutually exclusive + + # When we expect to be forcing, kill the ceph-fuse process directly. + # This should avoid hitting the more aggressive fallback killing + # in umount() which can affect other mounts too. + self.fuse_daemon.stdin.close() + + # However, we will still hit the aggressive wait if there is an ongoing + # mount -o remount (especially if the remount is stuck because MDSs + # are unavailable) + + self.umount() + + try: + if self.fuse_daemon: + # Permit a timeout, so that we do not block forever + run.wait([self.fuse_daemon], timeout) + except MaxWhileTries: + log.error("process failed to terminate after unmount. This probably" + " indicates a bug within ceph-fuse.") + raise + except CommandFailedError: + if require_clean: + raise + + self.cleanup() + + def cleanup(self): + """ + Remove the mount point. + + Prerequisite: the client is not mounted. + """ + stderr = StringIO() + try: + self.client_remote.run( + args=[ + 'rmdir', + '--', + self.mountpoint, + ], + cwd=self.test_dir, + stderr=stderr, + timeout=(60*5), + check_status=False, + ) + except CommandFailedError: + if "No such file or directory" in stderr.getvalue(): + pass + else: + raise + + def kill(self): + """ + Terminate the client without removing the mount point. + """ + log.info('Killing ceph-fuse connection on {name}...'.format(name=self.client_remote.name)) + self.fuse_daemon.stdin.close() + try: + self.fuse_daemon.wait() + except CommandFailedError: + pass + + def kill_cleanup(self): + """ + Follow up ``kill`` to get to a clean unmounted state. 
+ """ + log.info('Cleaning up killed ceph-fuse connection') + self.umount() + self.cleanup() + + def teardown(self): + """ + Whatever the state of the mount, get it gone. + """ + super(FuseMount, self).teardown() + + self.umount() + + if self.fuse_daemon and not self.fuse_daemon.finished: + self.fuse_daemon.stdin.close() + try: + self.fuse_daemon.wait() + except CommandFailedError: + pass + + # Indiscriminate, unlike the touchier cleanup() + self.client_remote.run( + args=[ + 'rm', + '-rf', + self.mountpoint, + ], + cwd=self.test_dir, + timeout=(60*5) + ) + + def _asok_path(self): + return "/var/run/ceph/ceph-client.{0}.*.asok".format(self.client_id) + + @property + def _prefix(self): + return "" + + def admin_socket(self, args): + pyscript = """ +import glob +import re +import os +import subprocess + +def find_socket(client_name): + asok_path = "{asok_path}" + files = glob.glob(asok_path) + + # Given a non-glob path, it better be there + if "*" not in asok_path: + assert(len(files) == 1) + return files[0] + + for f in files: + pid = re.match(".*\.(\d+)\.asok$", f).group(1) + if os.path.exists("/proc/{{0}}".format(pid)): + return f + raise RuntimeError("Client socket {{0}} not found".format(client_name)) + +print(find_socket("{client_name}")) +""".format( + asok_path=self._asok_path(), + client_name="client.{0}".format(self.client_id)) + + # Find the admin socket + asok_path = self.client_remote.sh( + ['sudo', 'python3', '-c', pyscript], + stdout=StringIO(), + timeout=(15*60)).strip() + log.info("Found client admin socket at {0}".format(asok_path)) + + # Query client ID from admin socket + json_data = self.client_remote.sh( + ['sudo', self._prefix + 'ceph', '--admin-daemon', asok_path] + args, + stdout=StringIO(), + timeout=(15*60)) + return json.loads(json_data) + + def get_global_id(self): + """ + Look up the CephFS client ID for this mount + """ + return self.admin_socket(['mds_sessions'])['id'] + + def get_global_inst(self): + """ + Look up the CephFS client instance for this mount + """ + return self.inst + + def get_global_addr(self): + """ + Look up the CephFS client addr for this mount + """ + return self.addr + + def get_client_pid(self): + """ + return pid of ceph-fuse process + """ + status = self.admin_socket(['status']) + return status['metadata']['pid'] + + def get_osd_epoch(self): + """ + Return 2-tuple of osd_epoch, osd_epoch_barrier + """ + status = self.admin_socket(['status']) + return status['osd_epoch'], status['osd_epoch_barrier'] + + def get_dentry_count(self): + """ + Return 2-tuple of dentry_count, dentry_pinned_count + """ + status = self.admin_socket(['status']) + return status['dentry_count'], status['dentry_pinned_count'] + + def set_cache_size(self, size): + return self.admin_socket(['config', 'set', 'client_cache_size', str(size)]) diff --git a/qa/tasks/cephfs/kernel_mount.py b/qa/tasks/cephfs/kernel_mount.py new file mode 100644 index 00000000..d027bcfc --- /dev/null +++ b/qa/tasks/cephfs/kernel_mount.py @@ -0,0 +1,260 @@ +import json +import logging +import time +from textwrap import dedent +from teuthology.orchestra.run import CommandFailedError +from teuthology import misc + +from teuthology.orchestra import remote as orchestra_remote +from teuthology.orchestra import run +from teuthology.contextutil import MaxWhileTries +from tasks.cephfs.mount import CephFSMount + +log = logging.getLogger(__name__) + + +UMOUNT_TIMEOUT = 300 + + +class KernelMount(CephFSMount): + def __init__(self, ctx, test_dir, client_id, client_remote, + ipmi_user, ipmi_password, 
ipmi_domain): + super(KernelMount, self).__init__(ctx, test_dir, client_id, client_remote) + + self.mounted = False + self.ipmi_user = ipmi_user + self.ipmi_password = ipmi_password + self.ipmi_domain = ipmi_domain + + def mount(self, mount_path=None, mount_fs_name=None, mountpoint=None): + if mountpoint is not None: + self.mountpoint = mountpoint + self.setupfs(name=mount_fs_name) + + log.info('Mounting kclient client.{id} at {remote} {mnt}...'.format( + id=self.client_id, remote=self.client_remote, mnt=self.mountpoint)) + + self.client_remote.run(args=['mkdir', '-p', self.mountpoint], + timeout=(5*60)) + + if mount_path is None: + mount_path = "/" + + opts = 'name={id},norequire_active_mds,conf={conf}'.format(id=self.client_id, + conf=self.config_path) + + if mount_fs_name is not None: + opts += ",mds_namespace={0}".format(mount_fs_name) + + self.client_remote.run( + args=[ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=self.test_dir), + '/bin/mount', + '-t', + 'ceph', + ':{mount_path}'.format(mount_path=mount_path), + self.mountpoint, + '-v', + '-o', + opts + ], + timeout=(30*60), + ) + + self.client_remote.run( + args=['sudo', 'chmod', '1777', self.mountpoint], timeout=(5*60)) + + self.mounted = True + + def umount(self, force=False): + if not self.is_mounted(): + return + + log.debug('Unmounting client client.{id}...'.format(id=self.client_id)) + + cmd=['sudo', 'umount', self.mountpoint] + if force: + cmd.append('-f') + + try: + self.client_remote.run(args=cmd, timeout=(15*60)) + except Exception as e: + self.client_remote.run(args=[ + 'sudo', + run.Raw('PATH=/usr/sbin:$PATH'), + 'lsof', + run.Raw(';'), + 'ps', 'auxf', + ], timeout=(15*60)) + raise e + + rproc = self.client_remote.run( + args=[ + 'rmdir', + '--', + self.mountpoint, + ], + wait=False + ) + run.wait([rproc], UMOUNT_TIMEOUT) + self.mounted = False + + def cleanup(self): + pass + + def umount_wait(self, force=False, require_clean=False, timeout=900): + """ + Unlike the fuse client, the kernel client's umount is immediate + """ + if not self.is_mounted(): + return + + try: + self.umount(force) + except (CommandFailedError, MaxWhileTries): + if not force: + raise + + self.kill() + self.kill_cleanup() + + self.mounted = False + + def is_mounted(self): + return self.mounted + + def wait_until_mounted(self): + """ + Unlike the fuse client, the kernel client is up and running as soon + as the initial mount() function returns. + """ + assert self.mounted + + def teardown(self): + super(KernelMount, self).teardown() + if self.mounted: + self.umount() + + def kill(self): + """ + The Ceph kernel client doesn't have a mechanism to kill itself (doing + that in side the kernel would be weird anyway), so we reboot the whole node + to get the same effect. + + We use IPMI to reboot, because we don't want the client to send any + releases of capabilities. + """ + + con = orchestra_remote.getRemoteConsole(self.client_remote.hostname, + self.ipmi_user, + self.ipmi_password, + self.ipmi_domain) + con.hard_reset(wait_for_login=False) + + self.mounted = False + + def kill_cleanup(self): + assert not self.mounted + + # We need to do a sleep here because we don't know how long it will + # take for a hard_reset to be effected. 
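+ # The IPMI reset is issued out-of-band and returns immediately; the node may take a while to actually go down, so pause (30 seconds is an arbitrary grace period) before polling for the reboot to complete.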
+ time.sleep(30) + + try: + # Wait for node to come back up after reboot + misc.reconnect(None, 300, [self.client_remote]) + except: + # attempt to get some useful debug output: + con = orchestra_remote.getRemoteConsole(self.client_remote.hostname, + self.ipmi_user, + self.ipmi_password, + self.ipmi_domain) + con.check_status(timeout=60) + raise + + # Remove mount directory + self.client_remote.run(args=['uptime'], timeout=10) + + # Remove mount directory + self.client_remote.run( + args=[ + 'rmdir', + '--', + self.mountpoint, + ], + timeout=(5*60), + check_status=False, + ) + + def _find_debug_dir(self): + """ + Find the debugfs folder for this mount + """ + pyscript = dedent(""" + import glob + import os + import json + + def get_id_to_dir(): + result = {} + for dir in glob.glob("/sys/kernel/debug/ceph/*"): + mds_sessions_lines = open(os.path.join(dir, "mds_sessions")).readlines() + client_id = mds_sessions_lines[1].split()[1].strip('"') + + result[client_id] = dir + return result + + print(json.dumps(get_id_to_dir())) + """) + + output = self.client_remote.sh([ + 'sudo', 'python3', '-c', pyscript + ], timeout=(5*60)) + client_id_to_dir = json.loads(output) + + try: + return client_id_to_dir[self.client_id] + except KeyError: + log.error("Client id '{0}' debug dir not found (clients seen were: {1})".format( + self.client_id, ",".join(client_id_to_dir.keys()) + )) + raise + + def _read_debug_file(self, filename): + debug_dir = self._find_debug_dir() + + pyscript = dedent(""" + import os + + print(open(os.path.join("{debug_dir}", "{filename}")).read()) + """).format(debug_dir=debug_dir, filename=filename) + + output = self.client_remote.sh([ + 'sudo', 'python3', '-c', pyscript + ], timeout=(5*60)) + return output + + def get_global_id(self): + """ + Look up the CephFS client ID for this mount, using debugfs. 
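+ The id is taken from the first line of this mount's mds_sessions file
+ under /sys/kernel/debug/ceph/, via _read_debug_file() above.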
+ """ + + assert self.mounted + + mds_sessions = self._read_debug_file("mds_sessions") + lines = mds_sessions.split("\n") + return int(lines[0].split()[1]) + + def get_osd_epoch(self): + """ + Return 2-tuple of osd_epoch, osd_epoch_barrier + """ + osd_map = self._read_debug_file("osdmap") + lines = osd_map.split("\n") + first_line_tokens = lines[0].split() + epoch, barrier = int(first_line_tokens[1]), int(first_line_tokens[3]) + + return epoch, barrier diff --git a/qa/tasks/cephfs/mount.py b/qa/tasks/cephfs/mount.py new file mode 100644 index 00000000..d486f1b6 --- /dev/null +++ b/qa/tasks/cephfs/mount.py @@ -0,0 +1,728 @@ +from contextlib import contextmanager +from io import BytesIO +import json +import logging +import datetime +import six +import time +from six import StringIO +from textwrap import dedent +import os +from teuthology.misc import sudo_write_file +from teuthology.orchestra import run +from teuthology.orchestra.run import CommandFailedError, ConnectionLostError, Raw +from tasks.cephfs.filesystem import Filesystem + +log = logging.getLogger(__name__) + + +class CephFSMount(object): + def __init__(self, ctx, test_dir, client_id, client_remote): + """ + :param test_dir: Global teuthology test dir + :param client_id: Client ID, the 'foo' in client.foo + :param client_remote: Remote instance for the host where client will run + """ + + self.ctx = ctx + self.test_dir = test_dir + self.client_id = client_id + self.client_remote = client_remote + self.mountpoint_dir_name = 'mnt.{id}'.format(id=self.client_id) + self._mountpoint = None + self.fs = None + + self.test_files = ['a', 'b', 'c'] + + self.background_procs = [] + + @property + def mountpoint(self): + if self._mountpoint == None: + self._mountpoint= os.path.join( + self.test_dir, '{dir_name}'.format(dir_name=self.mountpoint_dir_name)) + return self._mountpoint + + @mountpoint.setter + def mountpoint(self, path): + if not isinstance(path, str): + raise RuntimeError('path should be of str type.') + self._mountpoint = path + + def is_mounted(self): + raise NotImplementedError() + + def setupfs(self, name=None): + if name is None and self.fs is not None: + # Previous mount existed, reuse the old name + name = self.fs.name + self.fs = Filesystem(self.ctx, name=name) + log.info('Wait for MDS to reach steady state...') + self.fs.wait_for_daemons() + log.info('Ready to start {}...'.format(type(self).__name__)) + + def mount(self, mount_path=None, mount_fs_name=None, mountpoint=None): + raise NotImplementedError() + + def umount(self): + raise NotImplementedError() + + def umount_wait(self, force=False, require_clean=False): + """ + + :param force: Expect that the mount will not shutdown cleanly: kill + it hard. + :param require_clean: Wait for the Ceph client associated with the + mount (e.g. ceph-fuse) to terminate, and + raise if it doesn't do so cleanly. 
+ :return: + """ + raise NotImplementedError() + + def kill_cleanup(self): + raise NotImplementedError() + + def kill(self): + raise NotImplementedError() + + def cleanup(self): + raise NotImplementedError() + + def wait_until_mounted(self): + raise NotImplementedError() + + def get_keyring_path(self): + return '/etc/ceph/ceph.client.{id}.keyring'.format(id=self.client_id) + + @property + def config_path(self): + """ + Path to ceph.conf: override this if you're not a normal systemwide ceph install + :return: stringv + """ + return "/etc/ceph/ceph.conf" + + @contextmanager + def mounted(self): + """ + A context manager, from an initially unmounted state, to mount + this, yield, and then unmount and clean up. + """ + self.mount() + self.wait_until_mounted() + try: + yield + finally: + self.umount_wait() + + def is_blacklisted(self): + addr = self.get_global_addr() + blacklist = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "blacklist", "ls", "--format=json")) + for b in blacklist: + if addr == b["addr"]: + return True + return False + + def create_files(self): + assert(self.is_mounted()) + + for suffix in self.test_files: + log.info("Creating file {0}".format(suffix)) + self.client_remote.run(args=[ + 'sudo', 'touch', os.path.join(self.mountpoint, suffix) + ]) + + def check_files(self): + assert(self.is_mounted()) + + for suffix in self.test_files: + log.info("Checking file {0}".format(suffix)) + r = self.client_remote.run(args=[ + 'sudo', 'ls', os.path.join(self.mountpoint, suffix) + ], check_status=False) + if r.exitstatus != 0: + raise RuntimeError("Expected file {0} not found".format(suffix)) + + def write_file(self, path, data, perms=None): + """ + Write the given data at the given path and set the given perms to the + file on the path. + """ + if path.find(self.mountpoint) == -1: + path = os.path.join(self.mountpoint, path) + + sudo_write_file(self.client_remote, path, data) + + if perms: + self.run_shell(args=f'chmod {perms} {path}') + + def read_file(self, path): + """ + Return the data from the file on given path. 
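+
+ Illustrative example (the path is hypothetical; relative paths are
+ resolved against the mountpoint, as in write_file()):
+
+     mount.write_file("testdir/greeting", "hello")
+     assert mount.read_file("testdir/greeting") == "hello"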
+ """ + if path.find(self.mountpoint) == -1: + path = os.path.join(self.mountpoint, path) + + return self.run_shell(args=['sudo', 'cat', path], omit_sudo=False).\ + stdout.getvalue().strip() + + def create_destroy(self): + assert(self.is_mounted()) + + filename = "{0} {1}".format(datetime.datetime.now(), self.client_id) + log.debug("Creating test file {0}".format(filename)) + self.client_remote.run(args=[ + 'sudo', 'touch', os.path.join(self.mountpoint, filename) + ]) + log.debug("Deleting test file {0}".format(filename)) + self.client_remote.run(args=[ + 'sudo', 'rm', '-f', os.path.join(self.mountpoint, filename) + ]) + + def _run_python(self, pyscript, py_version='python3'): + return self.client_remote.run( + args=['sudo', 'adjust-ulimits', 'daemon-helper', 'kill', + py_version, '-c', pyscript], wait=False, stdin=run.PIPE, + stdout=StringIO()) + + def run_python(self, pyscript, py_version='python3'): + p = self._run_python(pyscript, py_version) + p.wait() + return six.ensure_str(p.stdout.getvalue().strip()) + + def run_shell(self, args, wait=True, check_status=True, omit_sudo=True): + if isinstance(args, str): + args = args.split() + + args = ["cd", self.mountpoint, run.Raw('&&'), "sudo"] + args + return self.client_remote.run(args=args, stdout=StringIO(), + stderr=StringIO(), wait=wait, + check_status=check_status, + omit_sudo=omit_sudo) + + def run_shell_payload(self, payload, **kwargs): + return self.run_shell(["bash", "-c", Raw(f"'{payload}'")], **kwargs) + + def open_no_data(self, basename): + """ + A pure metadata operation + """ + assert(self.is_mounted()) + + path = os.path.join(self.mountpoint, basename) + + p = self._run_python(dedent( + """ + f = open("{path}", 'w') + """.format(path=path) + )) + p.wait() + + def open_background(self, basename="background_file", write=True): + """ + Open a file for writing, then block such that the client + will hold a capability. + + Don't return until the remote process has got as far as opening + the file, then return the RemoteProcess instance. + """ + assert(self.is_mounted()) + + path = os.path.join(self.mountpoint, basename) + + if write: + pyscript = dedent(""" + import time + + with open("{path}", 'w') as f: + f.write('content') + f.flush() + f.write('content2') + while True: + time.sleep(1) + """).format(path=path) + else: + pyscript = dedent(""" + import time + + with open("{path}", 'r') as f: + while True: + time.sleep(1) + """).format(path=path) + + rproc = self._run_python(pyscript) + self.background_procs.append(rproc) + + # This wait would not be sufficient if the file had already + # existed, but it's simple and in practice users of open_background + # are not using it on existing files. 
+ self.wait_for_visible(basename) + + return rproc + + def wait_for_dir_empty(self, dirname, timeout=30): + i = 0 + dirpath = os.path.join(self.mountpoint, dirname) + while i < timeout: + nr_entries = int(self.getfattr(dirpath, "ceph.dir.entries")) + if nr_entries == 0: + log.debug("Directory {0} seen empty from {1} after {2}s ".format( + dirname, self.client_id, i)) + return + else: + time.sleep(1) + i += 1 + + raise RuntimeError("Timed out after {0}s waiting for {1} to become empty from {2}".format( + i, dirname, self.client_id)) + + def wait_for_visible(self, basename="background_file", timeout=30): + i = 0 + while i < timeout: + r = self.client_remote.run(args=[ + 'sudo', 'ls', os.path.join(self.mountpoint, basename) + ], check_status=False) + if r.exitstatus == 0: + log.debug("File {0} became visible from {1} after {2}s".format( + basename, self.client_id, i)) + return + else: + time.sleep(1) + i += 1 + + raise RuntimeError("Timed out after {0}s waiting for {1} to become visible from {2}".format( + i, basename, self.client_id)) + + def lock_background(self, basename="background_file", do_flock=True): + """ + Open and lock a files for writing, hold the lock in a background process + """ + assert(self.is_mounted()) + + path = os.path.join(self.mountpoint, basename) + + script_builder = """ + import time + import fcntl + import struct""" + if do_flock: + script_builder += """ + f1 = open("{path}-1", 'w') + fcntl.flock(f1, fcntl.LOCK_EX | fcntl.LOCK_NB)""" + script_builder += """ + f2 = open("{path}-2", 'w') + lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0) + fcntl.fcntl(f2, fcntl.F_SETLK, lockdata) + while True: + time.sleep(1) + """ + + pyscript = dedent(script_builder).format(path=path) + + log.info("lock_background file {0}".format(basename)) + rproc = self._run_python(pyscript) + self.background_procs.append(rproc) + return rproc + + def lock_and_release(self, basename="background_file"): + assert(self.is_mounted()) + + path = os.path.join(self.mountpoint, basename) + + script = """ + import time + import fcntl + import struct + f1 = open("{path}-1", 'w') + fcntl.flock(f1, fcntl.LOCK_EX) + f2 = open("{path}-2", 'w') + lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0) + fcntl.fcntl(f2, fcntl.F_SETLK, lockdata) + """ + pyscript = dedent(script).format(path=path) + + log.info("lock_and_release file {0}".format(basename)) + return self._run_python(pyscript) + + def check_filelock(self, basename="background_file", do_flock=True): + assert(self.is_mounted()) + + path = os.path.join(self.mountpoint, basename) + + script_builder = """ + import fcntl + import errno + import struct""" + if do_flock: + script_builder += """ + f1 = open("{path}-1", 'r') + try: + fcntl.flock(f1, fcntl.LOCK_EX | fcntl.LOCK_NB) + except IOError as e: + if e.errno == errno.EAGAIN: + pass + else: + raise RuntimeError("flock on file {path}-1 not found")""" + script_builder += """ + f2 = open("{path}-2", 'r') + try: + lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0) + fcntl.fcntl(f2, fcntl.F_SETLK, lockdata) + except IOError as e: + if e.errno == errno.EAGAIN: + pass + else: + raise RuntimeError("posix lock on file {path}-2 not found") + """ + pyscript = dedent(script_builder).format(path=path) + + log.info("check lock on file {0}".format(basename)) + self.client_remote.run(args=[ + 'sudo', 'python3', '-c', pyscript + ]) + + def write_background(self, basename="background_file", loop=False): + """ + Open a file for writing, complete as soon as you can + :param basename: + 
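+ :param loop: if True, keep rewriting the file once per second until the
+ process is killed; if False, write once and exit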
:return: + """ + assert(self.is_mounted()) + + path = os.path.join(self.mountpoint, basename) + + pyscript = dedent(""" + import os + import time + + fd = os.open("{path}", os.O_RDWR | os.O_CREAT, 0o644) + try: + while True: + os.write(fd, b'content') + time.sleep(1) + if not {loop}: + break + except IOError as e: + pass + os.close(fd) + """).format(path=path, loop=str(loop)) + + rproc = self._run_python(pyscript) + self.background_procs.append(rproc) + return rproc + + def write_n_mb(self, filename, n_mb, seek=0, wait=True): + """ + Write the requested number of megabytes to a file + """ + assert(self.is_mounted()) + + return self.run_shell(["dd", "if=/dev/urandom", "of={0}".format(filename), + "bs=1M", "conv=fdatasync", + "count={0}".format(int(n_mb)), + "seek={0}".format(int(seek)) + ], wait=wait) + + def write_test_pattern(self, filename, size): + log.info("Writing {0} bytes to {1}".format(size, filename)) + return self.run_python(dedent(""" + import zlib + path = "{path}" + with open(path, 'w') as f: + for i in range(0, {size}): + val = zlib.crc32(str(i).encode('utf-8')) & 7 + f.write(chr(val)) + """.format( + path=os.path.join(self.mountpoint, filename), + size=size + ))) + + def validate_test_pattern(self, filename, size): + log.info("Validating {0} bytes from {1}".format(size, filename)) + return self.run_python(dedent(""" + import zlib + path = "{path}" + with open(path, 'r') as f: + bytes = f.read() + if len(bytes) != {size}: + raise RuntimeError("Bad length {{0}} vs. expected {{1}}".format( + len(bytes), {size} + )) + for i, b in enumerate(bytes): + val = zlib.crc32(str(i).encode('utf-8')) & 7 + if b != chr(val): + raise RuntimeError("Bad data at offset {{0}}".format(i)) + """.format( + path=os.path.join(self.mountpoint, filename), + size=size + ))) + + def open_n_background(self, fs_path, count): + """ + Open N files for writing, hold them open in a background process + + :param fs_path: Path relative to CephFS root, e.g. 
"foo/bar" + :return: a RemoteProcess + """ + assert(self.is_mounted()) + + abs_path = os.path.join(self.mountpoint, fs_path) + + pyscript = dedent(""" + import sys + import time + import os + + n = {count} + abs_path = "{abs_path}" + + if not os.path.exists(abs_path): + os.makedirs(abs_path) + + handles = [] + for i in range(0, n): + fname = "file_"+str(i) + path = os.path.join(abs_path, fname) + handles.append(open(path, 'w')) + + while True: + time.sleep(1) + """).format(abs_path=abs_path, count=count) + + rproc = self._run_python(pyscript) + self.background_procs.append(rproc) + return rproc + + def create_n_files(self, fs_path, count, sync=False): + assert(self.is_mounted()) + + abs_path = os.path.join(self.mountpoint, fs_path) + + pyscript = dedent(""" + import sys + import time + import os + + n = {count} + abs_path = "{abs_path}" + + if not os.path.exists(os.path.dirname(abs_path)): + os.makedirs(os.path.dirname(abs_path)) + + for i in range(0, n): + fname = "{{0}}_{{1}}".format(abs_path, i) + with open(fname, 'w') as f: + f.write('content') + if {sync}: + f.flush() + os.fsync(f.fileno()) + """).format(abs_path=abs_path, count=count, sync=str(sync)) + + self.run_python(pyscript) + + def teardown(self): + for p in self.background_procs: + log.info("Terminating background process") + self._kill_background(p) + + self.background_procs = [] + + def _kill_background(self, p): + if p.stdin: + p.stdin.close() + try: + p.wait() + except (CommandFailedError, ConnectionLostError): + pass + + def kill_background(self, p): + """ + For a process that was returned by one of the _background member functions, + kill it hard. + """ + self._kill_background(p) + self.background_procs.remove(p) + + def send_signal(self, signal): + signal = signal.lower() + if signal.lower() not in ['sigstop', 'sigcont', 'sigterm', 'sigkill']: + raise NotImplementedError + + self.client_remote.run(args=['sudo', 'kill', '-{0}'.format(signal), + self.client_pid], omit_sudo=False) + + def get_global_id(self): + raise NotImplementedError() + + def get_global_inst(self): + raise NotImplementedError() + + def get_global_addr(self): + raise NotImplementedError() + + def get_osd_epoch(self): + raise NotImplementedError() + + def stat(self, fs_path, wait=True): + """ + stat a file, and return the result as a dictionary like this: + { + "st_ctime": 1414161137.0, + "st_mtime": 1414161137.0, + "st_nlink": 33, + "st_gid": 0, + "st_dev": 16777218, + "st_size": 1190, + "st_ino": 2, + "st_uid": 0, + "st_mode": 16877, + "st_atime": 1431520593.0 + } + + Raises exception on absent file. + """ + abs_path = os.path.join(self.mountpoint, fs_path) + + pyscript = dedent(""" + import os + import stat + import json + import sys + + try: + s = os.stat("{path}") + except OSError as e: + sys.exit(e.errno) + + attrs = ["st_mode", "st_ino", "st_dev", "st_nlink", "st_uid", "st_gid", "st_size", "st_atime", "st_mtime", "st_ctime"] + print(json.dumps( + dict([(a, getattr(s, a)) for a in attrs]), + indent=2)) + """).format(path=abs_path) + proc = self._run_python(pyscript) + if wait: + proc.wait() + return json.loads(proc.stdout.getvalue().strip()) + else: + return proc + + def touch(self, fs_path): + """ + Create a dentry if it doesn't already exist. This python + implementation exists because the usual command line tool doesn't + pass through error codes like EIO. 
+ + :param fs_path: + :return: + """ + abs_path = os.path.join(self.mountpoint, fs_path) + pyscript = dedent(""" + import sys + import errno + + try: + f = open("{path}", "w") + f.close() + except IOError as e: + sys.exit(errno.EIO) + """).format(path=abs_path) + proc = self._run_python(pyscript) + proc.wait() + + def path_to_ino(self, fs_path, follow_symlinks=True): + abs_path = os.path.join(self.mountpoint, fs_path) + + if follow_symlinks: + pyscript = dedent(""" + import os + import stat + + print(os.stat("{path}").st_ino) + """).format(path=abs_path) + else: + pyscript = dedent(""" + import os + import stat + + print(os.lstat("{path}").st_ino) + """).format(path=abs_path) + + proc = self._run_python(pyscript) + proc.wait() + return int(proc.stdout.getvalue().strip()) + + def path_to_nlink(self, fs_path): + abs_path = os.path.join(self.mountpoint, fs_path) + + pyscript = dedent(""" + import os + import stat + + print(os.stat("{path}").st_nlink) + """).format(path=abs_path) + + proc = self._run_python(pyscript) + proc.wait() + return int(proc.stdout.getvalue().strip()) + + def ls(self, path=None): + """ + Wrap ls: return a list of strings + """ + cmd = ["ls"] + if path: + cmd.append(path) + + ls_text = self.run_shell(cmd).stdout.getvalue().strip() + + if ls_text: + return ls_text.split("\n") + else: + # Special case because otherwise split on empty string + # gives you [''] instead of [] + return [] + + def setfattr(self, path, key, val): + """ + Wrap setfattr. + + :param path: relative to mount point + :param key: xattr name + :param val: xattr value + :return: None + """ + self.run_shell(["setfattr", "-n", key, "-v", val, path]) + + def getfattr(self, path, attr): + """ + Wrap getfattr: return the values of a named xattr on one file, or + None if the attribute is not found. + + :return: a string + """ + p = self.run_shell(["getfattr", "--only-values", "-n", attr, path], wait=False) + try: + p.wait() + except CommandFailedError as e: + if e.exitstatus == 1 and "No such attribute" in p.stderr.getvalue(): + return None + else: + raise + + return str(p.stdout.getvalue()) + + def df(self): + """ + Wrap df: return a dict of usage fields in bytes + """ + + p = self.run_shell(["df", "-B1", "."]) + lines = p.stdout.getvalue().strip().split("\n") + fs, total, used, avail = lines[1].split()[:4] + log.warning(lines) + + return { + "total": int(total), + "used": int(used), + "available": int(avail) + } diff --git a/qa/tasks/cephfs/test_admin.py b/qa/tasks/cephfs/test_admin.py new file mode 100644 index 00000000..e4ce5570 --- /dev/null +++ b/qa/tasks/cephfs/test_admin.py @@ -0,0 +1,229 @@ +import json + +from teuthology.orchestra.run import CommandFailedError + +from unittest import case +from tasks.cephfs.cephfs_test_case import CephFSTestCase +from tasks.cephfs.fuse_mount import FuseMount + +from tasks.cephfs.filesystem import FileLayout + +class TestAdminCommands(CephFSTestCase): + """ + Tests for administration command. + """ + + CLIENTS_REQUIRED = 1 + MDSS_REQUIRED = 1 + + def test_fs_status(self): + """ + That `ceph fs status` command functions. 
+ """ + + s = self.fs.mon_manager.raw_cluster_cmd("fs", "status") + self.assertTrue("active" in s) + + def _setup_ec_pools(self, n, metadata=True, overwrites=True): + if metadata: + self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-meta", "8") + cmd = ['osd', 'erasure-code-profile', 'set', n+"-profile", "m=2", "k=2", "crush-failure-domain=osd"] + self.fs.mon_manager.raw_cluster_cmd(*cmd) + self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', n+"-data", "8", "erasure", n+"-profile") + if overwrites: + self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'set', n+"-data", 'allow_ec_overwrites', 'true') + + def _check_pool_application_metadata_key_value(self, pool, app, key, value): + output = self.fs.mon_manager.raw_cluster_cmd( + 'osd', 'pool', 'application', 'get', pool, app, key) + self.assertEqual(str(output.strip()), value) + + def test_add_data_pool_root(self): + """ + That a new data pool can be added and used for the root directory. + """ + + p = self.fs.add_data_pool("foo") + self.fs.set_dir_layout(self.mount_a, ".", FileLayout(pool=p)) + + def test_add_data_pool_application_metadata(self): + """ + That the application metadata set on a newly added data pool is as expected. + """ + pool_name = "foo" + mon_cmd = self.fs.mon_manager.raw_cluster_cmd + mon_cmd('osd', 'pool', 'create', pool_name, str(self.fs.pgs_per_fs_pool)) + # Check whether https://tracker.ceph.com/issues/43061 is fixed + mon_cmd('osd', 'pool', 'application', 'enable', pool_name, 'cephfs') + self.fs.add_data_pool(pool_name, create=False) + self._check_pool_application_metadata_key_value( + pool_name, 'cephfs', 'data', self.fs.name) + + def test_add_data_pool_subdir(self): + """ + That a new data pool can be added and used for a sub-directory. + """ + + p = self.fs.add_data_pool("foo") + self.mount_a.run_shell(["mkdir", "subdir"]) + self.fs.set_dir_layout(self.mount_a, "subdir", FileLayout(pool=p)) + + def test_add_data_pool_non_alphamueric_name_as_subdir(self): + """ + That a new data pool with non-alphanumeric name can be added and used for a sub-directory. + """ + p = self.fs.add_data_pool("I-am-data_pool00.") + self.mount_a.run_shell("mkdir subdir") + self.fs.set_dir_layout(self.mount_a, "subdir", FileLayout(pool=p)) + + def test_add_data_pool_ec(self): + """ + That a new EC data pool can be added. + """ + + n = "test_add_data_pool_ec" + self._setup_ec_pools(n, metadata=False) + p = self.fs.add_data_pool(n+"-data", create=False) + + def test_new_default_ec(self): + """ + That a new file system warns/fails with an EC default data pool. + """ + + self.fs.delete_all_filesystems() + n = "test_new_default_ec" + self._setup_ec_pools(n) + try: + self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data") + except CommandFailedError as e: + if e.exitstatus == 22: + pass + else: + raise + else: + raise RuntimeError("expected failure") + + def test_new_default_ec_force(self): + """ + That a new file system succeeds with an EC default data pool with --force. + """ + + self.fs.delete_all_filesystems() + n = "test_new_default_ec_force" + self._setup_ec_pools(n) + self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force") + + def test_new_default_ec_no_overwrite(self): + """ + That a new file system fails with an EC default data pool without overwrite. 
+ """ + + self.fs.delete_all_filesystems() + n = "test_new_default_ec_no_overwrite" + self._setup_ec_pools(n, overwrites=False) + try: + self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data") + except CommandFailedError as e: + if e.exitstatus == 22: + pass + else: + raise + else: + raise RuntimeError("expected failure") + # and even with --force ! + try: + self.fs.mon_manager.raw_cluster_cmd('fs', 'new', n, n+"-meta", n+"-data", "--force") + except CommandFailedError as e: + if e.exitstatus == 22: + pass + else: + raise + else: + raise RuntimeError("expected failure") + + def test_fs_new_pool_application_metadata(self): + """ + That the application metadata set on the pools of a newly created filesystem are as expected. + """ + self.fs.delete_all_filesystems() + fs_name = "test_fs_new_pool_application" + keys = ['metadata', 'data'] + pool_names = [fs_name+'-'+key for key in keys] + mon_cmd = self.fs.mon_manager.raw_cluster_cmd + for p in pool_names: + mon_cmd('osd', 'pool', 'create', p, str(self.fs.pgs_per_fs_pool)) + mon_cmd('osd', 'pool', 'application', 'enable', p, 'cephfs') + mon_cmd('fs', 'new', fs_name, pool_names[0], pool_names[1]) + for i in range(2): + self._check_pool_application_metadata_key_value( + pool_names[i], 'cephfs', keys[i], fs_name) + + +class TestConfigCommands(CephFSTestCase): + """ + Test that daemons and clients respond to the otherwise rarely-used + runtime config modification operations. + """ + + CLIENTS_REQUIRED = 1 + MDSS_REQUIRED = 1 + + def test_ceph_config_show(self): + """ + That I can successfully show MDS configuration. + """ + + names = self.fs.get_rank_names() + for n in names: + s = self.fs.mon_manager.raw_cluster_cmd("config", "show", "mds."+n) + self.assertTrue("NAME" in s) + self.assertTrue("mon_host" in s) + + def test_client_config(self): + """ + That I can successfully issue asok "config set" commands + + :return: + """ + + if not isinstance(self.mount_a, FuseMount): + raise case.SkipTest("Test only applies to FUSE clients") + + test_key = "client_cache_size" + test_val = "123" + self.mount_a.admin_socket(['config', 'set', test_key, test_val]) + out = self.mount_a.admin_socket(['config', 'get', test_key]) + self.assertEqual(out[test_key], test_val) + + self.mount_a.write_n_mb("file.bin", 1); + + # Implicitly asserting that things don't have lockdep error in shutdown + self.mount_a.umount_wait(require_clean=True) + self.fs.mds_stop() + + def test_mds_config_asok(self): + test_key = "mds_max_purge_ops" + test_val = "123" + self.fs.mds_asok(['config', 'set', test_key, test_val]) + out = self.fs.mds_asok(['config', 'get', test_key]) + self.assertEqual(out[test_key], test_val) + + # Implicitly asserting that things don't have lockdep error in shutdown + self.mount_a.umount_wait(require_clean=True) + self.fs.mds_stop() + + def test_mds_config_tell(self): + test_key = "mds_max_purge_ops" + test_val = "123" + + mds_id = self.fs.get_lone_mds_id() + self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id), "injectargs", + "--{0}={1}".format(test_key, test_val)) + + # Read it back with asok because there is no `tell` equivalent + out = self.fs.mds_asok(['config', 'get', test_key]) + self.assertEqual(out[test_key], test_val) + + # Implicitly asserting that things don't have lockdep error in shutdown + self.mount_a.umount_wait(require_clean=True) + self.fs.mds_stop() diff --git a/qa/tasks/cephfs/test_auto_repair.py b/qa/tasks/cephfs/test_auto_repair.py new file mode 100644 index 00000000..c0aa2e4c --- /dev/null +++ 
b/qa/tasks/cephfs/test_auto_repair.py @@ -0,0 +1,90 @@ + +""" +Exercise the MDS's auto repair functions +""" + +import logging +import time + +from teuthology.orchestra.run import CommandFailedError +from tasks.cephfs.cephfs_test_case import CephFSTestCase + + +log = logging.getLogger(__name__) + + +# Arbitrary timeouts for operations involving restarting +# an MDS or waiting for it to come up +MDS_RESTART_GRACE = 60 + + +class TestMDSAutoRepair(CephFSTestCase): + def test_backtrace_repair(self): + """ + MDS should verify/fix backtrace on fetch dirfrag + """ + + self.mount_a.run_shell(["mkdir", "testdir1"]) + self.mount_a.run_shell(["touch", "testdir1/testfile"]) + dir_objname = "{:x}.00000000".format(self.mount_a.path_to_ino("testdir1")) + + # drop inodes caps + self.mount_a.umount_wait() + + # flush journal entries to dirfrag objects, and expire journal + self.fs.mds_asok(['flush', 'journal']) + + # Restart the MDS to drop the metadata cache (because we expired the journal, + # nothing gets replayed into cache on restart) + self.fs.mds_stop() + self.fs.mds_fail_restart() + self.fs.wait_for_daemons() + + # remove testdir1's backtrace + self.fs.rados(["rmxattr", dir_objname, "parent"]) + + # readdir (fetch dirfrag) should fix testdir1's backtrace + self.mount_a.mount() + self.mount_a.wait_until_mounted() + self.mount_a.run_shell(["ls", "testdir1"]) + + # flush journal entries to dirfrag objects + self.fs.mds_asok(['flush', 'journal']) + + # check if backtrace exists + self.fs.rados(["getxattr", dir_objname, "parent"]) + + def test_mds_readonly(self): + """ + test if MDS behave correct when it's readonly + """ + # operation should successd when MDS is not readonly + self.mount_a.run_shell(["touch", "test_file1"]) + writer = self.mount_a.write_background(loop=True) + + time.sleep(10) + self.assertFalse(writer.finished) + + # force MDS to read-only mode + self.fs.mds_asok(['force_readonly']) + time.sleep(10) + + # touching test file should fail + try: + self.mount_a.run_shell(["touch", "test_file1"]) + except CommandFailedError: + pass + else: + self.assertTrue(False) + + # background writer also should fail + self.assertTrue(writer.finished) + + # The MDS should report its readonly health state to the mon + self.wait_for_health("MDS_READ_ONLY", timeout=30) + + # restart mds to make it writable + self.fs.mds_fail_restart() + self.fs.wait_for_daemons() + + self.wait_for_health_clear(timeout=30) diff --git a/qa/tasks/cephfs/test_backtrace.py b/qa/tasks/cephfs/test_backtrace.py new file mode 100644 index 00000000..af246a1e --- /dev/null +++ b/qa/tasks/cephfs/test_backtrace.py @@ -0,0 +1,78 @@ + +from tasks.cephfs.cephfs_test_case import CephFSTestCase + + +class TestBacktrace(CephFSTestCase): + def test_backtrace(self): + """ + That the 'parent' and 'layout' xattrs on the head objects of files + are updated correctly. 
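+
+ (A backtrace is the 'parent' xattr stored on a file's first data object;
+ it lists the ancestor dentries so the file can be located by inode.)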
+ """ + + old_data_pool_name = self.fs.get_data_pool_name() + old_pool_id = self.fs.get_data_pool_id() + + # Create a file for subsequent checks + self.mount_a.run_shell(["mkdir", "parent_a"]) + self.mount_a.run_shell(["touch", "parent_a/alpha"]) + file_ino = self.mount_a.path_to_ino("parent_a/alpha") + + # That backtrace and layout are written after initial flush + self.fs.mds_asok(["flush", "journal"]) + backtrace = self.fs.read_backtrace(file_ino) + self.assertEqual(['alpha', 'parent_a'], [a['dname'] for a in backtrace['ancestors']]) + layout = self.fs.read_layout(file_ino) + self.assertDictEqual(layout, { + "stripe_unit": 4194304, + "stripe_count": 1, + "object_size": 4194304, + "pool_id": old_pool_id, + "pool_ns": "", + }) + self.assertEqual(backtrace['pool'], old_pool_id) + + # That backtrace is written after parentage changes + self.mount_a.run_shell(["mkdir", "parent_b"]) + self.mount_a.run_shell(["mv", "parent_a/alpha", "parent_b/alpha"]) + + self.fs.mds_asok(["flush", "journal"]) + backtrace = self.fs.read_backtrace(file_ino) + self.assertEqual(['alpha', 'parent_b'], [a['dname'] for a in backtrace['ancestors']]) + + # Create a new data pool + new_pool_name = "data_new" + new_pool_id = self.fs.add_data_pool(new_pool_name) + + # That an object which has switched pools gets its backtrace updated + self.mount_a.setfattr("./parent_b/alpha", + "ceph.file.layout.pool", new_pool_name) + self.fs.mds_asok(["flush", "journal"]) + backtrace_old_pool = self.fs.read_backtrace(file_ino, pool=old_data_pool_name) + self.assertEqual(backtrace_old_pool['pool'], new_pool_id) + backtrace_new_pool = self.fs.read_backtrace(file_ino, pool=new_pool_name) + self.assertEqual(backtrace_new_pool['pool'], new_pool_id) + new_pool_layout = self.fs.read_layout(file_ino, pool=new_pool_name) + self.assertEqual(new_pool_layout['pool_id'], new_pool_id) + self.assertEqual(new_pool_layout['pool_ns'], '') + + # That subsequent linkage changes are only written to new pool backtrace + self.mount_a.run_shell(["mkdir", "parent_c"]) + self.mount_a.run_shell(["mv", "parent_b/alpha", "parent_c/alpha"]) + self.fs.mds_asok(["flush", "journal"]) + backtrace_old_pool = self.fs.read_backtrace(file_ino, pool=old_data_pool_name) + self.assertEqual(['alpha', 'parent_b'], [a['dname'] for a in backtrace_old_pool['ancestors']]) + backtrace_new_pool = self.fs.read_backtrace(file_ino, pool=new_pool_name) + self.assertEqual(['alpha', 'parent_c'], [a['dname'] for a in backtrace_new_pool['ancestors']]) + + # That layout is written to new pool after change to other field in layout + self.mount_a.setfattr("./parent_c/alpha", + "ceph.file.layout.object_size", "8388608") + + self.fs.mds_asok(["flush", "journal"]) + new_pool_layout = self.fs.read_layout(file_ino, pool=new_pool_name) + self.assertEqual(new_pool_layout['object_size'], 8388608) + + # ...but not to the old pool: the old pool's backtrace points to the new pool, and that's enough, + # we don't update the layout in all the old pools whenever it changes + old_pool_layout = self.fs.read_layout(file_ino, pool=old_data_pool_name) + self.assertEqual(old_pool_layout['object_size'], 4194304) diff --git a/qa/tasks/cephfs/test_cap_flush.py b/qa/tasks/cephfs/test_cap_flush.py new file mode 100644 index 00000000..27b9af67 --- /dev/null +++ b/qa/tasks/cephfs/test_cap_flush.py @@ -0,0 +1,64 @@ + +import os +import time +from textwrap import dedent +from unittest import SkipTest +from tasks.cephfs.fuse_mount import FuseMount +from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology + 
+class TestCapFlush(CephFSTestCase): + @for_teuthology + def test_replay_create(self): + """ + MDS starts to handle client caps when it enters clientreplay stage. + When handling a client cap in clientreplay stage, it's possible that + corresponding inode does not exist because the client request which + creates inode hasn't been replayed. + """ + + if not isinstance(self.mount_a, FuseMount): + raise SkipTest("Require FUSE client to inject client release failure") + + dir_path = os.path.join(self.mount_a.mountpoint, "testdir") + py_script = dedent(""" + import os + os.mkdir("{0}") + fd = os.open("{0}", os.O_RDONLY) + os.fchmod(fd, 0o777) + os.fsync(fd) + """).format(dir_path) + self.mount_a.run_python(py_script) + + self.fs.mds_asok(["flush", "journal"]) + + # client will only get unsafe replay + self.fs.mds_asok(["config", "set", "mds_log_pause", "1"]) + + file_name = "testfile" + file_path = dir_path + "/" + file_name + + # Create a file and modify its mode. ceph-fuse will mark Ax cap dirty + py_script = dedent(""" + import os + os.chdir("{0}") + os.setgid(65534) + os.setuid(65534) + fd = os.open("{1}", os.O_CREAT | os.O_RDWR, 0o644) + os.fchmod(fd, 0o640) + """).format(dir_path, file_name) + self.mount_a.run_python(py_script) + + # Modify file mode by different user. ceph-fuse will send a setattr request + self.mount_a.run_shell(["chmod", "600", file_path], wait=False) + + time.sleep(10) + + # Restart mds. Client will re-send the unsafe request and cap flush + self.fs.mds_stop() + self.fs.mds_fail_restart() + self.fs.wait_for_daemons() + + mode = self.mount_a.run_shell(['stat', '-c' '%a', file_path]).stdout.getvalue().strip() + # If the cap flush get dropped, mode should be 0644. + # (Ax cap stays in dirty state, which prevents setattr reply from updating file mode) + self.assertEqual(mode, "600") diff --git a/qa/tasks/cephfs/test_cephfs_shell.py b/qa/tasks/cephfs/test_cephfs_shell.py new file mode 100644 index 00000000..8ddbaedb --- /dev/null +++ b/qa/tasks/cephfs/test_cephfs_shell.py @@ -0,0 +1,279 @@ +import os +import crypt +import logging +from six import StringIO +from tasks.cephfs.cephfs_test_case import CephFSTestCase + +log = logging.getLogger(__name__) + + +class TestCephFSShell(CephFSTestCase): + CLIENTS_REQUIRED = 1 + + def _cephfs_shell(self, cmd, opts=None, stdin=None): + args = ["cephfs-shell", "-c", self.mount_a.config_path] + if opts is not None: + args.extend(opts) + args.extend(("--", cmd)) + log.info("Running command: {}".format(" ".join(args))) + status = self.mount_a.client_remote.run(args=args, stdout=StringIO(), + stdin=stdin) + return status.stdout.getvalue().strip() + + def test_help(self): + """ + Test that help outputs commands. 
+ """ + + o = self._cephfs_shell("help") + + log.info("output:\n{}".format(o)) + + def test_mkdir(self): + """ + Test that mkdir creates directory + """ + o = self._cephfs_shell("mkdir d1") + log.info("cephfs-shell output:\n{}".format(o)) + + o = self.mount_a.stat('d1') + log.info("mount_a output:\n{}".format(o)) + + def test_mkdir_with_07000_octal_mode(self): + """ + Test that mkdir fails with octal mode greater than 0777 + """ + o = self._cephfs_shell("mkdir -m 07000 d2") + log.info("cephfs-shell output:\n{}".format(o)) + + # mkdir d2 should fail + try: + o = self.mount_a.stat('d2') + log.info("mount_a output:\n{}".format(o)) + except: + pass + + def test_mkdir_with_negative_octal_mode(self): + """ + Test that mkdir fails with negative octal mode + """ + o = self._cephfs_shell("mkdir -m -0755 d3") + log.info("cephfs-shell output:\n{}".format(o)) + + # mkdir d3 should fail + try: + o = self.mount_a.stat('d3') + log.info("mount_a output:\n{}".format(o)) + except: + pass + + def test_mkdir_with_non_octal_mode(self): + """ + Test that mkdir passes with non-octal mode + """ + o = self._cephfs_shell("mkdir -m u=rwx d4") + log.info("cephfs-shell output:\n{}".format(o)) + + # mkdir d4 should pass + o = self.mount_a.stat('d4') + assert((o['st_mode'] & 0o700) == 0o700) + + def test_mkdir_with_bad_non_octal_mode(self): + """ + Test that mkdir failes with bad non-octal mode + """ + o = self._cephfs_shell("mkdir -m ugx=0755 d5") + log.info("cephfs-shell output:\n{}".format(o)) + + # mkdir d5 should fail + try: + o = self.mount_a.stat('d5') + log.info("mount_a output:\n{}".format(o)) + except: + pass + + def test_mkdir_path_without_path_option(self): + """ + Test that mkdir fails without path option for creating path + """ + o = self._cephfs_shell("mkdir d5/d6/d7") + log.info("cephfs-shell output:\n{}".format(o)) + + # mkdir d5/d6/d7 should fail + try: + o = self.mount_a.stat('d5/d6/d7') + log.info("mount_a output:\n{}".format(o)) + except: + pass + + def test_mkdir_path_with_path_option(self): + """ + Test that mkdir passes with path option for creating path + """ + o = self._cephfs_shell("mkdir -p d5/d6/d7") + log.info("cephfs-shell output:\n{}".format(o)) + + # mkdir d5/d6/d7 should pass + o = self.mount_a.stat('d5/d6/d7') + log.info("mount_a output:\n{}".format(o)) + + def validate_stat_output(self, s): + l = s.split('\n') + log.info("lines:\n{}".format(l)) + rv = l[-1] # get last line; a failed stat will have "1" as the line + log.info("rv:{}".format(rv)) + r = 0 + try: + r = int(rv) # a non-numeric line will cause an exception + except: + pass + assert(r == 0) + + def test_put_and_get_without_target_directory(self): + """ + Test that put fails without target path + """ + # generate test data in a directory + self._cephfs_shell("!mkdir p1") + self._cephfs_shell('!dd if=/dev/urandom of=p1/dump1 bs=1M count=1') + self._cephfs_shell('!dd if=/dev/urandom of=p1/dump2 bs=2M count=1') + self._cephfs_shell('!dd if=/dev/urandom of=p1/dump3 bs=3M count=1') + + # copy the whole directory over to the cephfs + o = self._cephfs_shell("put p1") + log.info("cephfs-shell output:\n{}".format(o)) + + # put p1 should pass + o = self.mount_a.stat('p1') + log.info("mount_a output:\n{}".format(o)) + o = self.mount_a.stat('p1/dump1') + log.info("mount_a output:\n{}".format(o)) + o = self.mount_a.stat('p1/dump2') + log.info("mount_a output:\n{}".format(o)) + o = self.mount_a.stat('p1/dump3') + log.info("mount_a output:\n{}".format(o)) + + self._cephfs_shell('!rm -rf p1') + o = self._cephfs_shell("get p1") + o = 
self._cephfs_shell('!stat p1 || echo $?') + log.info("cephfs-shell output:\n{}".format(o)) + self.validate_stat_output(o) + + o = self._cephfs_shell('!stat p1/dump1 || echo $?') + log.info("cephfs-shell output:\n{}".format(o)) + self.validate_stat_output(o) + + o = self._cephfs_shell('!stat p1/dump2 || echo $?') + log.info("cephfs-shell output:\n{}".format(o)) + self.validate_stat_output(o) + + o = self._cephfs_shell('!stat p1/dump3 || echo $?') + log.info("cephfs-shell output:\n{}".format(o)) + self.validate_stat_output(o) + + # the 'put' command gets tested as well with the 'get' comamnd + def test_get_with_target_name(self): + """ + Test that get passes with target name + """ + s = 'C' * 1024 + s_hash = crypt.crypt(s, '.A') + o = self._cephfs_shell("put - dump4", stdin=s) + log.info("cephfs-shell output:\n{}".format(o)) + + # put - dump4 should pass + o = self.mount_a.stat('dump4') + log.info("mount_a output:\n{}".format(o)) + + o = self._cephfs_shell("get dump4 .") + log.info("cephfs-shell output:\n{}".format(o)) + + o = self._cephfs_shell("!cat dump4") + o_hash = crypt.crypt(o, '.A') + + # s_hash must be equal to o_hash + log.info("s_hash:{}".format(s_hash)) + log.info("o_hash:{}".format(o_hash)) + assert(s_hash == o_hash) + + def test_get_without_target_name(self): + """ + Test that get passes with target name + """ + s = 'D' * 1024 + o = self._cephfs_shell("put - dump5", stdin=s) + log.info("cephfs-shell output:\n{}".format(o)) + + # put - dump5 should pass + o = self.mount_a.stat('dump5') + log.info("mount_a output:\n{}".format(o)) + + # get dump5 should fail + o = self._cephfs_shell("get dump5") + o = self._cephfs_shell("!stat dump5 || echo $?") + log.info("cephfs-shell output:\n{}".format(o)) + l = o.split('\n') + try: + ret = int(l[1]) + # verify that stat dump5 passes + # if ret == 1, then that implies the stat failed + # which implies that there was a problem with "get dump5" + assert(ret != 1) + except ValueError: + # we have a valid stat output; so this is good + # if the int() fails then that means there's a valid stat output + pass + + def test_get_to_console(self): + """ + Test that get passes with target name + """ + s = 'E' * 1024 + s_hash = crypt.crypt(s, '.A') + o = self._cephfs_shell("put - dump6", stdin=s) + log.info("cephfs-shell output:\n{}".format(o)) + + # put - dump6 should pass + o = self.mount_a.stat('dump6') + log.info("mount_a output:\n{}".format(o)) + + # get dump6 - should pass + o = self._cephfs_shell("get dump6 -") + o_hash = crypt.crypt(o, '.A') + log.info("cephfs-shell output:\n{}".format(o)) + + # s_hash must be equal to o_hash + log.info("s_hash:{}".format(s_hash)) + log.info("o_hash:{}".format(o_hash)) + assert(s_hash == o_hash) + +# def test_ls(self): +# """ +# Test that ls passes +# """ +# o = self._cephfs_shell("ls") +# log.info("cephfs-shell output:\n{}".format(o)) +# +# o = self.mount_a.run_shell(['ls']).stdout.getvalue().strip().replace("\n", " ").split() +# log.info("mount_a output:\n{}".format(o)) +# +# # ls should not list hidden files without the -a switch +# if '.' in o or '..' in o: +# log.info('ls failed') +# else: +# log.info('ls succeeded') +# +# def test_ls_a(self): +# """ +# Test that ls -a passes +# """ +# o = self._cephfs_shell("ls -a") +# log.info("cephfs-shell output:\n{}".format(o)) +# +# o = self.mount_a.run_shell(['ls', '-a']).stdout.getvalue().strip().replace("\n", " ").split() +# log.info("mount_a output:\n{}".format(o)) +# +# if '.' in o and '..' 
in o: +# log.info('ls -a succeeded') +# else: +# log.info('ls -a failed') diff --git a/qa/tasks/cephfs/test_client_limits.py b/qa/tasks/cephfs/test_client_limits.py new file mode 100644 index 00000000..613a405a --- /dev/null +++ b/qa/tasks/cephfs/test_client_limits.py @@ -0,0 +1,330 @@ + +""" +Exercise the MDS's behaviour when clients and the MDCache reach or +exceed the limits of how many caps/inodes they should hold. +""" + +import logging +from textwrap import dedent +from unittest import SkipTest +from teuthology.orchestra.run import CommandFailedError +from tasks.ceph_test_case import TestTimeoutError +from tasks.cephfs.cephfs_test_case import CephFSTestCase, needs_trimming +from tasks.cephfs.fuse_mount import FuseMount +import os + + +log = logging.getLogger(__name__) + + +# Arbitrary timeouts for operations involving restarting +# an MDS or waiting for it to come up +MDS_RESTART_GRACE = 60 + +# Hardcoded values from Server::recall_client_state +CAP_RECALL_RATIO = 0.8 +CAP_RECALL_MIN = 100 + + +class TestClientLimits(CephFSTestCase): + REQUIRE_KCLIENT_REMOTE = True + CLIENTS_REQUIRED = 2 + + def _test_client_pin(self, use_subdir, open_files): + """ + When a client pins an inode in its cache, for example because the file is held open, + it should reject requests from the MDS to trim these caps. The MDS should complain + to the user that it is unable to enforce its cache size limits because of this + objectionable client. + + :param use_subdir: whether to put test files in a subdir or use root + """ + + self.config_set('mds', 'mds_cache_memory_limit', "1K") + self.config_set('mds', 'mds_recall_max_caps', int(open_files/2)) + self.config_set('mds', 'mds_recall_warning_threshold', open_files) + + mds_min_caps_per_client = int(self.config_get('mds.a', "mds_min_caps_per_client")) + self.config_set('mds', 'mds_min_caps_working_set', mds_min_caps_per_client) + mds_recall_warning_decay_rate = float(self.config_get('mds.a', "mds_recall_warning_decay_rate")) + self.assertGreaterEqual(open_files, mds_min_caps_per_client) + + mount_a_client_id = self.mount_a.get_global_id() + path = "subdir" if use_subdir else "." + open_proc = self.mount_a.open_n_background(path, open_files) + + # Client should now hold: + # `open_files` caps for the open files + # 1 cap for root + # 1 cap for subdir + self.wait_until_equal(lambda: self.get_session(mount_a_client_id)['num_caps'], + open_files + (2 if use_subdir else 1), + timeout=600, + reject_fn=lambda x: x > open_files + 2) + + # MDS should not be happy about that, as the client is failing to comply + # with the SESSION_RECALL messages it is being sent + self.wait_for_health("MDS_CLIENT_RECALL", mds_recall_warning_decay_rate*2) + + # We can also test that the MDS health warning for oversized + # cache is functioning as intended. 
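+ # (with mds_cache_memory_limit forced to 1K above, the pinned caps keep
+ # the cache permanently over its limit)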
+ self.wait_for_health("MDS_CACHE_OVERSIZED", mds_recall_warning_decay_rate*2) + + # When the client closes the files, it should retain only as many caps as allowed + # under the SESSION_RECALL policy + log.info("Terminating process holding files open") + open_proc.stdin.close() + try: + open_proc.wait() + except CommandFailedError: + # We killed it, so it raises an error + pass + + # The remaining caps should comply with the numbers sent from MDS in SESSION_RECALL message, + # which depend on the caps outstanding, cache size and overall ratio + def expected_caps(): + num_caps = self.get_session(mount_a_client_id)['num_caps'] + if num_caps <= mds_min_caps_per_client: + return True + else: + return False + + self.wait_until_true(expected_caps, timeout=60) + + @needs_trimming + def test_client_pin_root(self): + self._test_client_pin(False, 400) + + @needs_trimming + def test_client_pin(self): + self._test_client_pin(True, 800) + + @needs_trimming + def test_client_pin_mincaps(self): + self._test_client_pin(True, 200) + + def test_client_min_caps_working_set(self): + """ + When a client has inodes pinned in its cache (open files), that the MDS + will not warn about the client not responding to cache pressure when + the number of caps is below mds_min_caps_working_set. + """ + + # Set MDS cache memory limit to a low value that will make the MDS to + # ask the client to trim the caps. + cache_memory_limit = "1K" + open_files = 400 + + self.config_set('mds', 'mds_cache_memory_limit', cache_memory_limit) + self.config_set('mds', 'mds_recall_max_caps', int(open_files/2)) + self.config_set('mds', 'mds_recall_warning_threshold', open_files) + self.config_set('mds', 'mds_min_caps_working_set', open_files*2) + + mds_min_caps_per_client = int(self.config_get('mds.a', "mds_min_caps_per_client")) + mds_recall_warning_decay_rate = float(self.config_get('mds.a', "mds_recall_warning_decay_rate")) + self.assertGreaterEqual(open_files, mds_min_caps_per_client) + + mount_a_client_id = self.mount_a.get_global_id() + self.mount_a.open_n_background("subdir", open_files) + + # Client should now hold: + # `open_files` caps for the open files + # 1 cap for root + # 1 cap for subdir + self.wait_until_equal(lambda: self.get_session(mount_a_client_id)['num_caps'], + open_files + 2, + timeout=600, + reject_fn=lambda x: x > open_files + 2) + + # We can also test that the MDS health warning for oversized + # cache is functioning as intended. + self.wait_for_health("MDS_CACHE_OVERSIZED", mds_recall_warning_decay_rate*2) + + try: + # MDS should not be happy about that but it's not sending + # MDS_CLIENT_RECALL warnings because the client's caps are below + # mds_min_caps_working_set. + self.wait_for_health("MDS_CLIENT_RECALL", mds_recall_warning_decay_rate*2) + except TestTimeoutError: + pass + else: + raise RuntimeError("expected no client recall warning") + + def test_cap_acquisition_throttle_readdir(self): + """ + Mostly readdir acquires caps faster than the mds recalls, so the cap + acquisition via readdir is throttled by retrying the readdir after + a fraction of second (0.5) by default when throttling condition is met. + """ + + max_caps_per_client = 500 + cap_acquisition_throttle = 250 + + self.config_set('mds', 'mds_max_caps_per_client', max_caps_per_client) + self.config_set('mds', 'mds_session_cap_acquisition_throttle', cap_acquisition_throttle) + + # Create 1500 files split across 6 directories, 250 each. 
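+ # (each directory gets exactly cap_acquisition_throttle files, so the
+ # recursive readdir below acquires caps fast enough to hit the throttle)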
+ for i in range(1, 7): + self.mount_a.create_n_files("dir{0}/file".format(i), cap_acquisition_throttle, sync=True) + + mount_a_client_id = self.mount_a.get_global_id() + + # recursive readdir + self.mount_a.run_shell_payload("find | wc") + + # validate cap_acquisition decay counter after readdir to exceed throttle count i.e 250 + cap_acquisition_value = self.get_session(mount_a_client_id)['cap_acquisition']['value'] + self.assertGreaterEqual(cap_acquisition_value, cap_acquisition_throttle) + + # validate the throttle condition to be hit atleast once + cap_acquisition_throttle_hit_count = self.perf_dump()['mds_server']['cap_acquisition_throttle'] + self.assertGreaterEqual(cap_acquisition_throttle_hit_count, 1) + + def test_client_release_bug(self): + """ + When a client has a bug (which we will simulate) preventing it from releasing caps, + the MDS should notice that releases are not being sent promptly, and generate a health + metric to that effect. + """ + + # The debug hook to inject the failure only exists in the fuse client + if not isinstance(self.mount_a, FuseMount): + raise SkipTest("Require FUSE client to inject client release failure") + + self.set_conf('client.{0}'.format(self.mount_a.client_id), 'client inject release failure', 'true') + self.mount_a.teardown() + self.mount_a.mount() + self.mount_a.wait_until_mounted() + mount_a_client_id = self.mount_a.get_global_id() + + # Client A creates a file. He will hold the write caps on the file, and later (simulated bug) fail + # to comply with the MDSs request to release that cap + self.mount_a.run_shell(["touch", "file1"]) + + # Client B tries to stat the file that client A created + rproc = self.mount_b.write_background("file1") + + # After session_timeout, we should see a health warning (extra lag from + # MDS beacon period) + session_timeout = self.fs.get_var("session_timeout") + self.wait_for_health("MDS_CLIENT_LATE_RELEASE", session_timeout + 10) + + # Client B should still be stuck + self.assertFalse(rproc.finished) + + # Kill client A + self.mount_a.kill() + self.mount_a.kill_cleanup() + + # Client B should complete + self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id]) + rproc.wait() + + def test_client_oldest_tid(self): + """ + When a client does not advance its oldest tid, the MDS should notice that + and generate health warnings. + """ + + # num of requests client issues + max_requests = 1000 + + # The debug hook to inject the failure only exists in the fuse client + if not isinstance(self.mount_a, FuseMount): + raise SkipTest("Require FUSE client to inject client release failure") + + self.set_conf('client', 'client inject fixed oldest tid', 'true') + self.mount_a.teardown() + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + self.fs.mds_asok(['config', 'set', 'mds_max_completed_requests', '{0}'.format(max_requests)]) + + # Create lots of files + self.mount_a.create_n_files("testdir/file1", max_requests + 100) + + # Create a few files synchronously. This makes sure previous requests are completed + self.mount_a.create_n_files("testdir/file2", 5, True) + + # Wait for the health warnings. 
Assume mds can handle 10 request per second at least + self.wait_for_health("MDS_CLIENT_OLDEST_TID", max_requests // 10) + + def _test_client_cache_size(self, mount_subdir): + """ + check if client invalidate kernel dcache according to its cache size config + """ + + # The debug hook to inject the failure only exists in the fuse client + if not isinstance(self.mount_a, FuseMount): + raise SkipTest("Require FUSE client to inject client release failure") + + if mount_subdir: + # fuse assigns a fix inode number (1) to root inode. But in mounting into + # subdir case, the actual inode number of root is not 1. This mismatch + # confuses fuse_lowlevel_notify_inval_entry() when invalidating dentries + # in root directory. + self.mount_a.run_shell(["mkdir", "subdir"]) + self.mount_a.umount_wait() + self.set_conf('client', 'client mountpoint', '/subdir') + self.mount_a.mount() + self.mount_a.wait_until_mounted() + root_ino = self.mount_a.path_to_ino(".") + self.assertEqual(root_ino, 1); + + dir_path = os.path.join(self.mount_a.mountpoint, "testdir") + + mkdir_script = dedent(""" + import os + os.mkdir("{path}") + for n in range(0, {num_dirs}): + os.mkdir("{path}/dir{{0}}".format(n)) + """) + + num_dirs = 1000 + self.mount_a.run_python(mkdir_script.format(path=dir_path, num_dirs=num_dirs)) + self.mount_a.run_shell(["sync"]) + + dentry_count, dentry_pinned_count = self.mount_a.get_dentry_count() + self.assertGreaterEqual(dentry_count, num_dirs) + self.assertGreaterEqual(dentry_pinned_count, num_dirs) + + cache_size = num_dirs // 10 + self.mount_a.set_cache_size(cache_size) + + def trimmed(): + dentry_count, dentry_pinned_count = self.mount_a.get_dentry_count() + log.info("waiting, dentry_count, dentry_pinned_count: {0}, {1}".format( + dentry_count, dentry_pinned_count + )) + if dentry_count > cache_size or dentry_pinned_count > cache_size: + return False + + return True + + self.wait_until_true(trimmed, 30) + + @needs_trimming + def test_client_cache_size(self): + self._test_client_cache_size(False) + self._test_client_cache_size(True) + + def test_client_max_caps(self): + """ + That the MDS will not let a client sit above mds_max_caps_per_client caps. 
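+
+ (Presumably the MDS recalls caps from any session that exceeds the limit,
+ so after opening far more files than the limit the session is expected to
+ drop back under it.)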
+ """ + + mds_min_caps_per_client = int(self.config_get('mds.a', "mds_min_caps_per_client")) + mds_max_caps_per_client = 2*mds_min_caps_per_client + self.config_set('mds', 'mds_max_caps_per_client', mds_max_caps_per_client) + + self.mount_a.create_n_files("foo/", 3*mds_max_caps_per_client, sync=True) + + mount_a_client_id = self.mount_a.get_global_id() + def expected_caps(): + num_caps = self.get_session(mount_a_client_id)['num_caps'] + if num_caps <= mds_max_caps_per_client: + return True + else: + return False + + self.wait_until_true(expected_caps, timeout=60) diff --git a/qa/tasks/cephfs/test_client_recovery.py b/qa/tasks/cephfs/test_client_recovery.py new file mode 100644 index 00000000..c7806b71 --- /dev/null +++ b/qa/tasks/cephfs/test_client_recovery.py @@ -0,0 +1,633 @@ + +""" +Teuthology task for exercising CephFS client recovery +""" + +import logging +from textwrap import dedent +import time +import distutils.version as version +import re +import os + +from teuthology.orchestra.run import CommandFailedError, ConnectionLostError +from tasks.cephfs.fuse_mount import FuseMount +from tasks.cephfs.cephfs_test_case import CephFSTestCase +from teuthology.packaging import get_package_version +from unittest import SkipTest + + +log = logging.getLogger(__name__) + + +# Arbitrary timeouts for operations involving restarting +# an MDS or waiting for it to come up +MDS_RESTART_GRACE = 60 + + +class TestClientNetworkRecovery(CephFSTestCase): + REQUIRE_KCLIENT_REMOTE = True + REQUIRE_ONE_CLIENT_REMOTE = True + CLIENTS_REQUIRED = 2 + + LOAD_SETTINGS = ["mds_reconnect_timeout", "ms_max_backoff"] + + # Environment references + mds_reconnect_timeout = None + ms_max_backoff = None + + def test_network_death(self): + """ + Simulate software freeze or temporary network failure. + + Check that the client blocks I/O during failure, and completes + I/O after failure. 
+ """ + + session_timeout = self.fs.get_var("session_timeout") + self.fs.mds_asok(['config', 'set', 'mds_defer_session_stale', 'false']) + + # We only need one client + self.mount_b.umount_wait() + + # Initially our one client session should be visible + client_id = self.mount_a.get_global_id() + ls_data = self._session_list() + self.assert_session_count(1, ls_data) + self.assertEqual(ls_data[0]['id'], client_id) + self.assert_session_state(client_id, "open") + + # ...and capable of doing I/O without blocking + self.mount_a.create_files() + + # ...but if we turn off the network + self.fs.set_clients_block(True) + + # ...and try and start an I/O + write_blocked = self.mount_a.write_background() + + # ...then it should block + self.assertFalse(write_blocked.finished) + self.assert_session_state(client_id, "open") + time.sleep(session_timeout * 1.5) # Long enough for MDS to consider session stale + self.assertFalse(write_blocked.finished) + self.assert_session_state(client_id, "stale") + + # ...until we re-enable I/O + self.fs.set_clients_block(False) + + # ...when it should complete promptly + a = time.time() + self.wait_until_true(lambda: write_blocked.finished, self.ms_max_backoff * 2) + write_blocked.wait() # Already know we're finished, wait() to raise exception on errors + recovery_time = time.time() - a + log.info("recovery time: {0}".format(recovery_time)) + self.assert_session_state(client_id, "open") + + +class TestClientRecovery(CephFSTestCase): + REQUIRE_KCLIENT_REMOTE = True + CLIENTS_REQUIRED = 2 + + LOAD_SETTINGS = ["mds_reconnect_timeout", "ms_max_backoff"] + + # Environment references + mds_reconnect_timeout = None + ms_max_backoff = None + + def test_basic(self): + # Check that two clients come up healthy and see each others' files + # ===================================================== + self.mount_a.create_files() + self.mount_a.check_files() + self.mount_a.umount_wait() + + self.mount_b.check_files() + + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + # Check that the admin socket interface is correctly reporting + # two sessions + # ===================================================== + ls_data = self._session_list() + self.assert_session_count(2, ls_data) + + self.assertSetEqual( + set([l['id'] for l in ls_data]), + {self.mount_a.get_global_id(), self.mount_b.get_global_id()} + ) + + def test_restart(self): + # Check that after an MDS restart both clients reconnect and continue + # to handle I/O + # ===================================================== + self.fs.mds_fail_restart() + self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE) + + self.mount_a.create_destroy() + self.mount_b.create_destroy() + + def _session_num_caps(self, client_id): + ls_data = self.fs.mds_asok(['session', 'ls']) + return int(self._session_by_id(ls_data).get(client_id, {'num_caps': None})['num_caps']) + + def test_reconnect_timeout(self): + # Reconnect timeout + # ================= + # Check that if I stop an MDS and a client goes away, the MDS waits + # for the reconnect period + self.fs.mds_stop() + self.fs.mds_fail() + + mount_a_client_id = self.mount_a.get_global_id() + self.mount_a.umount_wait(force=True) + + self.fs.mds_restart() + + self.fs.wait_for_state('up:reconnect', reject='up:active', timeout=MDS_RESTART_GRACE) + # Check that the MDS locally reports its state correctly + status = self.fs.mds_asok(['status']) + self.assertIn("reconnect_status", status) + + ls_data = self._session_list() + self.assert_session_count(2, ls_data) + + # The session for the dead 
client should have the 'reconnect' flag set + self.assertTrue(self.get_session(mount_a_client_id)['reconnecting']) + + # Wait for the reconnect state to clear, this should take the + # reconnect timeout period. + in_reconnect_for = self.fs.wait_for_state('up:active', timeout=self.mds_reconnect_timeout * 2) + # Check that the period we waited to enter active is within a factor + # of two of the reconnect timeout. + self.assertGreater(in_reconnect_for, self.mds_reconnect_timeout // 2, + "Should have been in reconnect phase for {0} but only took {1}".format( + self.mds_reconnect_timeout, in_reconnect_for + )) + + self.assert_session_count(1) + + # Check that the client that timed out during reconnect can + # mount again and do I/O + self.mount_a.mount() + self.mount_a.wait_until_mounted() + self.mount_a.create_destroy() + + self.assert_session_count(2) + + def test_reconnect_eviction(self): + # Eviction during reconnect + # ========================= + mount_a_client_id = self.mount_a.get_global_id() + + self.fs.mds_stop() + self.fs.mds_fail() + + # The mount goes away while the MDS is offline + self.mount_a.kill() + + # wait for it to die + time.sleep(5) + + self.fs.mds_restart() + + # Enter reconnect phase + self.fs.wait_for_state('up:reconnect', reject='up:active', timeout=MDS_RESTART_GRACE) + self.assert_session_count(2) + + # Evict the stuck client + self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id]) + self.assert_session_count(1) + + # Observe that we proceed to active phase without waiting full reconnect timeout + evict_til_active = self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE) + # Once we evict the troublemaker, the reconnect phase should complete + # in well under the reconnect timeout. + self.assertLess(evict_til_active, self.mds_reconnect_timeout * 0.5, + "reconnect did not complete soon enough after eviction, took {0}".format( + evict_til_active + )) + + # We killed earlier so must clean up before trying to use again + self.mount_a.kill_cleanup() + + # Bring the client back + self.mount_a.mount() + self.mount_a.wait_until_mounted() + self.mount_a.create_destroy() + + def _test_stale_caps(self, write): + session_timeout = self.fs.get_var("session_timeout") + + # Capability release from stale session + # ===================================== + if write: + cap_holder = self.mount_a.open_background() + else: + self.mount_a.run_shell(["touch", "background_file"]) + self.mount_a.umount_wait() + self.mount_a.mount() + self.mount_a.wait_until_mounted() + cap_holder = self.mount_a.open_background(write=False) + + self.assert_session_count(2) + mount_a_gid = self.mount_a.get_global_id() + + # Wait for the file to be visible from another client, indicating + # that mount_a has completed its network ops + self.mount_b.wait_for_visible() + + # Simulate client death + self.mount_a.kill() + + # wait for it to die so it doesn't voluntarily release buffer cap + time.sleep(5) + + try: + # Now, after session_timeout seconds, the waiter should + # complete their operation when the MDS marks the holder's + # session stale. 
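+ # The handover is not expected to be exact, since stale detection is not
+ # instantaneous; the assertion below only requires the wait to land within
+ # roughly a factor of two of session_timeout.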
+ cap_waiter = self.mount_b.write_background() + a = time.time() + cap_waiter.wait() + b = time.time() + + # Should have succeeded + self.assertEqual(cap_waiter.exitstatus, 0) + + if write: + self.assert_session_count(1) + else: + self.assert_session_state(mount_a_gid, "stale") + + cap_waited = b - a + log.info("cap_waiter waited {0}s".format(cap_waited)) + self.assertTrue(session_timeout / 2.0 <= cap_waited <= session_timeout * 2.0, + "Capability handover took {0}, expected approx {1}".format( + cap_waited, session_timeout + )) + + cap_holder.stdin.close() + try: + cap_holder.wait() + except (CommandFailedError, ConnectionLostError): + # We killed it (and possibly its node), so it raises an error + pass + finally: + # teardown() doesn't quite handle this case cleanly, so help it out + self.mount_a.kill_cleanup() + + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + def test_stale_read_caps(self): + self._test_stale_caps(False) + + def test_stale_write_caps(self): + self._test_stale_caps(True) + + def test_evicted_caps(self): + # Eviction while holding a capability + # =================================== + + session_timeout = self.fs.get_var("session_timeout") + + # Take out a write capability on a file on client A, + # and then immediately kill it. + cap_holder = self.mount_a.open_background() + mount_a_client_id = self.mount_a.get_global_id() + + # Wait for the file to be visible from another client, indicating + # that mount_a has completed its network ops + self.mount_b.wait_for_visible() + + # Simulate client death + self.mount_a.kill() + + # wait for it to die so it doesn't voluntarily release buffer cap + time.sleep(5) + + try: + # The waiter should get stuck waiting for the capability + # held on the MDS by the now-dead client A + cap_waiter = self.mount_b.write_background() + time.sleep(5) + self.assertFalse(cap_waiter.finished) + + self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id]) + # Now, because I evicted the old holder of the capability, it should + # immediately get handed over to the waiter + a = time.time() + cap_waiter.wait() + b = time.time() + cap_waited = b - a + log.info("cap_waiter waited {0}s".format(cap_waited)) + # This is the check that it happened 'now' rather than waiting + # for the session timeout + self.assertLess(cap_waited, session_timeout / 2.0, + "Capability handover took {0}, expected less than {1}".format( + cap_waited, session_timeout / 2.0 + )) + + cap_holder.stdin.close() + try: + cap_holder.wait() + except (CommandFailedError, ConnectionLostError): + # We killed it (and possibly its node), so it raises an error + pass + finally: + self.mount_a.kill_cleanup() + + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + def test_trim_caps(self): + # Trim capability when reconnecting MDS + # =================================== + + count = 500 + # Create lots of files + for i in range(count): + self.mount_a.run_shell(["touch", "f{0}".format(i)]) + + # Populate mount_b's cache + self.mount_b.run_shell(["ls", "-l"]) + + client_id = self.mount_b.get_global_id() + num_caps = self._session_num_caps(client_id) + self.assertGreaterEqual(num_caps, count) + + # Restart MDS. 
client should trim its cache when reconnecting to the MDS + self.fs.mds_fail_restart() + self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE) + + num_caps = self._session_num_caps(client_id) + self.assertLess(num_caps, count, + "should have less than {0} capabilities, have {1}".format( + count, num_caps + )) + + def _is_flockable(self): + a_version_str = get_package_version(self.mount_a.client_remote, "fuse") + b_version_str = get_package_version(self.mount_b.client_remote, "fuse") + flock_version_str = "2.9" + + version_regex = re.compile(r"[0-9\.]+") + a_result = version_regex.match(a_version_str) + self.assertTrue(a_result) + b_result = version_regex.match(b_version_str) + self.assertTrue(b_result) + a_version = version.StrictVersion(a_result.group()) + b_version = version.StrictVersion(b_result.group()) + flock_version=version.StrictVersion(flock_version_str) + + if (a_version >= flock_version and b_version >= flock_version): + log.info("flock locks are available") + return True + else: + log.info("not testing flock locks, machines have versions {av} and {bv}".format( + av=a_version_str,bv=b_version_str)) + return False + + def test_filelock(self): + """ + Check that file lock doesn't get lost after an MDS restart + """ + + flockable = self._is_flockable() + lock_holder = self.mount_a.lock_background(do_flock=flockable) + + self.mount_b.wait_for_visible("background_file-2") + self.mount_b.check_filelock(do_flock=flockable) + + self.fs.mds_fail_restart() + self.fs.wait_for_state('up:active', timeout=MDS_RESTART_GRACE) + + self.mount_b.check_filelock(do_flock=flockable) + + # Tear down the background process + lock_holder.stdin.close() + try: + lock_holder.wait() + except (CommandFailedError, ConnectionLostError): + # We killed it, so it raises an error + pass + + def test_filelock_eviction(self): + """ + Check that file lock held by evicted client is given to + waiting client. + """ + if not self._is_flockable(): + self.skipTest("flock is not available") + + lock_holder = self.mount_a.lock_background() + self.mount_b.wait_for_visible("background_file-2") + self.mount_b.check_filelock() + + lock_taker = self.mount_b.lock_and_release() + # Check the taker is waiting (doesn't get it immediately) + time.sleep(2) + self.assertFalse(lock_holder.finished) + self.assertFalse(lock_taker.finished) + + try: + mount_a_client_id = self.mount_a.get_global_id() + self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id]) + + # Evicting mount_a should let mount_b's attempt to take the lock + # succeed + self.wait_until_true(lambda: lock_taker.finished, timeout=10) + finally: + # teardown() doesn't quite handle this case cleanly, so help it out + self.mount_a.kill() + self.mount_a.kill_cleanup() + + # Bring the client back + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + def test_dir_fsync(self): + self._test_fsync(True); + + def test_create_fsync(self): + self._test_fsync(False); + + def _test_fsync(self, dirfsync): + """ + That calls to fsync guarantee visibility of metadata to another + client immediately after the fsyncing client dies. 
+ """ + + # Leave this guy out until he's needed + self.mount_b.umount_wait() + + # Create dir + child dentry on client A, and fsync the dir + path = os.path.join(self.mount_a.mountpoint, "subdir") + self.mount_a.run_python( + dedent(""" + import os + import time + + path = "{path}" + + print("Starting creation...") + start = time.time() + + os.mkdir(path) + dfd = os.open(path, os.O_DIRECTORY) + + fd = open(os.path.join(path, "childfile"), "w") + print("Finished creation in {{0}}s".format(time.time() - start)) + + print("Starting fsync...") + start = time.time() + if {dirfsync}: + os.fsync(dfd) + else: + os.fsync(fd) + print("Finished fsync in {{0}}s".format(time.time() - start)) + """.format(path=path,dirfsync=str(dirfsync))) + ) + + # Immediately kill the MDS and then client A + self.fs.mds_stop() + self.fs.mds_fail() + self.mount_a.kill() + self.mount_a.kill_cleanup() + + # Restart the MDS. Wait for it to come up, it'll have to time out in clientreplay + self.fs.mds_restart() + log.info("Waiting for reconnect...") + self.fs.wait_for_state("up:reconnect") + log.info("Waiting for active...") + self.fs.wait_for_state("up:active", timeout=MDS_RESTART_GRACE + self.mds_reconnect_timeout) + log.info("Reached active...") + + # Is the child dentry visible from mount B? + self.mount_b.mount() + self.mount_b.wait_until_mounted() + self.mount_b.run_shell(["ls", "subdir/childfile"]) + + def test_unmount_for_evicted_client(self): + """Test if client hangs on unmount after evicting the client.""" + mount_a_client_id = self.mount_a.get_global_id() + self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id]) + + self.mount_a.umount_wait(require_clean=True, timeout=30) + + def test_stale_renew(self): + if not isinstance(self.mount_a, FuseMount): + raise SkipTest("Require FUSE client to handle signal STOP/CONT") + + session_timeout = self.fs.get_var("session_timeout") + + self.mount_a.run_shell(["mkdir", "testdir"]) + self.mount_a.run_shell(["touch", "testdir/file1"]) + # populate readdir cache + self.mount_a.run_shell(["ls", "testdir"]) + self.mount_b.run_shell(["ls", "testdir"]) + + # check if readdir cache is effective + initial_readdirs = self.fs.mds_asok(['perf', 'dump', 'mds_server', 'req_readdir_latency']) + self.mount_b.run_shell(["ls", "testdir"]) + current_readdirs = self.fs.mds_asok(['perf', 'dump', 'mds_server', 'req_readdir_latency']) + self.assertEqual(current_readdirs, initial_readdirs); + + mount_b_gid = self.mount_b.get_global_id() + mount_b_pid = self.mount_b.get_client_pid() + # stop ceph-fuse process of mount_b + self.mount_b.client_remote.run(args=["sudo", "kill", "-STOP", mount_b_pid]) + + self.assert_session_state(mount_b_gid, "open") + time.sleep(session_timeout * 1.5) # Long enough for MDS to consider session stale + + self.mount_a.run_shell(["touch", "testdir/file2"]) + self.assert_session_state(mount_b_gid, "stale") + + # resume ceph-fuse process of mount_b + self.mount_b.client_remote.run(args=["sudo", "kill", "-CONT", mount_b_pid]) + # Is the new file visible from mount_b? (caps become invalid after session stale) + self.mount_b.run_shell(["ls", "testdir/file2"]) + + def test_abort_conn(self): + """ + Check that abort_conn() skips closing mds sessions. 
+ """ + if not isinstance(self.mount_a, FuseMount): + raise SkipTest("Testing libcephfs function") + + self.fs.mds_asok(['config', 'set', 'mds_defer_session_stale', 'false']) + session_timeout = self.fs.get_var("session_timeout") + + self.mount_a.umount_wait() + self.mount_b.umount_wait() + + gid_str = self.mount_a.run_python(dedent(""" + import cephfs as libcephfs + cephfs = libcephfs.LibCephFS(conffile='') + cephfs.mount() + client_id = cephfs.get_instance_id() + cephfs.abort_conn() + print(client_id) + """) + ) + gid = int(gid_str); + + self.assert_session_state(gid, "open") + time.sleep(session_timeout * 1.5) # Long enough for MDS to consider session stale + self.assert_session_state(gid, "stale") + + def test_dont_mark_unresponsive_client_stale(self): + """ + Test that an unresponsive client holding caps is not marked stale or + evicted unless another clients wants its caps. + """ + if not isinstance(self.mount_a, FuseMount): + self.skipTest("Require FUSE client to handle signal STOP/CONT") + + # XXX: To conduct this test we need at least two clients since a + # single client is never evcited by MDS. + SESSION_TIMEOUT = 30 + SESSION_AUTOCLOSE = 50 + time_at_beg = time.time() + mount_a_gid = self.mount_a.get_global_id() + _ = self.mount_a.client_pid + self.fs.set_var('session_timeout', SESSION_TIMEOUT) + self.fs.set_var('session_autoclose', SESSION_AUTOCLOSE) + self.assert_session_count(2, self.fs.mds_asok(['session', 'ls'])) + + # test that client holding cap not required by any other client is not + # marked stale when it becomes unresponsive. + self.mount_a.run_shell(['mkdir', 'dir']) + self.mount_a.send_signal('sigstop') + time.sleep(SESSION_TIMEOUT + 2) + self.assert_session_state(mount_a_gid, "open") + + # test that other clients have to wait to get the caps from + # unresponsive client until session_autoclose. 
+ self.mount_b.run_shell(['stat', 'dir']) + self.assert_session_count(1, self.fs.mds_asok(['session', 'ls'])) + self.assertLess(time.time(), time_at_beg + SESSION_AUTOCLOSE) + + self.mount_a.send_signal('sigcont') + + def test_config_session_timeout(self): + self.fs.mds_asok(['config', 'set', 'mds_defer_session_stale', 'false']) + session_timeout = self.fs.get_var("session_timeout") + mount_a_gid = self.mount_a.get_global_id() + + self.fs.mds_asok(['session', 'config', '%s' % mount_a_gid, 'timeout', '%s' % (session_timeout * 2)]) + + self.mount_a.kill(); + + self.assert_session_count(2) + + time.sleep(session_timeout * 1.5) + self.assert_session_state(mount_a_gid, "open") + + time.sleep(session_timeout) + self.assert_session_count(1) + + self.mount_a.kill_cleanup() diff --git a/qa/tasks/cephfs/test_damage.py b/qa/tasks/cephfs/test_damage.py new file mode 100644 index 00000000..d03e027e --- /dev/null +++ b/qa/tasks/cephfs/test_damage.py @@ -0,0 +1,569 @@ +import json +import logging +import errno +import re +from teuthology.contextutil import MaxWhileTries +from teuthology.exceptions import CommandFailedError +from teuthology.orchestra.run import wait +from tasks.cephfs.fuse_mount import FuseMount +from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology + +DAMAGED_ON_START = "damaged_on_start" +DAMAGED_ON_LS = "damaged_on_ls" +CRASHED = "server crashed" +NO_DAMAGE = "no damage" +READONLY = "readonly" +FAILED_CLIENT = "client failed" +FAILED_SERVER = "server failed" + +# An EIO in response to a stat from the client +EIO_ON_LS = "eio" + +# An EIO, but nothing in damage table (not ever what we expect) +EIO_NO_DAMAGE = "eio without damage entry" + + +log = logging.getLogger(__name__) + + +class TestDamage(CephFSTestCase): + def _simple_workload_write(self): + self.mount_a.run_shell(["mkdir", "subdir"]) + self.mount_a.write_n_mb("subdir/sixmegs", 6) + return self.mount_a.stat("subdir/sixmegs") + + def is_marked_damaged(self, rank): + mds_map = self.fs.get_mds_map() + return rank in mds_map['damaged'] + + @for_teuthology #459s + def test_object_deletion(self): + """ + That the MDS has a clean 'damaged' response to loss of any single metadata object + """ + + self._simple_workload_write() + + # Hmm, actually it would be nice to permute whether the metadata pool + # state contains sessions or not, but for the moment close this session + # to avoid waiting through reconnect on every MDS start. + self.mount_a.umount_wait() + for mds_name in self.fs.get_active_names(): + self.fs.mds_asok(["flush", "journal"], mds_name) + + self.fs.mds_stop() + self.fs.mds_fail() + + self.fs.rados(['export', '/tmp/metadata.bin']) + + def is_ignored(obj_id, dentry=None): + """ + A filter to avoid redundantly mutating many similar objects (e.g. + stray dirfrags) or similar dentries (e.g. stray dir dentries) + """ + if re.match("60.\.00000000", obj_id) and obj_id != "600.00000000": + return True + + if dentry and obj_id == "100.00000000": + if re.match("stray.+_head", dentry) and dentry != "stray0_head": + return True + + return False + + def get_path(obj_id, dentry=None): + """ + What filesystem path does this object or dentry correspond to? i.e. + what should I poke to see EIO after damaging it? 
+ """ + + if obj_id == "1.00000000" and dentry == "subdir_head": + return "./subdir" + elif obj_id == "10000000000.00000000" and dentry == "sixmegs_head": + return "./subdir/sixmegs" + + # None means ls will do an "ls -R" in hope of seeing some errors + return None + + objects = self.fs.rados(["ls"]).split("\n") + objects = [o for o in objects if not is_ignored(o)] + + # Find all objects with an OMAP header + omap_header_objs = [] + for o in objects: + header = self.fs.rados(["getomapheader", o]) + # The rados CLI wraps the header output in a hex-printed style + header_bytes = int(re.match("header \((.+) bytes\)", header).group(1)) + if header_bytes > 0: + omap_header_objs.append(o) + + # Find all OMAP key/vals + omap_keys = [] + for o in objects: + keys_str = self.fs.rados(["listomapkeys", o]) + if keys_str: + for key in keys_str.split("\n"): + if not is_ignored(o, key): + omap_keys.append((o, key)) + + # Find objects that have data in their bodies + data_objects = [] + for obj_id in objects: + stat_out = self.fs.rados(["stat", obj_id]) + size = int(re.match(".+, size (.+)$", stat_out).group(1)) + if size > 0: + data_objects.append(obj_id) + + # Define the various forms of damage we will inflict + class MetadataMutation(object): + def __init__(self, obj_id_, desc_, mutate_fn_, expectation_, ls_path=None): + self.obj_id = obj_id_ + self.desc = desc_ + self.mutate_fn = mutate_fn_ + self.expectation = expectation_ + if ls_path is None: + self.ls_path = "." + else: + self.ls_path = ls_path + + def __eq__(self, other): + return self.desc == other.desc + + def __hash__(self): + return hash(self.desc) + + junk = "deadbeef" * 10 + mutations = [] + + # Removals + for o in objects: + if o in [ + # JournalPointers are auto-replaced if missing (same path as upgrade) + "400.00000000", + # Missing dirfrags for non-system dirs result in empty directory + "10000000000.00000000", + # PurgeQueue is auto-created if not found on startup + "500.00000000", + # open file table is auto-created if not found on startup + "mds0_openfiles.0" + ]: + expectation = NO_DAMAGE + else: + expectation = DAMAGED_ON_START + + log.info("Expectation on rm '{0}' will be '{1}'".format( + o, expectation + )) + + mutations.append(MetadataMutation( + o, + "Delete {0}".format(o), + lambda o=o: self.fs.rados(["rm", o]), + expectation + )) + + # Blatant corruptions + for obj_id in data_objects: + if obj_id == "500.00000000": + # purge queue corruption results in read-only FS + mutations.append(MetadataMutation( + obj_id, + "Corrupt {0}".format(obj_id), + lambda o=obj_id: self.fs.rados(["put", o, "-"], stdin_data=junk), + READONLY + )) + else: + mutations.append(MetadataMutation( + obj_id, + "Corrupt {0}".format(obj_id), + lambda o=obj_id: self.fs.rados(["put", o, "-"], stdin_data=junk), + DAMAGED_ON_START + )) + + # Truncations + for o in data_objects: + if o == "500.00000000": + # The PurgeQueue is allowed to be empty: Journaler interprets + # an empty header object as an empty journal. 
+ expectation = NO_DAMAGE + else: + expectation = DAMAGED_ON_START + + mutations.append( + MetadataMutation( + o, + "Truncate {0}".format(o), + lambda o=o: self.fs.rados(["truncate", o, "0"]), + expectation + )) + + # OMAP value corruptions + for o, k in omap_keys: + if o.startswith("100."): + # Anything in rank 0's 'mydir' + expectation = DAMAGED_ON_START + else: + expectation = EIO_ON_LS + + mutations.append( + MetadataMutation( + o, + "Corrupt omap key {0}:{1}".format(o, k), + lambda o=o,k=k: self.fs.rados(["setomapval", o, k, junk]), + expectation, + get_path(o, k) + ) + ) + + # OMAP header corruptions + for o in omap_header_objs: + if re.match("60.\.00000000", o) \ + or o in ["1.00000000", "100.00000000", "mds0_sessionmap"]: + expectation = DAMAGED_ON_START + else: + expectation = NO_DAMAGE + + log.info("Expectation on corrupt header '{0}' will be '{1}'".format( + o, expectation + )) + + mutations.append( + MetadataMutation( + o, + "Corrupt omap header on {0}".format(o), + lambda o=o: self.fs.rados(["setomapheader", o, junk]), + expectation + ) + ) + + results = {} + + for mutation in mutations: + log.info("Applying mutation '{0}'".format(mutation.desc)) + + # Reset MDS state + self.mount_a.umount_wait(force=True) + self.fs.mds_stop() + self.fs.mds_fail() + self.fs.mon_manager.raw_cluster_cmd('mds', 'repaired', '0') + + # Reset RADOS pool state + self.fs.rados(['import', '/tmp/metadata.bin']) + + # Inject the mutation + mutation.mutate_fn() + + # Try starting the MDS + self.fs.mds_restart() + + # How long we'll wait between starting a daemon and expecting + # it to make it through startup, and potentially declare itself + # damaged to the mon cluster. + startup_timeout = 60 + + if mutation.expectation not in (EIO_ON_LS, DAMAGED_ON_LS, NO_DAMAGE): + if mutation.expectation == DAMAGED_ON_START: + # The MDS may pass through active before making it to damaged + try: + self.wait_until_true(lambda: self.is_marked_damaged(0), startup_timeout) + except RuntimeError: + pass + + # Wait for MDS to either come up or go into damaged state + try: + self.wait_until_true(lambda: self.is_marked_damaged(0) or self.fs.are_daemons_healthy(), startup_timeout) + except RuntimeError: + crashed = False + # Didn't make it to healthy or damaged, did it crash? + for daemon_id, daemon in self.fs.mds_daemons.items(): + if daemon.proc and daemon.proc.finished: + crashed = True + log.error("Daemon {0} crashed!".format(daemon_id)) + daemon.proc = None # So that subsequent stop() doesn't raise error + if not crashed: + # Didn't go health, didn't go damaged, didn't crash, so what? 
+ raise + else: + log.info("Result: Mutation '{0}' led to crash".format(mutation.desc)) + results[mutation] = CRASHED + continue + if self.is_marked_damaged(0): + log.info("Result: Mutation '{0}' led to DAMAGED state".format(mutation.desc)) + results[mutation] = DAMAGED_ON_START + continue + else: + log.info("Mutation '{0}' did not prevent MDS startup, attempting ls...".format(mutation.desc)) + else: + try: + self.wait_until_true(self.fs.are_daemons_healthy, 60) + except RuntimeError: + log.info("Result: Mutation '{0}' should have left us healthy, actually not.".format(mutation.desc)) + if self.is_marked_damaged(0): + results[mutation] = DAMAGED_ON_START + else: + results[mutation] = FAILED_SERVER + continue + log.info("Daemons came up after mutation '{0}', proceeding to ls".format(mutation.desc)) + + # MDS is up, should go damaged on ls or client mount + self.mount_a.mount() + self.mount_a.wait_until_mounted() + if mutation.ls_path == ".": + proc = self.mount_a.run_shell(["ls", "-R", mutation.ls_path], wait=False) + else: + proc = self.mount_a.stat(mutation.ls_path, wait=False) + + if mutation.expectation == DAMAGED_ON_LS: + try: + self.wait_until_true(lambda: self.is_marked_damaged(0), 60) + log.info("Result: Mutation '{0}' led to DAMAGED state after ls".format(mutation.desc)) + results[mutation] = DAMAGED_ON_LS + except RuntimeError: + if self.fs.are_daemons_healthy(): + log.error("Result: Failed to go damaged on mutation '{0}', actually went active".format( + mutation.desc)) + results[mutation] = NO_DAMAGE + else: + log.error("Result: Failed to go damaged on mutation '{0}'".format(mutation.desc)) + results[mutation] = FAILED_SERVER + elif mutation.expectation == READONLY: + proc = self.mount_a.run_shell(["mkdir", "foo"], wait=False) + try: + proc.wait() + except CommandFailedError: + stderr = proc.stderr.getvalue() + log.info(stderr) + if "Read-only file system".lower() in stderr.lower(): + pass + else: + raise + else: + try: + wait([proc], 20) + log.info("Result: Mutation '{0}' did not caused DAMAGED state".format(mutation.desc)) + results[mutation] = NO_DAMAGE + except MaxWhileTries: + log.info("Result: Failed to complete client IO on mutation '{0}'".format(mutation.desc)) + results[mutation] = FAILED_CLIENT + except CommandFailedError as e: + if e.exitstatus == errno.EIO: + log.info("Result: EIO on client") + results[mutation] = EIO_ON_LS + else: + log.info("Result: unexpected error {0} on client".format(e)) + results[mutation] = FAILED_CLIENT + + if mutation.expectation == EIO_ON_LS: + # EIOs mean something handled by DamageTable: assert that it has + # been populated + damage = json.loads( + self.fs.mon_manager.raw_cluster_cmd( + 'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]), "damage", "ls", '--format=json-pretty')) + if len(damage) == 0: + results[mutation] = EIO_NO_DAMAGE + + failures = [(mutation, result) for (mutation, result) in results.items() if mutation.expectation != result] + if failures: + log.error("{0} mutations had unexpected outcomes:".format(len(failures))) + for mutation, result in failures: + log.error(" Expected '{0}' actually '{1}' from '{2}'".format( + mutation.expectation, result, mutation.desc + )) + raise RuntimeError("{0} mutations had unexpected outcomes".format(len(failures))) + else: + log.info("All {0} mutations had expected outcomes".format(len(mutations))) + + def test_damaged_dentry(self): + # Damage to dentrys is interesting because it leaves the + # directory's `complete` flag in a subtle state where + # we have marked the dir complete 
in order that folks + # can access it, but in actual fact there is a dentry + # missing + self.mount_a.run_shell(["mkdir", "subdir/"]) + + self.mount_a.run_shell(["touch", "subdir/file_undamaged"]) + self.mount_a.run_shell(["touch", "subdir/file_to_be_damaged"]) + + subdir_ino = self.mount_a.path_to_ino("subdir") + + self.mount_a.umount_wait() + for mds_name in self.fs.get_active_names(): + self.fs.mds_asok(["flush", "journal"], mds_name) + + self.fs.mds_stop() + self.fs.mds_fail() + + # Corrupt a dentry + junk = "deadbeef" * 10 + dirfrag_obj = "{0:x}.00000000".format(subdir_ino) + self.fs.rados(["setomapval", dirfrag_obj, "file_to_be_damaged_head", junk]) + + # Start up and try to list it + self.fs.mds_restart() + self.fs.wait_for_daemons() + + self.mount_a.mount() + self.mount_a.wait_until_mounted() + dentries = self.mount_a.ls("subdir/") + + # The damaged guy should have disappeared + self.assertEqual(dentries, ["file_undamaged"]) + + # I should get ENOENT if I try and read it normally, because + # the dir is considered complete + try: + self.mount_a.stat("subdir/file_to_be_damaged", wait=True) + except CommandFailedError as e: + self.assertEqual(e.exitstatus, errno.ENOENT) + else: + raise AssertionError("Expected ENOENT") + + # The fact that there is damaged should have bee recorded + damage = json.loads( + self.fs.mon_manager.raw_cluster_cmd( + 'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]), + "damage", "ls", '--format=json-pretty')) + self.assertEqual(len(damage), 1) + damage_id = damage[0]['id'] + + # If I try to create a dentry with the same name as the damaged guy + # then that should be forbidden + try: + self.mount_a.touch("subdir/file_to_be_damaged") + except CommandFailedError as e: + self.assertEqual(e.exitstatus, errno.EIO) + else: + raise AssertionError("Expected EIO") + + # Attempting that touch will clear the client's complete flag, now + # when I stat it I'll get EIO instead of ENOENT + try: + self.mount_a.stat("subdir/file_to_be_damaged", wait=True) + except CommandFailedError as e: + if isinstance(self.mount_a, FuseMount): + self.assertEqual(e.exitstatus, errno.EIO) + else: + # Kernel client handles this case differently + self.assertEqual(e.exitstatus, errno.ENOENT) + else: + raise AssertionError("Expected EIO") + + nfiles = self.mount_a.getfattr("./subdir", "ceph.dir.files") + self.assertEqual(nfiles, "2") + + self.mount_a.umount_wait() + + # Now repair the stats + scrub_json = self.fs.rank_tell(["scrub", "start", "/subdir", "repair"]) + log.info(json.dumps(scrub_json, indent=2)) + + self.assertEqual(scrub_json["passed_validation"], False) + self.assertEqual(scrub_json["raw_stats"]["checked"], True) + self.assertEqual(scrub_json["raw_stats"]["passed"], False) + + # Check that the file count is now correct + self.mount_a.mount() + self.mount_a.wait_until_mounted() + nfiles = self.mount_a.getfattr("./subdir", "ceph.dir.files") + self.assertEqual(nfiles, "1") + + # Clean up the omap object + self.fs.rados(["setomapval", dirfrag_obj, "file_to_be_damaged_head", junk]) + + # Clean up the damagetable entry + self.fs.mon_manager.raw_cluster_cmd( + 'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]), + "damage", "rm", "{did}".format(did=damage_id)) + + # Now I should be able to create a file with the same name as the + # damaged guy if I want. 
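+ # The damage table entry for this dentry was removed above, so re-creating
+ # the name is expected to succeed now.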
+ self.mount_a.touch("subdir/file_to_be_damaged") + + def test_open_ino_errors(self): + """ + That errors encountered during opening inos are properly propagated + """ + + self.mount_a.run_shell(["mkdir", "dir1"]) + self.mount_a.run_shell(["touch", "dir1/file1"]) + self.mount_a.run_shell(["mkdir", "dir2"]) + self.mount_a.run_shell(["touch", "dir2/file2"]) + self.mount_a.run_shell(["mkdir", "testdir"]) + self.mount_a.run_shell(["ln", "dir1/file1", "testdir/hardlink1"]) + self.mount_a.run_shell(["ln", "dir2/file2", "testdir/hardlink2"]) + + file1_ino = self.mount_a.path_to_ino("dir1/file1") + file2_ino = self.mount_a.path_to_ino("dir2/file2") + dir2_ino = self.mount_a.path_to_ino("dir2") + + # Ensure everything is written to backing store + self.mount_a.umount_wait() + self.fs.mds_asok(["flush", "journal"]) + + # Drop everything from the MDS cache + self.mds_cluster.mds_stop() + self.fs.journal_tool(['journal', 'reset'], 0) + self.mds_cluster.mds_fail_restart() + self.fs.wait_for_daemons() + + self.mount_a.mount() + + # Case 1: un-decodeable backtrace + + # Validate that the backtrace is present and decodable + self.fs.read_backtrace(file1_ino) + # Go corrupt the backtrace of alpha/target (used for resolving + # bravo/hardlink). + self.fs._write_data_xattr(file1_ino, "parent", "rhubarb") + + # Check that touching the hardlink gives EIO + ran = self.mount_a.run_shell(["stat", "testdir/hardlink1"], wait=False) + try: + ran.wait() + except CommandFailedError: + self.assertTrue("Input/output error" in ran.stderr.getvalue()) + + # Check that an entry is created in the damage table + damage = json.loads( + self.fs.mon_manager.raw_cluster_cmd( + 'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]), + "damage", "ls", '--format=json-pretty')) + self.assertEqual(len(damage), 1) + self.assertEqual(damage[0]['damage_type'], "backtrace") + self.assertEqual(damage[0]['ino'], file1_ino) + + self.fs.mon_manager.raw_cluster_cmd( + 'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]), + "damage", "rm", str(damage[0]['id'])) + + + # Case 2: missing dirfrag for the target inode + + self.fs.rados(["rm", "{0:x}.00000000".format(dir2_ino)]) + + # Check that touching the hardlink gives EIO + ran = self.mount_a.run_shell(["stat", "testdir/hardlink2"], wait=False) + try: + ran.wait() + except CommandFailedError: + self.assertTrue("Input/output error" in ran.stderr.getvalue()) + + # Check that an entry is created in the damage table + damage = json.loads( + self.fs.mon_manager.raw_cluster_cmd( + 'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]), + "damage", "ls", '--format=json-pretty')) + self.assertEqual(len(damage), 2) + if damage[0]['damage_type'] == "backtrace" : + self.assertEqual(damage[0]['ino'], file2_ino) + self.assertEqual(damage[1]['damage_type'], "dir_frag") + self.assertEqual(damage[1]['ino'], dir2_ino) + else: + self.assertEqual(damage[0]['damage_type'], "dir_frag") + self.assertEqual(damage[0]['ino'], dir2_ino) + self.assertEqual(damage[1]['damage_type'], "backtrace") + self.assertEqual(damage[1]['ino'], file2_ino) + + for entry in damage: + self.fs.mon_manager.raw_cluster_cmd( + 'tell', 'mds.{0}'.format(self.fs.get_active_names()[0]), + "damage", "rm", str(entry['id'])) diff --git a/qa/tasks/cephfs/test_data_scan.py b/qa/tasks/cephfs/test_data_scan.py new file mode 100644 index 00000000..cbd5109a --- /dev/null +++ b/qa/tasks/cephfs/test_data_scan.py @@ -0,0 +1,695 @@ + +""" +Test our tools for recovering metadata from the data pool +""" +import json + +import logging +import os +import 
time +import traceback + +from io import BytesIO +from collections import namedtuple, defaultdict +from textwrap import dedent + +from teuthology.orchestra.run import CommandFailedError +from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology + +log = logging.getLogger(__name__) + + +ValidationError = namedtuple("ValidationError", ["exception", "backtrace"]) + + +class Workload(object): + def __init__(self, filesystem, mount): + self._mount = mount + self._filesystem = filesystem + self._initial_state = None + + # Accumulate backtraces for every failed validation, and return them. Backtraces + # are rather verbose, but we only see them when something breaks, and they + # let us see which check failed without having to decorate each check with + # a string + self._errors = [] + + def assert_equal(self, a, b): + try: + if a != b: + raise AssertionError("{0} != {1}".format(a, b)) + except AssertionError as e: + self._errors.append( + ValidationError(e, traceback.format_exc(3)) + ) + + def write(self): + """ + Write the workload files to the mount + """ + raise NotImplementedError() + + def validate(self): + """ + Read from the mount and validate that the workload files are present (i.e. have + survived or been reconstructed from the test scenario) + """ + raise NotImplementedError() + + def damage(self): + """ + Damage the filesystem pools in ways that will be interesting to recover from. By + default just wipe everything in the metadata pool + """ + # Delete every object in the metadata pool + objects = self._filesystem.rados(["ls"]).split("\n") + for o in objects: + self._filesystem.rados(["rm", o]) + + def flush(self): + """ + Called after client unmount, after write: flush whatever you want + """ + self._filesystem.mds_asok(["flush", "journal"]) + + +class SimpleWorkload(Workload): + """ + Single file, single directory, check that it gets recovered and so does its size + """ + def write(self): + self._mount.run_shell(["mkdir", "subdir"]) + self._mount.write_n_mb("subdir/sixmegs", 6) + self._initial_state = self._mount.stat("subdir/sixmegs") + + def validate(self): + self._mount.run_shell(["ls", "subdir"]) + st = self._mount.stat("subdir/sixmegs") + self.assert_equal(st['st_size'], self._initial_state['st_size']) + return self._errors + + +class MovedFile(Workload): + def write(self): + # Create a file whose backtrace disagrees with his eventual position + # in the metadata. We will see that he gets reconstructed in his + # original position according to his backtrace. 
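+ # The explicit journal flush below persists a backtrace that still points at
+ # subdir_alpha; flush() is overridden to a no-op, so the later rename to
+ # subdir_bravo is never flushed and the stale backtrace is what recovery sees.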
+ self._mount.run_shell(["mkdir", "subdir_alpha"]) + self._mount.run_shell(["mkdir", "subdir_bravo"]) + self._mount.write_n_mb("subdir_alpha/sixmegs", 6) + self._filesystem.mds_asok(["flush", "journal"]) + self._mount.run_shell(["mv", "subdir_alpha/sixmegs", "subdir_bravo/sixmegs"]) + self._initial_state = self._mount.stat("subdir_bravo/sixmegs") + + def flush(self): + pass + + def validate(self): + self.assert_equal(self._mount.ls(), ["subdir_alpha"]) + st = self._mount.stat("subdir_alpha/sixmegs") + self.assert_equal(st['st_size'], self._initial_state['st_size']) + return self._errors + + +class BacktracelessFile(Workload): + def write(self): + self._mount.run_shell(["mkdir", "subdir"]) + self._mount.write_n_mb("subdir/sixmegs", 6) + self._initial_state = self._mount.stat("subdir/sixmegs") + + def flush(self): + # Never flush metadata, so backtrace won't be written + pass + + def validate(self): + ino_name = "%x" % self._initial_state["st_ino"] + + # The inode should be linked into lost+found because we had no path for it + self.assert_equal(self._mount.ls(), ["lost+found"]) + self.assert_equal(self._mount.ls("lost+found"), [ino_name]) + st = self._mount.stat("lost+found/{ino_name}".format(ino_name=ino_name)) + + # We might not have got the name or path, but we should still get the size + self.assert_equal(st['st_size'], self._initial_state['st_size']) + + return self._errors + + +class StripedStashedLayout(Workload): + def __init__(self, fs, m): + super(StripedStashedLayout, self).__init__(fs, m) + + # Nice small stripes so we can quickly do our writes+validates + self.sc = 4 + self.ss = 65536 + self.os = 262144 + + self.interesting_sizes = [ + # Exactly stripe_count objects will exist + self.os * self.sc, + # Fewer than stripe_count objects will exist + self.os * self.sc // 2, + self.os * (self.sc - 1) + self.os // 2, + self.os * (self.sc - 1) + self.os // 2 - 1, + self.os * (self.sc + 1) + self.os // 2, + self.os * (self.sc + 1) + self.os // 2 + 1, + # More than stripe_count objects will exist + self.os * self.sc + self.os * self.sc // 2 + ] + + def write(self): + # Create a dir with a striped layout set on it + self._mount.run_shell(["mkdir", "stripey"]) + + self._mount.setfattr("./stripey", "ceph.dir.layout", + "stripe_unit={ss} stripe_count={sc} object_size={os} pool={pool}".format( + ss=self.ss, os=self.os, sc=self.sc, + pool=self._filesystem.get_data_pool_name() + )) + + # Write files, then flush metadata so that its layout gets written into an xattr + for i, n_bytes in enumerate(self.interesting_sizes): + self._mount.write_test_pattern("stripey/flushed_file_{0}".format(i), n_bytes) + # This is really just validating the validator + self._mount.validate_test_pattern("stripey/flushed_file_{0}".format(i), n_bytes) + self._filesystem.mds_asok(["flush", "journal"]) + + # Write another file in the same way, but this time don't flush the metadata, + # so that it won't have the layout xattr + self._mount.write_test_pattern("stripey/unflushed_file", 1024 * 512) + self._mount.validate_test_pattern("stripey/unflushed_file", 1024 * 512) + + self._initial_state = { + "unflushed_ino": self._mount.path_to_ino("stripey/unflushed_file") + } + + def flush(self): + # Pass because we already selectively flushed during write + pass + + def validate(self): + # The first files should have been recovered into its original location + # with the correct layout: read back correct data + for i, n_bytes in enumerate(self.interesting_sizes): + try: + 
self._mount.validate_test_pattern("stripey/flushed_file_{0}".format(i), n_bytes) + except CommandFailedError as e: + self._errors.append( + ValidationError("File {0} (size {1}): {2}".format(i, n_bytes, e), traceback.format_exc(3)) + ) + + # The unflushed file should have been recovered into lost+found without + # the correct layout: read back junk + ino_name = "%x" % self._initial_state["unflushed_ino"] + self.assert_equal(self._mount.ls("lost+found"), [ino_name]) + try: + self._mount.validate_test_pattern(os.path.join("lost+found", ino_name), 1024 * 512) + except CommandFailedError: + pass + else: + self._errors.append( + ValidationError("Unexpectedly valid data in unflushed striped file", "") + ) + + return self._errors + + +class ManyFilesWorkload(Workload): + def __init__(self, filesystem, mount, file_count): + super(ManyFilesWorkload, self).__init__(filesystem, mount) + self.file_count = file_count + + def write(self): + self._mount.run_shell(["mkdir", "subdir"]) + for n in range(0, self.file_count): + self._mount.write_test_pattern("subdir/{0}".format(n), 6 * 1024 * 1024) + + def validate(self): + for n in range(0, self.file_count): + try: + self._mount.validate_test_pattern("subdir/{0}".format(n), 6 * 1024 * 1024) + except CommandFailedError as e: + self._errors.append( + ValidationError("File {0}: {1}".format(n, e), traceback.format_exc(3)) + ) + + return self._errors + + +class MovedDir(Workload): + def write(self): + # Create a nested dir that we will then move. Two files with two different + # backtraces referring to the moved dir, claiming two different locations for + # it. We will see that only one backtrace wins and the dir ends up with + # single linkage. + self._mount.run_shell(["mkdir", "-p", "grandmother/parent"]) + self._mount.write_n_mb("grandmother/parent/orig_pos_file", 1) + self._filesystem.mds_asok(["flush", "journal"]) + self._mount.run_shell(["mkdir", "grandfather"]) + self._mount.run_shell(["mv", "grandmother/parent", "grandfather"]) + self._mount.write_n_mb("grandfather/parent/new_pos_file", 2) + self._filesystem.mds_asok(["flush", "journal"]) + + self._initial_state = ( + self._mount.stat("grandfather/parent/orig_pos_file"), + self._mount.stat("grandfather/parent/new_pos_file") + ) + + def validate(self): + root_files = self._mount.ls() + self.assert_equal(len(root_files), 1) + self.assert_equal(root_files[0] in ["grandfather", "grandmother"], True) + winner = root_files[0] + st_opf = self._mount.stat("{0}/parent/orig_pos_file".format(winner)) + st_npf = self._mount.stat("{0}/parent/new_pos_file".format(winner)) + + self.assert_equal(st_opf['st_size'], self._initial_state[0]['st_size']) + self.assert_equal(st_npf['st_size'], self._initial_state[1]['st_size']) + + +class MissingZerothObject(Workload): + def write(self): + self._mount.run_shell(["mkdir", "subdir"]) + self._mount.write_n_mb("subdir/sixmegs", 6) + self._initial_state = self._mount.stat("subdir/sixmegs") + + def damage(self): + super(MissingZerothObject, self).damage() + zeroth_id = "{0:x}.00000000".format(self._initial_state['st_ino']) + self._filesystem.rados(["rm", zeroth_id], pool=self._filesystem.get_data_pool_name()) + + def validate(self): + st = self._mount.stat("lost+found/{0:x}".format(self._initial_state['st_ino'])) + self.assert_equal(st['st_size'], self._initial_state['st_size']) + + +class NonDefaultLayout(Workload): + """ + Check that the reconstruction copes with files that have a different + object size in their layout + """ + def write(self): + self._mount.run_shell(["touch", 
"datafile"]) + self._mount.setfattr("./datafile", "ceph.file.layout.object_size", "8388608") + self._mount.run_shell(["dd", "if=/dev/urandom", "of=./datafile", "bs=1M", "count=32"]) + self._initial_state = self._mount.stat("datafile") + + def validate(self): + # Check we got the layout reconstructed properly + object_size = int(self._mount.getfattr( + "./datafile", "ceph.file.layout.object_size")) + self.assert_equal(object_size, 8388608) + + # Check we got the file size reconstructed properly + st = self._mount.stat("datafile") + self.assert_equal(st['st_size'], self._initial_state['st_size']) + + +class TestDataScan(CephFSTestCase): + MDSS_REQUIRED = 2 + + def is_marked_damaged(self, rank): + mds_map = self.fs.get_mds_map() + return rank in mds_map['damaged'] + + def _rebuild_metadata(self, workload, workers=1): + """ + That when all objects in metadata pool are removed, we can rebuild a metadata pool + based on the contents of a data pool, and a client can see and read our files. + """ + + # First, inject some files + + workload.write() + + # Unmount the client and flush the journal: the tool should also cope with + # situations where there is dirty metadata, but we'll test that separately + self.mount_a.umount_wait() + workload.flush() + + # Stop the MDS + self.fs.mds_stop() + self.fs.mds_fail() + + # After recovery, we need the MDS to not be strict about stats (in production these options + # are off by default, but in QA we need to explicitly disable them) + self.fs.set_ceph_conf('mds', 'mds verify scatter', False) + self.fs.set_ceph_conf('mds', 'mds debug scatterstat', False) + + # Apply any data damage the workload wants + workload.damage() + + # Reset the MDS map in case multiple ranks were in play: recovery procedure + # only understands how to rebuild metadata under rank 0 + self.fs.mon_manager.raw_cluster_cmd('fs', 'reset', self.fs.name, + '--yes-i-really-mean-it') + + self.fs.mds_restart() + + def get_state(mds_id): + info = self.mds_cluster.get_mds_info(mds_id) + return info['state'] if info is not None else None + + self.wait_until_true(lambda: self.is_marked_damaged(0), 60) + for mds_id in self.fs.mds_ids: + self.wait_until_equal( + lambda: get_state(mds_id), + "up:standby", + timeout=60) + + self.fs.table_tool([self.fs.name + ":0", "reset", "session"]) + self.fs.table_tool([self.fs.name + ":0", "reset", "snap"]) + self.fs.table_tool([self.fs.name + ":0", "reset", "inode"]) + + # Run the recovery procedure + if False: + with self.assertRaises(CommandFailedError): + # Normal reset should fail when no objects are present, we'll use --force instead + self.fs.journal_tool(["journal", "reset"], 0) + + self.fs.journal_tool(["journal", "reset", "--force"], 0) + self.fs.data_scan(["init"]) + self.fs.data_scan(["scan_extents", self.fs.get_data_pool_name()], worker_count=workers) + self.fs.data_scan(["scan_inodes", self.fs.get_data_pool_name()], worker_count=workers) + + # Mark the MDS repaired + self.fs.mon_manager.raw_cluster_cmd('mds', 'repaired', '0') + + # Start the MDS + self.fs.mds_restart() + self.fs.wait_for_daemons() + log.info(str(self.mds_cluster.status())) + + # Mount a client + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + # See that the files are present and correct + errors = workload.validate() + if errors: + log.error("Validation errors found: {0}".format(len(errors))) + for e in errors: + log.error(e.exception) + log.error(e.backtrace) + raise AssertionError("Validation failed, first error: {0}\n{1}".format( + errors[0].exception, errors[0].backtrace 
+ )) + + def test_rebuild_simple(self): + self._rebuild_metadata(SimpleWorkload(self.fs, self.mount_a)) + + def test_rebuild_moved_file(self): + self._rebuild_metadata(MovedFile(self.fs, self.mount_a)) + + def test_rebuild_backtraceless(self): + self._rebuild_metadata(BacktracelessFile(self.fs, self.mount_a)) + + def test_rebuild_moved_dir(self): + self._rebuild_metadata(MovedDir(self.fs, self.mount_a)) + + def test_rebuild_missing_zeroth(self): + self._rebuild_metadata(MissingZerothObject(self.fs, self.mount_a)) + + def test_rebuild_nondefault_layout(self): + self._rebuild_metadata(NonDefaultLayout(self.fs, self.mount_a)) + + def test_stashed_layout(self): + self._rebuild_metadata(StripedStashedLayout(self.fs, self.mount_a)) + + def _dirfrag_keys(self, object_id): + keys_str = self.fs.rados(["listomapkeys", object_id]) + if keys_str: + return keys_str.split("\n") + else: + return [] + + def test_fragmented_injection(self): + """ + That when injecting a dentry into a fragmented directory, we put it in the right fragment. + """ + + file_count = 100 + file_names = ["%s" % n for n in range(0, file_count)] + + # Make sure and disable dirfrag auto merging and splitting + self.fs.set_ceph_conf('mds', 'mds bal merge size', 0) + self.fs.set_ceph_conf('mds', 'mds bal split size', 100 * file_count) + + # Create a directory of `file_count` files, each named after its + # decimal number and containing the string of its decimal number + self.mount_a.run_python(dedent(""" + import os + path = os.path.join("{path}", "subdir") + os.mkdir(path) + for n in range(0, {file_count}): + open(os.path.join(path, "%s" % n), 'w').write("%s" % n) + """.format( + path=self.mount_a.mountpoint, + file_count=file_count + ))) + + dir_ino = self.mount_a.path_to_ino("subdir") + + # Only one MDS should be active! + self.assertEqual(len(self.fs.get_active_names()), 1) + + # Ensure that one directory is fragmented + mds_id = self.fs.get_active_names()[0] + self.fs.mds_asok(["dirfrag", "split", "/subdir", "0/0", "1"], mds_id) + + # Flush journal and stop MDS + self.mount_a.umount_wait() + self.fs.mds_asok(["flush", "journal"], mds_id) + self.fs.mds_stop() + self.fs.mds_fail() + + # Pick a dentry and wipe out its key + # Because I did a 1 bit split, I know one frag will be named .01000000 + frag_obj_id = "{0:x}.01000000".format(dir_ino) + keys = self._dirfrag_keys(frag_obj_id) + victim_key = keys[7] # arbitrary choice + log.info("victim_key={0}".format(victim_key)) + victim_dentry = victim_key.split("_head")[0] + self.fs.rados(["rmomapkey", frag_obj_id, victim_key]) + + # Start filesystem back up, observe that the file appears to be gone in an `ls` + self.fs.mds_restart() + self.fs.wait_for_daemons() + self.mount_a.mount() + self.mount_a.wait_until_mounted() + files = self.mount_a.run_shell(["ls", "subdir/"]).stdout.getvalue().strip().split("\n") + self.assertListEqual(sorted(files), sorted(list(set(file_names) - set([victim_dentry])))) + + # Stop the filesystem + self.mount_a.umount_wait() + self.fs.mds_stop() + self.fs.mds_fail() + + # Run data-scan, observe that it inserts our dentry back into the correct fragment + # by checking the omap now has the dentry's key again + self.fs.data_scan(["scan_extents", self.fs.get_data_pool_name()]) + self.fs.data_scan(["scan_inodes", self.fs.get_data_pool_name()]) + self.fs.data_scan(["scan_links"]) + self.assertIn(victim_key, self._dirfrag_keys(frag_obj_id)) + + # Start the filesystem and check that the dentry we deleted is now once again visible + # and points to the correct file data. 
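+ # Each injected file was written containing its own decimal name, so the cat
+ # below, which checks that the contents equal the dentry name, also confirms
+ # the data linkage was restored correctly.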
+ self.fs.mds_restart() + self.fs.wait_for_daemons() + self.mount_a.mount() + self.mount_a.wait_until_mounted() + out = self.mount_a.run_shell(["cat", "subdir/{0}".format(victim_dentry)]).stdout.getvalue().strip() + self.assertEqual(out, victim_dentry) + + # Finally, close the loop by checking our injected dentry survives a merge + mds_id = self.fs.get_active_names()[0] + self.mount_a.ls("subdir") # Do an ls to ensure both frags are in cache so the merge will work + self.fs.mds_asok(["dirfrag", "merge", "/subdir", "0/0"], mds_id) + self.fs.mds_asok(["flush", "journal"], mds_id) + frag_obj_id = "{0:x}.00000000".format(dir_ino) + keys = self._dirfrag_keys(frag_obj_id) + self.assertListEqual(sorted(keys), sorted(["%s_head" % f for f in file_names])) + + # run scrub to update and make sure rstat.rbytes info in subdir inode and dirfrag + # are matched + out_json = self.fs.rank_tell(["scrub", "start", "/subdir", "repair", "recursive"]) + self.assertNotEqual(out_json, None) + + # Remove the whole 'sudbdir' directory + self.mount_a.run_shell(["rm", "-rf", "subdir/"]) + + @for_teuthology + def test_parallel_execution(self): + self._rebuild_metadata(ManyFilesWorkload(self.fs, self.mount_a, 25), workers=7) + + def test_pg_files(self): + """ + That the pg files command tells us which files are associated with + a particular PG + """ + file_count = 20 + self.mount_a.run_shell(["mkdir", "mydir"]) + self.mount_a.create_n_files("mydir/myfile", file_count) + + # Some files elsewhere in the system that we will ignore + # to check that the tool is filtering properly + self.mount_a.run_shell(["mkdir", "otherdir"]) + self.mount_a.create_n_files("otherdir/otherfile", file_count) + + pgs_to_files = defaultdict(list) + # Rough (slow) reimplementation of the logic + for i in range(0, file_count): + file_path = "mydir/myfile_{0}".format(i) + ino = self.mount_a.path_to_ino(file_path) + obj = "{0:x}.{1:08x}".format(ino, 0) + pgid = json.loads(self.fs.mon_manager.raw_cluster_cmd( + "osd", "map", self.fs.get_data_pool_name(), obj, + "--format=json-pretty" + ))['pgid'] + pgs_to_files[pgid].append(file_path) + log.info("{0}: {1}".format(file_path, pgid)) + + pg_count = self.fs.pgs_per_fs_pool + for pg_n in range(0, pg_count): + pg_str = "{0}.{1}".format(self.fs.get_data_pool_id(), pg_n) + out = self.fs.data_scan(["pg_files", "mydir", pg_str]) + lines = [l for l in out.split("\n") if l] + log.info("{0}: {1}".format(pg_str, lines)) + self.assertSetEqual(set(lines), set(pgs_to_files[pg_str])) + + def test_rebuild_linkage(self): + """ + The scan_links command fixes linkage errors + """ + self.mount_a.run_shell(["mkdir", "testdir1"]) + self.mount_a.run_shell(["mkdir", "testdir2"]) + dir1_ino = self.mount_a.path_to_ino("testdir1") + dir2_ino = self.mount_a.path_to_ino("testdir2") + dirfrag1_oid = "{0:x}.00000000".format(dir1_ino) + dirfrag2_oid = "{0:x}.00000000".format(dir2_ino) + + self.mount_a.run_shell(["touch", "testdir1/file1"]) + self.mount_a.run_shell(["ln", "testdir1/file1", "testdir1/link1"]) + self.mount_a.run_shell(["ln", "testdir1/file1", "testdir2/link2"]) + + mds_id = self.fs.get_active_names()[0] + self.fs.mds_asok(["flush", "journal"], mds_id) + + dirfrag1_keys = self._dirfrag_keys(dirfrag1_oid) + + # introduce duplicated primary link + file1_key = "file1_head" + self.assertIn(file1_key, dirfrag1_keys) + file1_omap_data = self.fs.rados(["getomapval", dirfrag1_oid, file1_key, '-'], + stdout_data=BytesIO()) + self.fs.rados(["setomapval", dirfrag2_oid, file1_key], stdin_data=file1_omap_data) + 
self.assertIn(file1_key, self._dirfrag_keys(dirfrag2_oid)) + + # remove a remote link, make inode link count incorrect + link1_key = 'link1_head' + self.assertIn(link1_key, dirfrag1_keys) + self.fs.rados(["rmomapkey", dirfrag1_oid, link1_key]) + + # increase good primary link's version + self.mount_a.run_shell(["touch", "testdir1/file1"]) + self.mount_a.umount_wait() + + self.fs.mds_asok(["flush", "journal"], mds_id) + self.fs.mds_stop() + self.fs.mds_fail() + + # repair linkage errors + self.fs.data_scan(["scan_links"]) + + # primary link in testdir2 was deleted? + self.assertNotIn(file1_key, self._dirfrag_keys(dirfrag2_oid)) + + self.fs.mds_restart() + self.fs.wait_for_daemons() + + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + # link count was adjusted? + file1_nlink = self.mount_a.path_to_nlink("testdir1/file1") + self.assertEqual(file1_nlink, 2) + + def test_rebuild_inotable(self): + """ + The scan_links command repairs the inotables + """ + self.fs.set_max_mds(2) + self.fs.wait_for_daemons() + + active_mds_names = self.fs.get_active_names() + mds0_id = active_mds_names[0] + mds1_id = active_mds_names[1] + + self.mount_a.run_shell(["mkdir", "dir1"]) + dir_ino = self.mount_a.path_to_ino("dir1") + self.mount_a.setfattr("dir1", "ceph.dir.pin", "1") + # wait for subtree migration + + file_ino = 0 + while True: + time.sleep(1) + # allocate an inode from mds.1 + self.mount_a.run_shell(["touch", "dir1/file1"]) + file_ino = self.mount_a.path_to_ino("dir1/file1") + if file_ino >= (2 << 40): + break + self.mount_a.run_shell(["rm", "-f", "dir1/file1"]) + + self.mount_a.umount_wait() + + self.fs.mds_asok(["flush", "journal"], mds0_id) + self.fs.mds_asok(["flush", "journal"], mds1_id) + self.mds_cluster.mds_stop() + + self.fs.rados(["rm", "mds0_inotable"]) + self.fs.rados(["rm", "mds1_inotable"]) + + self.fs.data_scan(["scan_links", "--filesystem", self.fs.name]) + + mds0_inotable = json.loads(self.fs.table_tool([self.fs.name + ":0", "show", "inode"])) + self.assertGreaterEqual( + mds0_inotable['0']['data']['inotable']['free'][0]['start'], dir_ino) + + mds1_inotable = json.loads(self.fs.table_tool([self.fs.name + ":1", "show", "inode"])) + self.assertGreaterEqual( + mds1_inotable['1']['data']['inotable']['free'][0]['start'], file_ino) + + def test_rebuild_snaptable(self): + """ + The scan_links command repairs the snaptable + """ + self.fs.set_allow_new_snaps(True) + + self.mount_a.run_shell(["mkdir", "dir1"]) + self.mount_a.run_shell(["mkdir", "dir1/.snap/s1"]) + self.mount_a.run_shell(["mkdir", "dir1/.snap/s2"]) + self.mount_a.run_shell(["rmdir", "dir1/.snap/s2"]) + + self.mount_a.umount_wait() + + mds0_id = self.fs.get_active_names()[0] + self.fs.mds_asok(["flush", "journal"], mds0_id) + + # wait for mds to update removed snaps + time.sleep(10) + + old_snaptable = json.loads(self.fs.table_tool([self.fs.name + ":0", "show", "snap"])) + # stamps may have minor differences + for item in old_snaptable['snapserver']['snaps']: + del item['stamp'] + + self.fs.rados(["rm", "mds_snaptable"]) + self.fs.data_scan(["scan_links", "--filesystem", self.fs.name]) + + new_snaptable = json.loads(self.fs.table_tool([self.fs.name + ":0", "show", "snap"])) + for item in new_snaptable['snapserver']['snaps']: + del item['stamp'] + self.assertGreaterEqual( + new_snaptable['snapserver']['last_snap'], old_snaptable['snapserver']['last_snap']) + self.assertEqual( + new_snaptable['snapserver']['snaps'], old_snaptable['snapserver']['snaps']) diff --git a/qa/tasks/cephfs/test_dump_tree.py
b/qa/tasks/cephfs/test_dump_tree.py new file mode 100644 index 00000000..48a2c6f0 --- /dev/null +++ b/qa/tasks/cephfs/test_dump_tree.py @@ -0,0 +1,66 @@ +from tasks.cephfs.cephfs_test_case import CephFSTestCase +import random +import os + +class TestDumpTree(CephFSTestCase): + def get_paths_to_ino(self): + inos = {} + p = self.mount_a.run_shell(["find", "./"]) + paths = p.stdout.getvalue().strip().split() + for path in paths: + inos[path] = self.mount_a.path_to_ino(path, False) + + return inos + + def populate(self): + self.mount_a.run_shell(["git", "clone", + "https://github.com/ceph/ceph-qa-suite"]) + + def test_basic(self): + self.mount_a.run_shell(["mkdir", "parent"]) + self.mount_a.run_shell(["mkdir", "parent/child"]) + self.mount_a.run_shell(["touch", "parent/child/file"]) + self.mount_a.run_shell(["mkdir", "parent/child/grandchild"]) + self.mount_a.run_shell(["touch", "parent/child/grandchild/file"]) + + inos = self.get_paths_to_ino() + tree = self.fs.mds_asok(["dump", "tree", "/parent/child", "1"]) + + target_inos = [inos["./parent/child"], inos["./parent/child/file"], + inos["./parent/child/grandchild"]] + + for ino in tree: + del target_inos[target_inos.index(ino['ino'])] # don't catch! + + assert(len(target_inos) == 0) + + def test_random(self): + random.seed(0) + + self.populate() + inos = self.get_paths_to_ino() + target = random.sample(inos.keys(), 1)[0] + + if target != "./": + target = os.path.dirname(target) + + subtree = [path for path in inos.keys() if path.startswith(target)] + target_inos = [inos[path] for path in subtree] + tree = self.fs.mds_asok(["dump", "tree", target[1:]]) + + for ino in tree: + del target_inos[target_inos.index(ino['ino'])] # don't catch! + + assert(len(target_inos) == 0) + + target_depth = target.count('/') + maxdepth = max([path.count('/') for path in subtree]) - target_depth + depth = random.randint(0, maxdepth) + target_inos = [inos[path] for path in subtree \ + if path.count('/') <= depth + target_depth] + tree = self.fs.mds_asok(["dump", "tree", target[1:], str(depth)]) + + for ino in tree: + del target_inos[target_inos.index(ino['ino'])] # don't catch! 
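The containment check performed by the delete-by-index loops above can be read as a set comparison; a small equivalent sketch (illustrative only, not from the patch):

    def tree_inos(tree):
        # inode numbers reported by the "dump tree <path> <depth>" asok command
        return {entry['ino'] for entry in tree}

    # the loops above, together with the final length assertion, amount to
    # checking that every expected inode shows up in the dump:
    #   assert set(target_inos) <= tree_inos(tree)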
+ + assert(len(target_inos) == 0) diff --git a/qa/tasks/cephfs/test_exports.py b/qa/tasks/cephfs/test_exports.py new file mode 100644 index 00000000..abaf92e6 --- /dev/null +++ b/qa/tasks/cephfs/test_exports.py @@ -0,0 +1,176 @@ +import logging +import time +from tasks.cephfs.fuse_mount import FuseMount +from tasks.cephfs.cephfs_test_case import CephFSTestCase + +log = logging.getLogger(__name__) + +class TestExports(CephFSTestCase): + MDSS_REQUIRED = 2 + CLIENTS_REQUIRED = 2 + + def test_export_pin(self): + self.fs.set_max_mds(2) + self.fs.wait_for_daemons() + + status = self.fs.status() + + self.mount_a.run_shell(["mkdir", "-p", "1/2/3"]) + self._wait_subtrees(status, 0, []) + + # NOP + self.mount_a.setfattr("1", "ceph.dir.pin", "-1") + self._wait_subtrees(status, 0, []) + + # NOP (rank < -1) + self.mount_a.setfattr("1", "ceph.dir.pin", "-2341") + self._wait_subtrees(status, 0, []) + + # pin /1 to rank 1 + self.mount_a.setfattr("1", "ceph.dir.pin", "1") + self._wait_subtrees(status, 1, [('/1', 1)]) + + # Check export_targets is set properly + status = self.fs.status() + log.info(status) + r0 = status.get_rank(self.fs.id, 0) + self.assertTrue(sorted(r0['export_targets']) == [1]) + + # redundant pin /1/2 to rank 1 + self.mount_a.setfattr("1/2", "ceph.dir.pin", "1") + self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)]) + + # change pin /1/2 to rank 0 + self.mount_a.setfattr("1/2", "ceph.dir.pin", "0") + self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)]) + self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)]) + + # change pin /1/2/3 to (presently) non-existent rank 2 + self.mount_a.setfattr("1/2/3", "ceph.dir.pin", "2") + self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)]) + self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)]) + + # change pin /1/2 back to rank 1 + self.mount_a.setfattr("1/2", "ceph.dir.pin", "1") + self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)]) + + # add another directory pinned to 1 + self.mount_a.run_shell(["mkdir", "-p", "1/4/5"]) + self.mount_a.setfattr("1/4/5", "ceph.dir.pin", "1") + self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1), ('/1/4/5', 1)]) + + # change pin /1 to 0 + self.mount_a.setfattr("1", "ceph.dir.pin", "0") + self._wait_subtrees(status, 0, [('/1', 0), ('/1/2', 1), ('/1/4/5', 1)]) + + # change pin /1/2 to default (-1); does the subtree root properly respect it's parent pin? 
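The ceph.dir.pin virtual xattr exercised throughout this test can also be driven without the test helpers; a minimal sketch, assuming a CephFS mount at /mnt/cephfs (path is an assumption; reading the value back needs a client new enough to expose the vxattr, as the kernel-version check further below acknowledges):

    import os

    MOUNT = "/mnt/cephfs"          # assumption: where the filesystem is mounted
    d = os.path.join(MOUNT, "1")

    # pin the subtree rooted at <mount>/1 to MDS rank 1; writing "-1" reverts to
    # the parent's pin, which is what the setfattr call just below does for /1/2
    os.setxattr(d, "ceph.dir.pin", b"1")
    print(os.getxattr(d, "ceph.dir.pin"))   # b'1' on clients that expose it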
+ self.mount_a.setfattr("1/2", "ceph.dir.pin", "-1") + self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1)]) + + if len(list(status.get_standbys())): + self.fs.set_max_mds(3) + self.fs.wait_for_state('up:active', rank=2) + self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2)]) + + # Check export_targets is set properly + status = self.fs.status() + log.info(status) + r0 = status.get_rank(self.fs.id, 0) + self.assertTrue(sorted(r0['export_targets']) == [1,2]) + r1 = status.get_rank(self.fs.id, 1) + self.assertTrue(sorted(r1['export_targets']) == [0]) + r2 = status.get_rank(self.fs.id, 2) + self.assertTrue(sorted(r2['export_targets']) == []) + + # Test rename + self.mount_a.run_shell(["mkdir", "-p", "a/b", "aa/bb"]) + self.mount_a.setfattr("a", "ceph.dir.pin", "1") + self.mount_a.setfattr("aa/bb", "ceph.dir.pin", "0") + if (len(self.fs.get_active_names()) > 2): + self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/aa/bb', 0)]) + else: + self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/a', 1), ('/aa/bb', 0)]) + self.mount_a.run_shell(["mv", "aa", "a/b/"]) + if (len(self.fs.get_active_names()) > 2): + self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/1/2/3', 2), ('/a', 1), ('/a/b/aa/bb', 0)]) + else: + self._wait_subtrees(status, 0, [('/1', 0), ('/1/4/5', 1), ('/a', 1), ('/a/b/aa/bb', 0)]) + + def test_export_pin_getfattr(self): + self.fs.set_max_mds(2) + self.fs.wait_for_daemons() + + status = self.fs.status() + + self.mount_a.run_shell(["mkdir", "-p", "1/2/3"]) + self._wait_subtrees(status, 0, []) + + # pin /1 to rank 0 + self.mount_a.setfattr("1", "ceph.dir.pin", "1") + self._wait_subtrees(status, 1, [('/1', 1)]) + + # pin /1/2 to rank 1 + self.mount_a.setfattr("1/2", "ceph.dir.pin", "1") + self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 1)]) + + # change pin /1/2 to rank 0 + self.mount_a.setfattr("1/2", "ceph.dir.pin", "0") + self._wait_subtrees(status, 1, [('/1', 1), ('/1/2', 0)]) + self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)]) + + # change pin /1/2/3 to (presently) non-existent rank 2 + self.mount_a.setfattr("1/2/3", "ceph.dir.pin", "2") + self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0)]) + + if len(list(status.get_standbys())): + self.fs.set_max_mds(3) + self.fs.wait_for_state('up:active', rank=2) + self._wait_subtrees(status, 0, [('/1', 1), ('/1/2', 0), ('/1/2/3', 2)]) + + if not isinstance(self.mount_a, FuseMount): + p = self.mount_a.client_remote.sh('uname -r', wait=True) + dir_pin = self.mount_a.getfattr("1", "ceph.dir.pin") + log.debug("mount.getfattr('1','ceph.dir.pin'): %s " % dir_pin) + if str(p) < "5" and not(dir_pin): + self.skipTest("Kernel does not support getting the extended attribute ceph.dir.pin") + self.assertEqual(self.mount_a.getfattr("1", "ceph.dir.pin"), '1') + self.assertEqual(self.mount_a.getfattr("1/2", "ceph.dir.pin"), '0') + if (len(self.fs.get_active_names()) > 2): + self.assertEqual(self.mount_a.getfattr("1/2/3", "ceph.dir.pin"), '2') + + def test_session_race(self): + """ + Test session creation race. 
+ + See: https://tracker.ceph.com/issues/24072#change-113056 + """ + + self.fs.set_max_mds(2) + status = self.fs.wait_for_daemons() + + rank1 = self.fs.get_rank(rank=1, status=status) + + # Create a directory that is pre-exported to rank 1 + self.mount_a.run_shell(["mkdir", "-p", "a/aa"]) + self.mount_a.setfattr("a", "ceph.dir.pin", "1") + self._wait_subtrees(status, 1, [('/a', 1)]) + + # Now set the mds config to allow the race + self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "true"], rank=1) + + # Now create another directory and try to export it + self.mount_b.run_shell(["mkdir", "-p", "b/bb"]) + self.mount_b.setfattr("b", "ceph.dir.pin", "1") + + time.sleep(5) + + # Now turn off the race so that it doesn't wait again + self.fs.rank_asok(["config", "set", "mds_inject_migrator_session_race", "false"], rank=1) + + # Now try to create a session with rank 1 by accessing a dir known to + # be there, if buggy, this should cause the rank 1 to crash: + self.mount_b.run_shell(["ls", "a"]) + + # Check if rank1 changed (standby tookover?) + new_rank1 = self.fs.get_rank(rank=1) + self.assertEqual(rank1['gid'], new_rank1['gid']) diff --git a/qa/tasks/cephfs/test_failover.py b/qa/tasks/cephfs/test_failover.py new file mode 100644 index 00000000..c87afbf6 --- /dev/null +++ b/qa/tasks/cephfs/test_failover.py @@ -0,0 +1,638 @@ +import time +import signal +import logging +from unittest import case, SkipTest +from random import randint +from six.moves import range + +from tasks.cephfs.cephfs_test_case import CephFSTestCase +from teuthology.exceptions import CommandFailedError +from tasks.cephfs.fuse_mount import FuseMount + +log = logging.getLogger(__name__) + + +class TestClusterResize(CephFSTestCase): + CLIENTS_REQUIRED = 1 + MDSS_REQUIRED = 3 + + def grow(self, n): + grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon")) + + fscid = self.fs.id + status = self.fs.status() + log.info("status = {0}".format(status)) + + original_ranks = set([info['gid'] for info in status.get_ranks(fscid)]) + _ = set([info['gid'] for info in status.get_standbys()]) + + oldmax = self.fs.get_var('max_mds') + self.assertTrue(n > oldmax) + self.fs.set_max_mds(n) + + log.info("Waiting for cluster to grow.") + status = self.fs.wait_for_daemons(timeout=60+grace*2) + ranks = set([info['gid'] for info in status.get_ranks(fscid)]) + self.assertTrue(original_ranks.issubset(ranks) and len(ranks) == n) + return status + + def shrink(self, n): + grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon")) + + fscid = self.fs.id + status = self.fs.status() + log.info("status = {0}".format(status)) + + original_ranks = set([info['gid'] for info in status.get_ranks(fscid)]) + _ = set([info['gid'] for info in status.get_standbys()]) + + oldmax = self.fs.get_var('max_mds') + self.assertTrue(n < oldmax) + self.fs.set_max_mds(n) + + # Wait until the monitor finishes stopping ranks >= n + log.info("Waiting for cluster to shink.") + status = self.fs.wait_for_daemons(timeout=60+grace*2) + ranks = set([info['gid'] for info in status.get_ranks(fscid)]) + self.assertTrue(ranks.issubset(original_ranks) and len(ranks) == n) + return status + + + def test_grow(self): + """ + That the MDS cluster grows after increasing max_mds. + """ + + # Need all my standbys up as well as the active daemons + # self.wait_for_daemon_start() necessary? + + self.grow(2) + self.grow(3) + + + def test_shrink(self): + """ + That the MDS cluster shrinks automatically after decreasing max_mds. 
+ """ + + self.grow(3) + self.shrink(1) + + def test_up_less_than_max(self): + """ + That a health warning is generated when max_mds is greater than active count. + """ + + status = self.fs.status() + mdss = [info['gid'] for info in status.get_all()] + self.fs.set_max_mds(len(mdss)+1) + self.wait_for_health("MDS_UP_LESS_THAN_MAX", 30) + self.shrink(2) + self.wait_for_health_clear(30) + + def test_down_health(self): + """ + That marking a FS down does not generate a health warning + """ + + self.mount_a.umount_wait() + + self.fs.set_down() + try: + self.wait_for_health("", 30) + raise RuntimeError("got health warning?") + except RuntimeError as e: + if "Timed out after" in str(e): + pass + else: + raise + + def test_down_twice(self): + """ + That marking a FS down twice does not wipe old_max_mds. + """ + + self.mount_a.umount_wait() + + self.grow(2) + self.fs.set_down() + self.fs.wait_for_daemons() + self.fs.set_down(False) + self.assertEqual(self.fs.get_var("max_mds"), 2) + self.fs.wait_for_daemons(timeout=60) + + def test_down_grow(self): + """ + That setting max_mds undoes down. + """ + + self.mount_a.umount_wait() + + self.fs.set_down() + self.fs.wait_for_daemons() + self.grow(2) + self.fs.wait_for_daemons() + + def test_down(self): + """ + That down setting toggles and sets max_mds appropriately. + """ + + self.mount_a.umount_wait() + + self.fs.set_down() + self.fs.wait_for_daemons() + self.assertEqual(self.fs.get_var("max_mds"), 0) + self.fs.set_down(False) + self.assertEqual(self.fs.get_var("max_mds"), 1) + self.fs.wait_for_daemons() + self.assertEqual(self.fs.get_var("max_mds"), 1) + + def test_hole(self): + """ + Test that a hole cannot be created in the FS ranks. + """ + + fscid = self.fs.id + + self.grow(2) + + self.fs.set_max_mds(1) + log.info("status = {0}".format(self.fs.status())) + + self.fs.set_max_mds(3) + # Don't wait for rank 1 to stop + + self.fs.set_max_mds(2) + # Prevent another MDS from taking rank 1 + # XXX This is a little racy because rank 1 may have stopped and a + # standby assigned to rank 1 before joinable=0 is set. + self.fs.set_joinable(False) # XXX keep in mind changing max_mds clears this flag + + try: + status = self.fs.wait_for_daemons(timeout=90) + raise RuntimeError("should not be able to successfully shrink cluster!") + except: + # could not shrink to max_mds=2 and reach 2 actives (because joinable=False) + status = self.fs.status() + ranks = set([info['rank'] for info in status.get_ranks(fscid)]) + self.assertTrue(ranks == set([0])) + finally: + log.info("status = {0}".format(status)) + + def test_thrash(self): + """ + Test that thrashing max_mds does not fail. + """ + + max_mds = 2 + for i in range(0, 100): + self.fs.set_max_mds(max_mds) + max_mds = (max_mds+1)%3+1 + + self.fs.wait_for_daemons(timeout=90) + +class TestFailover(CephFSTestCase): + CLIENTS_REQUIRED = 1 + MDSS_REQUIRED = 2 + + def test_simple(self): + """ + That when the active MDS is killed, a standby MDS is promoted into + its rank after the grace period. + + This is just a simple unit test, the harder cases are covered + in thrashing tests. 
+ """ + + # Need all my standbys up as well as the active daemons + self.wait_for_daemon_start() + + (original_active, ) = self.fs.get_active_names() + original_standbys = self.mds_cluster.get_standby_daemons() + + # Kill the rank 0 daemon's physical process + self.fs.mds_stop(original_active) + + grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon")) + + # Wait until the monitor promotes his replacement + def promoted(): + active = self.fs.get_active_names() + return active and active[0] in original_standbys + + log.info("Waiting for promotion of one of the original standbys {0}".format( + original_standbys)) + self.wait_until_true( + promoted, + timeout=grace*2) + + # Start the original rank 0 daemon up again, see that he becomes a standby + self.fs.mds_restart(original_active) + self.wait_until_true( + lambda: original_active in self.mds_cluster.get_standby_daemons(), + timeout=60 # Approximately long enough for MDS to start and mon to notice + ) + + def test_client_abort(self): + """ + That a client will respect fuse_require_active_mds and error out + when the cluster appears to be unavailable. + """ + + if not isinstance(self.mount_a, FuseMount): + raise SkipTest("Requires FUSE client to inject client metadata") + + require_active = self.fs.get_config("fuse_require_active_mds", service_type="mon").lower() == "true" + if not require_active: + raise case.SkipTest("fuse_require_active_mds is not set") + + grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon")) + + # Check it's not laggy to begin with + (original_active, ) = self.fs.get_active_names() + self.assertNotIn("laggy_since", self.fs.status().get_mds(original_active)) + + self.mounts[0].umount_wait() + + # Control: that we can mount and unmount usually, while the cluster is healthy + self.mounts[0].mount() + self.mounts[0].wait_until_mounted() + self.mounts[0].umount_wait() + + # Stop the daemon processes + self.fs.mds_stop() + + # Wait for everyone to go laggy + def laggy(): + mdsmap = self.fs.get_mds_map() + for info in mdsmap['info'].values(): + if "laggy_since" not in info: + return False + + return True + + self.wait_until_true(laggy, grace * 2) + with self.assertRaises(CommandFailedError): + self.mounts[0].mount() + + def test_standby_count_wanted(self): + """ + That cluster health warnings are generated by insufficient standbys available. 
+ """ + + # Need all my standbys up as well as the active daemons + self.wait_for_daemon_start() + + grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon")) + + standbys = self.mds_cluster.get_standby_daemons() + self.assertGreaterEqual(len(standbys), 1) + self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys))) + + # Kill a standby and check for warning + victim = standbys.pop() + self.fs.mds_stop(victim) + log.info("waiting for insufficient standby daemon warning") + self.wait_for_health("MDS_INSUFFICIENT_STANDBY", grace*2) + + # restart the standby, see that he becomes a standby, check health clears + self.fs.mds_restart(victim) + self.wait_until_true( + lambda: victim in self.mds_cluster.get_standby_daemons(), + timeout=60 # Approximately long enough for MDS to start and mon to notice + ) + self.wait_for_health_clear(timeout=30) + + # Set it one greater than standbys ever seen + standbys = self.mds_cluster.get_standby_daemons() + self.assertGreaterEqual(len(standbys), 1) + self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', str(len(standbys)+1)) + log.info("waiting for insufficient standby daemon warning") + self.wait_for_health("MDS_INSUFFICIENT_STANDBY", grace*2) + + # Set it to 0 + self.fs.mon_manager.raw_cluster_cmd('fs', 'set', self.fs.name, 'standby_count_wanted', '0') + self.wait_for_health_clear(timeout=30) + + def test_discontinuous_mdsmap(self): + """ + That discontinuous mdsmap does not affect failover. + See http://tracker.ceph.com/issues/24856. + """ + self.fs.set_max_mds(2) + status = self.fs.wait_for_daemons() + + self.mount_a.umount_wait() + + grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon")) + monc_timeout = float(self.fs.get_config("mon_client_ping_timeout", service_type="mds")) + + mds_0 = self.fs.get_rank(rank=0, status=status) + self.fs.rank_freeze(True, rank=0) # prevent failover + self.fs.rank_signal(signal.SIGSTOP, rank=0, status=status) + self.wait_until_true( + lambda: "laggy_since" in self.fs.get_rank(), + timeout=grace * 2 + ) + + self.fs.rank_fail(rank=1) + self.fs.wait_for_state('up:resolve', rank=1, timeout=30) + + # Make sure of mds_0's monitor connection gets reset + time.sleep(monc_timeout * 2) + + # Continue rank 0, it will get discontinuous mdsmap + self.fs.rank_signal(signal.SIGCONT, rank=0) + self.wait_until_true( + lambda: "laggy_since" not in self.fs.get_rank(rank=0), + timeout=grace * 2 + ) + + # mds.b will be stuck at 'reconnect' state if snapserver gets confused + # by discontinuous mdsmap + self.fs.wait_for_state('up:active', rank=1, timeout=30) + self.assertEqual(mds_0['gid'], self.fs.get_rank(rank=0)['gid']) + self.fs.rank_freeze(False, rank=0) + +class TestStandbyReplay(CephFSTestCase): + MDSS_REQUIRED = 4 + + def _confirm_no_replay(self): + status = self.fs.status() + _ = len(list(status.get_standbys())) + self.assertEqual(0, len(list(self.fs.get_replays(status=status)))) + return status + + def _confirm_single_replay(self, full=True, status=None, retries=3): + status = self.fs.wait_for_daemons(status=status) + ranks = sorted(self.fs.get_mds_map(status=status)['in']) + replays = list(self.fs.get_replays(status=status)) + checked_replays = set() + for rank in ranks: + has_replay = False + for replay in replays: + if replay['rank'] == rank: + self.assertFalse(has_replay) + has_replay = True + checked_replays.add(replay['gid']) + if full and not has_replay: + if retries <= 0: + raise RuntimeError("rank "+str(rank)+" 
has no standby-replay follower") + else: + retries = retries-1 + time.sleep(2) + self.assertEqual(checked_replays, set(info['gid'] for info in replays)) + return status + + def _check_replay_takeover(self, status, rank=0): + replay = self.fs.get_replay(rank=rank, status=status) + new_status = self.fs.wait_for_daemons() + new_active = self.fs.get_rank(rank=rank, status=new_status) + if replay: + self.assertEqual(replay['gid'], new_active['gid']) + else: + # double check takeover came from a standby (or some new daemon via restart) + found = False + for info in status.get_standbys(): + if info['gid'] == new_active['gid']: + found = True + break + if not found: + for info in status.get_all(): + self.assertNotEqual(info['gid'], new_active['gid']) + return new_status + + def test_standby_replay_singleton(self): + """ + That only one MDS becomes standby-replay. + """ + + self._confirm_no_replay() + self.fs.set_allow_standby_replay(True) + time.sleep(30) + self._confirm_single_replay() + + def test_standby_replay_singleton_fail(self): + """ + That failures don't violate singleton constraint. + """ + + self._confirm_no_replay() + self.fs.set_allow_standby_replay(True) + status = self._confirm_single_replay() + + for i in range(10): + time.sleep(randint(1, 5)) + self.fs.rank_restart(status=status) + status = self._check_replay_takeover(status) + status = self._confirm_single_replay(status=status) + + for i in range(10): + time.sleep(randint(1, 5)) + self.fs.rank_fail() + status = self._check_replay_takeover(status) + status = self._confirm_single_replay(status=status) + + def test_standby_replay_singleton_fail_multimds(self): + """ + That failures don't violate singleton constraint with multiple actives. + """ + + status = self._confirm_no_replay() + new_max_mds = randint(2, len(list(status.get_standbys()))) + self.fs.set_max_mds(new_max_mds) + self.fs.wait_for_daemons() # wait for actives to come online! + self.fs.set_allow_standby_replay(True) + status = self._confirm_single_replay(full=False) + + for i in range(10): + time.sleep(randint(1, 5)) + victim = randint(0, new_max_mds-1) + self.fs.rank_restart(rank=victim, status=status) + status = self._check_replay_takeover(status, rank=victim) + status = self._confirm_single_replay(status=status, full=False) + + for i in range(10): + time.sleep(randint(1, 5)) + victim = randint(0, new_max_mds-1) + self.fs.rank_fail(rank=victim) + status = self._check_replay_takeover(status, rank=victim) + status = self._confirm_single_replay(status=status, full=False) + + def test_standby_replay_failure(self): + """ + That the failure of a standby-replay daemon happens cleanly + and doesn't interrupt anything else. 
+ """ + + status = self._confirm_no_replay() + self.fs.set_max_mds(1) + self.fs.set_allow_standby_replay(True) + status = self._confirm_single_replay() + + for i in range(10): + time.sleep(randint(1, 5)) + victim = self.fs.get_replay(status=status) + self.fs.mds_restart(mds_id=victim['name']) + status = self._confirm_single_replay(status=status) + + def test_rank_stopped(self): + """ + That when a rank is STOPPED, standby replays for + that rank get torn down + """ + + status = self._confirm_no_replay() + standby_count = len(list(status.get_standbys())) + self.fs.set_max_mds(2) + self.fs.set_allow_standby_replay(True) + status = self._confirm_single_replay() + + self.fs.set_max_mds(1) # stop rank 1 + + status = self._confirm_single_replay() + self.assertTrue(standby_count, len(list(status.get_standbys()))) + + +class TestMultiFilesystems(CephFSTestCase): + CLIENTS_REQUIRED = 2 + MDSS_REQUIRED = 4 + + # We'll create our own filesystems and start our own daemons + REQUIRE_FILESYSTEM = False + + def setUp(self): + super(TestMultiFilesystems, self).setUp() + self.mds_cluster.mon_manager.raw_cluster_cmd("fs", "flag", "set", + "enable_multiple", "true", + "--yes-i-really-mean-it") + + def _setup_two(self): + fs_a = self.mds_cluster.newfs("alpha") + fs_b = self.mds_cluster.newfs("bravo") + + self.mds_cluster.mds_restart() + + # Wait for both filesystems to go healthy + fs_a.wait_for_daemons() + fs_b.wait_for_daemons() + + # Reconfigure client auth caps + for mount in self.mounts: + self.mds_cluster.mon_manager.raw_cluster_cmd_result( + 'auth', 'caps', "client.{0}".format(mount.client_id), + 'mds', 'allow', + 'mon', 'allow r', + 'osd', 'allow rw pool={0}, allow rw pool={1}'.format( + fs_a.get_data_pool_name(), fs_b.get_data_pool_name())) + + return fs_a, fs_b + + def test_clients(self): + fs_a, fs_b = self._setup_two() + + # Mount a client on fs_a + self.mount_a.mount(mount_fs_name=fs_a.name) + self.mount_a.write_n_mb("pad.bin", 1) + self.mount_a.write_n_mb("test.bin", 2) + a_created_ino = self.mount_a.path_to_ino("test.bin") + self.mount_a.create_files() + + # Mount a client on fs_b + self.mount_b.mount(mount_fs_name=fs_b.name) + self.mount_b.write_n_mb("test.bin", 1) + b_created_ino = self.mount_b.path_to_ino("test.bin") + self.mount_b.create_files() + + # Check that a non-default filesystem mount survives an MDS + # failover (i.e. that map subscription is continuous, not + # just the first time), reproduces #16022 + old_fs_b_mds = fs_b.get_active_names()[0] + self.mds_cluster.mds_stop(old_fs_b_mds) + self.mds_cluster.mds_fail(old_fs_b_mds) + fs_b.wait_for_daemons() + background = self.mount_b.write_background() + # Raise exception if the write doesn't finish (i.e. 
if client + # has not kept up with MDS failure) + try: + self.wait_until_true(lambda: background.finished, timeout=30) + except RuntimeError: + # The mount is stuck, we'll have to force it to fail cleanly + background.stdin.close() + self.mount_b.umount_wait(force=True) + raise + + self.mount_a.umount_wait() + self.mount_b.umount_wait() + + # See that the client's files went into the correct pool + self.assertTrue(fs_a.data_objects_present(a_created_ino, 1024 * 1024)) + self.assertTrue(fs_b.data_objects_present(b_created_ino, 1024 * 1024)) + + def test_standby(self): + fs_a, fs_b = self._setup_two() + + # Assert that the remaining two MDS daemons are now standbys + a_daemons = fs_a.get_active_names() + b_daemons = fs_b.get_active_names() + self.assertEqual(len(a_daemons), 1) + self.assertEqual(len(b_daemons), 1) + original_a = a_daemons[0] + original_b = b_daemons[0] + expect_standby_daemons = set(self.mds_cluster.mds_ids) - (set(a_daemons) | set(b_daemons)) + + # Need all my standbys up as well as the active daemons + self.wait_for_daemon_start() + self.assertEqual(expect_standby_daemons, self.mds_cluster.get_standby_daemons()) + + # Kill fs_a's active MDS, see a standby take over + self.mds_cluster.mds_stop(original_a) + self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_a) + self.wait_until_equal(lambda: len(fs_a.get_active_names()), 1, 30, + reject_fn=lambda v: v > 1) + # Assert that it's a *different* daemon that has now appeared in the map for fs_a + self.assertNotEqual(fs_a.get_active_names()[0], original_a) + + # Kill fs_b's active MDS, see a standby take over + self.mds_cluster.mds_stop(original_b) + self.mds_cluster.mon_manager.raw_cluster_cmd("mds", "fail", original_b) + self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30, + reject_fn=lambda v: v > 1) + # Assert that it's a *different* daemon that has now appeared in the map for fs_a + self.assertNotEqual(fs_b.get_active_names()[0], original_b) + + # Both of the original active daemons should be gone, and all standbys used up + self.assertEqual(self.mds_cluster.get_standby_daemons(), set()) + + # Restart the ones I killed, see them reappear as standbys + self.mds_cluster.mds_restart(original_a) + self.mds_cluster.mds_restart(original_b) + self.wait_until_true( + lambda: {original_a, original_b} == self.mds_cluster.get_standby_daemons(), + timeout=30 + ) + + def test_grow_shrink(self): + # Usual setup... + fs_a, fs_b = self._setup_two() + + # Increase max_mds on fs_b, see a standby take up the role + fs_b.set_max_mds(2) + self.wait_until_equal(lambda: len(fs_b.get_active_names()), 2, 30, + reject_fn=lambda v: v > 2 or v < 1) + + # Increase max_mds on fs_a, see a standby take up the role + fs_a.set_max_mds(2) + self.wait_until_equal(lambda: len(fs_a.get_active_names()), 2, 30, + reject_fn=lambda v: v > 2 or v < 1) + + # Shrink fs_b back to 1, see a daemon go back to standby + fs_b.set_max_mds(1) + self.wait_until_equal(lambda: len(fs_b.get_active_names()), 1, 30, + reject_fn=lambda v: v > 2 or v < 1) + + # Grow fs_a up to 3, see the former fs_b daemon join it. 
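Growing and shrinking here is ultimately just the max_mds setting; an illustrative equivalent of what set_max_mds() issues, with the filesystem name as a placeholder:

    import subprocess

    def set_max_mds(fs_name, n):
        # ask the mons to run n active ranks; they promote standbys to grow the
        # cluster and stop the highest ranks to shrink it
        subprocess.check_call(["ceph", "fs", "set", fs_name, "max_mds", str(n)])

    # e.g. set_max_mds("alpha", 3), then poll fs_a.get_active_names() until three
    # active daemons are reported, as the wait_until_equal calls here do.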
+ fs_a.set_max_mds(3) + self.wait_until_equal(lambda: len(fs_a.get_active_names()), 3, 60, + reject_fn=lambda v: v > 3 or v < 2) diff --git a/qa/tasks/cephfs/test_flush.py b/qa/tasks/cephfs/test_flush.py new file mode 100644 index 00000000..ee0b1c92 --- /dev/null +++ b/qa/tasks/cephfs/test_flush.py @@ -0,0 +1,113 @@ + +from textwrap import dedent +from tasks.cephfs.cephfs_test_case import CephFSTestCase +from tasks.cephfs.filesystem import ObjectNotFound, ROOT_INO + + +class TestFlush(CephFSTestCase): + def test_flush(self): + self.mount_a.run_shell(["mkdir", "mydir"]) + self.mount_a.run_shell(["touch", "mydir/alpha"]) + dir_ino = self.mount_a.path_to_ino("mydir") + file_ino = self.mount_a.path_to_ino("mydir/alpha") + + # Unmount the client so that it isn't still holding caps + self.mount_a.umount_wait() + + # Before flush, the dirfrag object does not exist + with self.assertRaises(ObjectNotFound): + self.fs.list_dirfrag(dir_ino) + + # Before flush, the file's backtrace has not been written + with self.assertRaises(ObjectNotFound): + self.fs.read_backtrace(file_ino) + + # Before flush, there are no dentries in the root + self.assertEqual(self.fs.list_dirfrag(ROOT_INO), []) + + # Execute flush + flush_data = self.fs.mds_asok(["flush", "journal"]) + self.assertEqual(flush_data['return_code'], 0) + + # After flush, the dirfrag object has been created + dir_list = self.fs.list_dirfrag(dir_ino) + self.assertEqual(dir_list, ["alpha_head"]) + + # And the 'mydir' dentry is in the root + self.assertEqual(self.fs.list_dirfrag(ROOT_INO), ['mydir_head']) + + # ...and the data object has its backtrace + backtrace = self.fs.read_backtrace(file_ino) + self.assertEqual(['alpha', 'mydir'], [a['dname'] for a in backtrace['ancestors']]) + self.assertEqual([dir_ino, 1], [a['dirino'] for a in backtrace['ancestors']]) + self.assertEqual(file_ino, backtrace['ino']) + + # ...and the journal is truncated to just a single subtreemap from the + # newly created segment + summary_output = self.fs.journal_tool(["event", "get", "summary"], 0) + try: + self.assertEqual(summary_output, + dedent( + """ + Events by type: + SUBTREEMAP: 1 + Errors: 0 + """ + ).strip()) + except AssertionError: + # In some states, flushing the journal will leave you + # an extra event from locks a client held. This is + # correct behaviour: the MDS is flushing the journal, + # it's just that new events are getting added too. + # In this case, we should nevertheless see a fully + # empty journal after a second flush. + self.assertEqual(summary_output, + dedent( + """ + Events by type: + SUBTREEMAP: 1 + UPDATE: 1 + Errors: 0 + """ + ).strip()) + flush_data = self.fs.mds_asok(["flush", "journal"]) + self.assertEqual(flush_data['return_code'], 0) + self.assertEqual(self.fs.journal_tool(["event", "get", "summary"], 0), + dedent( + """ + Events by type: + SUBTREEMAP: 1 + Errors: 0 + """ + ).strip()) + + # Now for deletion! 
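The journal flush and the event summary compared above have direct command-line counterparts; a sketch, with the daemon name and filesystem name as assumptions:

    import subprocess

    MDS_ID = "a"          # assumption: name of the active MDS daemon
    FS_NAME = "cephfs"    # assumption: filesystem name (rank 0 is inspected)

    def flush_journal():
        # write back journaled metadata and trim the journal, as
        # mds_asok(["flush", "journal"]) does in this test
        subprocess.check_call(["ceph", "daemon", "mds." + MDS_ID, "flush", "journal"])

    def journal_summary():
        # the per-event-type summary that the SUBTREEMAP/UPDATE assertions parse
        out = subprocess.check_output(["cephfs-journal-tool",
                                       "--rank", FS_NAME + ":0",
                                       "event", "get", "summary"])
        return out.decode()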
+ # We will count the RADOS deletions and MDS file purges, to verify that + # the expected behaviour is happening as a result of the purge + initial_dels = self.fs.mds_asok(['perf', 'dump', 'objecter'])['objecter']['osdop_delete'] + initial_purges = self.fs.mds_asok(['perf', 'dump', 'mds_cache'])['mds_cache']['strays_enqueued'] + + # Use a client to delete a file + self.mount_a.mount() + self.mount_a.wait_until_mounted() + self.mount_a.run_shell(["rm", "-rf", "mydir"]) + + # Flush the journal so that the directory inode can be purged + flush_data = self.fs.mds_asok(["flush", "journal"]) + self.assertEqual(flush_data['return_code'], 0) + + # We expect to see a single file purge + self.wait_until_true( + lambda: self.fs.mds_asok(['perf', 'dump', 'mds_cache'])['mds_cache']['strays_enqueued'] - initial_purges >= 2, + 60) + + # We expect two deletions, one of the dirfrag and one of the backtrace + self.wait_until_true( + lambda: self.fs.mds_asok(['perf', 'dump', 'objecter'])['objecter']['osdop_delete'] - initial_dels >= 2, + 60) # timeout is fairly long to allow for tick+rados latencies + + with self.assertRaises(ObjectNotFound): + self.fs.list_dirfrag(dir_ino) + with self.assertRaises(ObjectNotFound): + self.fs.read_backtrace(file_ino) + self.assertEqual(self.fs.list_dirfrag(ROOT_INO), []) diff --git a/qa/tasks/cephfs/test_forward_scrub.py b/qa/tasks/cephfs/test_forward_scrub.py new file mode 100644 index 00000000..cc861b38 --- /dev/null +++ b/qa/tasks/cephfs/test_forward_scrub.py @@ -0,0 +1,298 @@ + +""" +Test that the forward scrub functionality can traverse metadata and apply +requested tags, on well formed metadata. + +This is *not* the real testing for forward scrub, which will need to test +how the functionality responds to damaged metadata. + +""" +import json + +import logging +import six + +from collections import namedtuple +from io import BytesIO +from textwrap import dedent + +from teuthology.orchestra.run import CommandFailedError +from tasks.cephfs.cephfs_test_case import CephFSTestCase + +import struct + +log = logging.getLogger(__name__) + + +ValidationError = namedtuple("ValidationError", ["exception", "backtrace"]) + + +class TestForwardScrub(CephFSTestCase): + MDSS_REQUIRED = 1 + + def _read_str_xattr(self, pool, obj, attr): + """ + Read a ceph-encoded string from a rados xattr + """ + output = self.fs.rados(["getxattr", obj, attr], pool=pool, + stdout_data=BytesIO()) + strlen = struct.unpack('i', output[0:4])[0] + return six.ensure_str(output[4:(4 + strlen)], encoding='ascii') + + def _get_paths_to_ino(self): + inos = {} + p = self.mount_a.run_shell(["find", "./"]) + paths = p.stdout.getvalue().strip().split() + for path in paths: + inos[path] = self.mount_a.path_to_ino(path) + + return inos + + def test_apply_tag(self): + self.mount_a.run_shell(["mkdir", "parentdir"]) + self.mount_a.run_shell(["mkdir", "parentdir/childdir"]) + self.mount_a.run_shell(["touch", "rfile"]) + self.mount_a.run_shell(["touch", "parentdir/pfile"]) + self.mount_a.run_shell(["touch", "parentdir/childdir/cfile"]) + + # Build a structure mapping path to inode, as we will later want + # to check object by object and objects are named after ino number + inos = self._get_paths_to_ino() + + # Flush metadata: this is a friendly test of forward scrub so we're skipping + # the part where it's meant to cope with dirty metadata + self.mount_a.umount_wait() + self.fs.mds_asok(["flush", "journal"]) + + tag = "mytag" + + # Execute tagging forward scrub + self.fs.mds_asok(["tag", "path", "/parentdir", tag]) + # 
Wait for completion + import time + time.sleep(10) + # FIXME watching clog isn't a nice mechanism for this, once we have a ScrubMap we'll + # watch that instead + + # Check that dirs were tagged + for dirpath in ["./parentdir", "./parentdir/childdir"]: + self.assertTagged(inos[dirpath], tag, self.fs.get_metadata_pool_name()) + + # Check that files were tagged + for filepath in ["./parentdir/pfile", "./parentdir/childdir/cfile"]: + self.assertTagged(inos[filepath], tag, self.fs.get_data_pool_name()) + + # This guy wasn't in the tag path, shouldn't have been tagged + self.assertUntagged(inos["./rfile"]) + + def assertUntagged(self, ino): + file_obj_name = "{0:x}.00000000".format(ino) + with self.assertRaises(CommandFailedError): + self._read_str_xattr( + self.fs.get_data_pool_name(), + file_obj_name, + "scrub_tag" + ) + + def assertTagged(self, ino, tag, pool): + file_obj_name = "{0:x}.00000000".format(ino) + wrote = self._read_str_xattr( + pool, + file_obj_name, + "scrub_tag" + ) + self.assertEqual(wrote, tag) + + def _validate_linkage(self, expected): + inos = self._get_paths_to_ino() + try: + self.assertDictEqual(inos, expected) + except AssertionError: + log.error("Expected: {0}".format(json.dumps(expected, indent=2))) + log.error("Actual: {0}".format(json.dumps(inos, indent=2))) + raise + + def test_orphan_scan(self): + # Create some files whose metadata we will flush + self.mount_a.run_python(dedent(""" + import os + mount_point = "{mount_point}" + parent = os.path.join(mount_point, "parent") + os.mkdir(parent) + flushed = os.path.join(parent, "flushed") + os.mkdir(flushed) + for f in ["alpha", "bravo", "charlie"]: + open(os.path.join(flushed, f), 'w').write(f) + """.format(mount_point=self.mount_a.mountpoint))) + + inos = self._get_paths_to_ino() + + # Flush journal + # Umount before flush to avoid cap releases putting + # things we don't want in the journal later. + self.mount_a.umount_wait() + self.fs.mds_asok(["flush", "journal"]) + + # Create a new inode that's just in the log, i.e. would + # look orphaned to backward scan if backward scan wisnae + # respectin' tha scrub_tag xattr. + self.mount_a.mount() + self.mount_a.run_shell(["mkdir", "parent/unflushed"]) + self.mount_a.run_shell(["dd", "if=/dev/urandom", + "of=./parent/unflushed/jfile", + "bs=1M", "count=8"]) + inos["./parent/unflushed"] = self.mount_a.path_to_ino("./parent/unflushed") + inos["./parent/unflushed/jfile"] = self.mount_a.path_to_ino("./parent/unflushed/jfile") + self.mount_a.umount_wait() + + # Orphan an inode by deleting its dentry + # Our victim will be.... bravo. 
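The _read_str_xattr() helper above decodes a ceph-encoded string: a 32-bit length prefix followed by that many bytes. A small round-trip sketch (the test uses the platform-native 'i' format, which matches Ceph's little-endian encoding on x86):

    import struct

    def decode_ceph_string(blob):
        # 4-byte little-endian length, then the raw bytes of the string
        (strlen,) = struct.unpack("<i", blob[:4])
        return blob[4:4 + strlen].decode("ascii")

    def encode_ceph_string(s):
        data = s.encode("ascii")
        return struct.pack("<i", len(data)) + data

    assert decode_ceph_string(encode_ceph_string("mytag")) == "mytag"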
+ self.mount_a.umount_wait() + self.fs.mds_stop() + self.fs.mds_fail() + self.fs.set_ceph_conf('mds', 'mds verify scatter', False) + self.fs.set_ceph_conf('mds', 'mds debug scatterstat', False) + frag_obj_id = "{0:x}.00000000".format(inos["./parent/flushed"]) + self.fs.rados(["rmomapkey", frag_obj_id, "bravo_head"]) + + self.fs.mds_restart() + self.fs.wait_for_daemons() + + # See that the orphaned file is indeed missing from a client's POV + self.mount_a.mount() + damaged_state = self._get_paths_to_ino() + self.assertNotIn("./parent/flushed/bravo", damaged_state) + self.mount_a.umount_wait() + + # Run a tagging forward scrub + tag = "mytag123" + self.fs.mds_asok(["tag", "path", "/parent", tag]) + + # See that the orphan wisnae tagged + self.assertUntagged(inos['./parent/flushed/bravo']) + + # See that the flushed-metadata-and-still-present files are tagged + self.assertTagged(inos['./parent/flushed/alpha'], tag, self.fs.get_data_pool_name()) + self.assertTagged(inos['./parent/flushed/charlie'], tag, self.fs.get_data_pool_name()) + + # See that journalled-but-not-flushed file *was* tagged + self.assertTagged(inos['./parent/unflushed/jfile'], tag, self.fs.get_data_pool_name()) + + # Run cephfs-data-scan targeting only orphans + self.fs.mds_stop() + self.fs.mds_fail() + self.fs.data_scan(["scan_extents", self.fs.get_data_pool_name()]) + self.fs.data_scan([ + "scan_inodes", + "--filter-tag", tag, + self.fs.get_data_pool_name() + ]) + + # After in-place injection stats should be kosher again + self.fs.set_ceph_conf('mds', 'mds verify scatter', True) + self.fs.set_ceph_conf('mds', 'mds debug scatterstat', True) + + # And we should have all the same linkage we started with, + # and no lost+found, and no extra inodes! + self.fs.mds_restart() + self.fs.wait_for_daemons() + self.mount_a.mount() + self._validate_linkage(inos) + + def _stash_inotable(self): + # Get all active ranks + ranks = self.fs.get_all_mds_rank() + + inotable_dict = {} + for rank in ranks: + inotable_oid = "mds{rank:d}_".format(rank=rank) + "inotable" + print("Trying to fetch inotable object: " + inotable_oid) + + #self.fs.get_metadata_object("InoTable", "mds0_inotable") + inotable_raw = self.fs.get_metadata_object_raw(inotable_oid) + inotable_dict[inotable_oid] = inotable_raw + return inotable_dict + + def test_inotable_sync(self): + self.mount_a.write_n_mb("file1_sixmegs", 6) + + # Flush journal + self.mount_a.umount_wait() + self.fs.mds_asok(["flush", "journal"]) + + inotable_copy = self._stash_inotable() + + self.mount_a.mount() + + self.mount_a.write_n_mb("file2_sixmegs", 6) + self.mount_a.write_n_mb("file3_sixmegs", 6) + + inos = self._get_paths_to_ino() + + # Flush journal + self.mount_a.umount_wait() + self.fs.mds_asok(["flush", "journal"]) + + self.mount_a.umount_wait() + + with self.assert_cluster_log("inode table repaired", invert_match=True): + out_json = self.fs.rank_tell(["scrub", "start", "/", "repair", "recursive"]) + self.assertNotEqual(out_json, None) + + self.mds_cluster.mds_stop() + self.mds_cluster.mds_fail() + + # Truncate the journal (to ensure the inotable on disk + # is all that will be in the InoTable in memory) + + self.fs.journal_tool(["event", "splice", + "--inode={0}".format(inos["./file2_sixmegs"]), "summary"], 0) + + self.fs.journal_tool(["event", "splice", + "--inode={0}".format(inos["./file3_sixmegs"]), "summary"], 0) + + # Revert to old inotable. 
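The inode-table assertions that follow read the same data that cephfs-table-tool exposes; a sketch of that inspection, with the JSON field names taken from the test's own expectations:

    import json
    import subprocess

    def show_inotable(rank=0):
        # dump one rank's inode table ("all" works in place of a rank number);
        # this is what self.fs.table_tool([...]) wraps in these tests
        out = subprocess.check_output(["cephfs-table-tool", str(rank), "show", "inode"])
        return json.loads(out)

    # the assertions below then look at
    #   table['0']['data']['inotable']['free'][0]['start']
    # i.e. the first free inode range must start above the inodes written earlier.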
+ for key, value in inotable_copy.items(): + self.fs.put_metadata_object_raw(key, value) + + self.mds_cluster.mds_restart() + self.fs.wait_for_daemons() + + with self.assert_cluster_log("inode table repaired"): + out_json = self.fs.rank_tell(["scrub", "start", "/", "repair", "recursive"]) + self.assertNotEqual(out_json, None) + + self.mds_cluster.mds_stop() + table_text = self.fs.table_tool(["0", "show", "inode"]) + table = json.loads(table_text) + self.assertGreater( + table['0']['data']['inotable']['free'][0]['start'], + inos['./file3_sixmegs']) + + def test_backtrace_repair(self): + """ + That the MDS can repair an inodes backtrace in the data pool + if it is found to be damaged. + """ + # Create a file for subsequent checks + self.mount_a.run_shell(["mkdir", "parent_a"]) + self.mount_a.run_shell(["touch", "parent_a/alpha"]) + file_ino = self.mount_a.path_to_ino("parent_a/alpha") + + # That backtrace and layout are written after initial flush + self.fs.mds_asok(["flush", "journal"]) + backtrace = self.fs.read_backtrace(file_ino) + self.assertEqual(['alpha', 'parent_a'], + [a['dname'] for a in backtrace['ancestors']]) + + # Go corrupt the backtrace + self.fs._write_data_xattr(file_ino, "parent", + "oh i'm sorry did i overwrite your xattr?") + + with self.assert_cluster_log("bad backtrace on inode"): + out_json = self.fs.rank_tell(["scrub", "start", "/", "repair", "recursive"]) + self.assertNotEqual(out_json, None) + self.fs.mds_asok(["flush", "journal"]) + backtrace = self.fs.read_backtrace(file_ino) + self.assertEqual(['alpha', 'parent_a'], + [a['dname'] for a in backtrace['ancestors']]) diff --git a/qa/tasks/cephfs/test_fragment.py b/qa/tasks/cephfs/test_fragment.py new file mode 100644 index 00000000..0ed5da28 --- /dev/null +++ b/qa/tasks/cephfs/test_fragment.py @@ -0,0 +1,229 @@ + + +from tasks.cephfs.cephfs_test_case import CephFSTestCase +from teuthology.orchestra import run + +import logging +log = logging.getLogger(__name__) + + +class TestFragmentation(CephFSTestCase): + CLIENTS_REQUIRED = 1 + MDSS_REQUIRED = 1 + + def get_splits(self): + return self.fs.mds_asok(['perf', 'dump', 'mds'])['mds']['dir_split'] + + def get_merges(self): + return self.fs.mds_asok(['perf', 'dump', 'mds'])['mds']['dir_merge'] + + def get_dir_ino(self, path): + dir_cache = self.fs.read_cache(path, 0) + dir_ino = None + dir_inono = self.mount_a.path_to_ino(path.strip("/")) + for ino in dir_cache: + if ino['ino'] == dir_inono: + dir_ino = ino + break + self.assertIsNotNone(dir_ino) + return dir_ino + + def _configure(self, **kwargs): + """ + Apply kwargs as MDS configuration settings, enable dirfrags + and restart the MDSs. + """ + + for k, v in kwargs.items(): + self.ceph_cluster.set_ceph_conf("mds", k, v.__str__()) + + self.mds_cluster.mds_fail_restart() + self.fs.wait_for_daemons() + + def test_oversize(self): + """ + That a directory is split when it becomes too large. 
+ """ + + split_size = 20 + merge_size = 5 + + self._configure( + mds_bal_split_size=split_size, + mds_bal_merge_size=merge_size, + mds_bal_split_bits=1 + ) + + self.assertEqual(self.get_splits(), 0) + + self.mount_a.create_n_files("splitdir/file", split_size + 1) + + self.wait_until_true( + lambda: self.get_splits() == 1, + timeout=30 + ) + + frags = self.get_dir_ino("/splitdir")['dirfrags'] + self.assertEqual(len(frags), 2) + self.assertEqual(frags[0]['dirfrag'], "0x10000000000.0*") + self.assertEqual(frags[1]['dirfrag'], "0x10000000000.1*") + self.assertEqual( + sum([len(f['dentries']) for f in frags]), + split_size + 1 + ) + + self.assertEqual(self.get_merges(), 0) + + self.mount_a.run_shell(["rm", "-f", run.Raw("splitdir/file*")]) + + self.wait_until_true( + lambda: self.get_merges() == 1, + timeout=30 + ) + + self.assertEqual(len(self.get_dir_ino("/splitdir")["dirfrags"]), 1) + + def test_rapid_creation(self): + """ + That the fast-splitting limit of 1.5x normal limit is + applied when creating dentries quickly. + """ + + split_size = 100 + merge_size = 1 + + self._configure( + mds_bal_split_size=split_size, + mds_bal_merge_size=merge_size, + mds_bal_split_bits=3, + mds_bal_fragment_size_max=int(split_size * 1.5 + 2) + ) + + # We test this only at a single split level. If a client was sending + # IO so fast that it hit a second split before the first split + # was complete, it could violate mds_bal_fragment_size_max -- there + # is a window where the child dirfrags of a split are unfrozen + # (so they can grow), but still have STATE_FRAGMENTING (so they + # can't be split). + + # By writing 4x the split size when the split bits are set + # to 3 (i.e. 4-ways), I am reasonably sure to see precisely + # one split. The test is to check whether that split + # happens soon enough that the client doesn't exceed + # 2x the split_size (the "immediate" split mode should + # kick in at 1.5x the split size). + + self.assertEqual(self.get_splits(), 0) + self.mount_a.create_n_files("splitdir/file", split_size * 4) + self.wait_until_equal( + self.get_splits, + 1, + reject_fn=lambda s: s > 1, + timeout=30 + ) + + def test_deep_split(self): + """ + That when the directory grows many times larger than split size, + the fragments get split again. + """ + + split_size = 100 + merge_size = 1 # i.e. don't merge frag unless its empty + split_bits = 1 + + branch_factor = 2**split_bits + + # Arbitrary: how many levels shall we try fragmenting before + # ending the test? + max_depth = 5 + + self._configure( + mds_bal_split_size=split_size, + mds_bal_merge_size=merge_size, + mds_bal_split_bits=split_bits + ) + + # Each iteration we will create another level of fragments. The + # placement of dentries into fragments is by hashes (i.e. pseudo + # random), so we rely on statistics to get the behaviour that + # by writing about 1.5x as many dentries as the split_size times + # the number of frags, we will get them all to exceed their + # split size and trigger a split. 
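Concretely, with split_size=100 and split_bits=1 (branch_factor=2), the loop below aims at these cumulative targets; a small sketch of the arithmetic it relies on:

    branch_factor, split_size = 2, 100

    for depth in range(3):
        target_files = branch_factor ** depth * int(split_size * 1.5)
        new_splits = branch_factor ** depth
        frags_after = branch_factor ** (depth + 1)
        print(depth, target_files, new_splits, frags_after)

    # depth 0: 150 files -> 1 new split,  2 fragments
    # depth 1: 300 files -> 2 new splits, 4 fragments
    # depth 2: 600 files -> 4 new splits, 8 fragments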
+ depth = 0 + files_written = 0 + splits_expected = 0 + while depth < max_depth: + log.info("Writing files for depth {0}".format(depth)) + target_files = branch_factor**depth * int(split_size * 1.5) + create_files = target_files - files_written + + self.ceph_cluster.mon_manager.raw_cluster_cmd("log", + "{0} Writing {1} files (depth={2})".format( + self.__class__.__name__, create_files, depth + )) + self.mount_a.create_n_files("splitdir/file_{0}".format(depth), + create_files) + self.ceph_cluster.mon_manager.raw_cluster_cmd("log", + "{0} Done".format(self.__class__.__name__)) + + files_written += create_files + log.info("Now have {0} files".format(files_written)) + + splits_expected += branch_factor**depth + log.info("Waiting to see {0} splits".format(splits_expected)) + try: + self.wait_until_equal( + self.get_splits, + splits_expected, + timeout=30, + reject_fn=lambda x: x > splits_expected + ) + + frags = self.get_dir_ino("/splitdir")['dirfrags'] + self.assertEqual(len(frags), branch_factor**(depth+1)) + self.assertEqual( + sum([len(f['dentries']) for f in frags]), + target_files + ) + except: + # On failures, log what fragmentation we actually ended + # up with. This block is just for logging, at the end + # we raise the exception again. + frags = self.get_dir_ino("/splitdir")['dirfrags'] + log.info("depth={0} splits_expected={1} files_written={2}".format( + depth, splits_expected, files_written + )) + log.info("Dirfrags:") + for f in frags: + log.info("{0}: {1}".format( + f['dirfrag'], len(f['dentries']) + )) + raise + + depth += 1 + + # Remember the inode number because we will be checking for + # objects later. + dir_inode_no = self.mount_a.path_to_ino("splitdir") + + self.mount_a.run_shell(["rm", "-rf", "splitdir/"]) + self.mount_a.umount_wait() + + self.fs.mds_asok(['flush', 'journal']) + + # Wait for all strays to purge + self.wait_until_equal( + lambda: self.fs.mds_asok(['perf', 'dump', 'mds_cache'] + )['mds_cache']['num_strays'], + 0, + timeout=1200 + ) + # Check that the metadata pool objects for all the myriad + # child fragments are gone + metadata_objs = self.fs.rados(["ls"]) + frag_objs = [] + for o in metadata_objs: + if o.startswith("{0:x}.".format(dir_inode_no)): + frag_objs.append(o) + self.assertListEqual(frag_objs, []) diff --git a/qa/tasks/cephfs/test_full.py b/qa/tasks/cephfs/test_full.py new file mode 100644 index 00000000..21470c87 --- /dev/null +++ b/qa/tasks/cephfs/test_full.py @@ -0,0 +1,398 @@ + + +import json +import logging +import os +from textwrap import dedent +import time +from teuthology.orchestra.run import CommandFailedError +from tasks.cephfs.fuse_mount import FuseMount +from tasks.cephfs.cephfs_test_case import CephFSTestCase + + +log = logging.getLogger(__name__) + + +class FullnessTestCase(CephFSTestCase): + CLIENTS_REQUIRED = 2 + + # Subclasses define whether they're filling whole cluster or just data pool + data_only = False + + # Subclasses define how many bytes should be written to achieve fullness + pool_capacity = None + fill_mb = None + + # Subclasses define what fullness means to them + def is_full(self): + raise NotImplementedError() + + def setUp(self): + CephFSTestCase.setUp(self) + + mds_status = self.fs.rank_asok(["status"]) + + # Capture the initial OSD map epoch for later use + self.initial_osd_epoch = mds_status['osdmap_epoch_barrier'] + + def test_barrier(self): + """ + That when an OSD epoch barrier is set on an MDS, subsequently + issued capabilities cause clients to update their OSD map to that + epoch. 
+ """ + + # script that sync up client with MDS OSD map barrier. The barrier should + # be updated by cap flush ack message. + pyscript = dedent(""" + import os + fd = os.open("{path}", os.O_CREAT | os.O_RDWR, 0O600) + os.fchmod(fd, 0O666) + os.fsync(fd) + os.close(fd) + """) + + # Sync up client with initial MDS OSD map barrier. + path = os.path.join(self.mount_a.mountpoint, "foo") + self.mount_a.run_python(pyscript.format(path=path)) + + # Grab mounts' initial OSD epochs: later we will check that + # it hasn't advanced beyond this point. + mount_a_initial_epoch, mount_a_initial_barrier = self.mount_a.get_osd_epoch() + + # Freshly mounted at start of test, should be up to date with OSD map + self.assertGreaterEqual(mount_a_initial_epoch, self.initial_osd_epoch) + + # Set and unset a flag to cause OSD epoch to increment + self.fs.mon_manager.raw_cluster_cmd("osd", "set", "pause") + self.fs.mon_manager.raw_cluster_cmd("osd", "unset", "pause") + + out = self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json").strip() + new_epoch = json.loads(out)['epoch'] + self.assertNotEqual(self.initial_osd_epoch, new_epoch) + + # Do a metadata operation on clients, witness that they end up with + # the old OSD map from startup time (nothing has prompted client + # to update its map) + path = os.path.join(self.mount_a.mountpoint, "foo") + self.mount_a.run_python(pyscript.format(path=path)) + mount_a_epoch, mount_a_barrier = self.mount_a.get_osd_epoch() + self.assertEqual(mount_a_epoch, mount_a_initial_epoch) + self.assertEqual(mount_a_barrier, mount_a_initial_barrier) + + # Set a barrier on the MDS + self.fs.rank_asok(["osdmap", "barrier", new_epoch.__str__()]) + + # Sync up client with new MDS OSD map barrier + path = os.path.join(self.mount_a.mountpoint, "baz") + self.mount_a.run_python(pyscript.format(path=path)) + mount_a_epoch, mount_a_barrier = self.mount_a.get_osd_epoch() + self.assertEqual(mount_a_barrier, new_epoch) + + # Some time passes here because the metadata part of the operation + # completes immediately, while the resulting OSD map update happens + # asynchronously (it's an Objecter::_maybe_request_map) as a result + # of seeing the new epoch barrier. + self.wait_until_true( + lambda: self.mount_a.get_osd_epoch()[0] >= new_epoch, + timeout=30) + + def _data_pool_name(self): + data_pool_names = self.fs.get_data_pool_names() + if len(data_pool_names) > 1: + raise RuntimeError("This test can't handle multiple data pools") + else: + return data_pool_names[0] + + def _test_full(self, easy_case): + """ + - That a client trying to write data to a file is prevented + from doing so with an -EFULL result + - That they are also prevented from creating new files by the MDS. + - That they may delete another file to get the system healthy again + + :param easy_case: if true, delete a successfully written file to + free up space. else, delete the file that experienced + the failed write. + """ + + osd_mon_report_interval = int(self.fs.get_config("osd_mon_report_interval", service_type='osd')) + + log.info("Writing {0}MB should fill this cluster".format(self.fill_mb)) + + # Fill up the cluster. 
This dd may or may not fail, as it depends on + # how soon the cluster recognises its own fullness + self.mount_a.write_n_mb("large_file_a", self.fill_mb // 2) + try: + self.mount_a.write_n_mb("large_file_b", self.fill_mb // 2) + except CommandFailedError: + log.info("Writing file B failed (full status happened already)") + assert self.is_full() + else: + log.info("Writing file B succeeded (full status will happen soon)") + self.wait_until_true(lambda: self.is_full(), + timeout=osd_mon_report_interval * 5) + + # Attempting to write more data should give me ENOSPC + with self.assertRaises(CommandFailedError) as ar: + self.mount_a.write_n_mb("large_file_b", 50, seek=self.fill_mb // 2) + self.assertEqual(ar.exception.exitstatus, 1) # dd returns 1 on "No space" + + # Wait for the MDS to see the latest OSD map so that it will reliably + # be applying the policy of rejecting non-deletion metadata operations + # while in the full state. + osd_epoch = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['epoch'] + self.wait_until_true( + lambda: self.fs.rank_asok(['status'])['osdmap_epoch'] >= osd_epoch, + timeout=10) + + if not self.data_only: + with self.assertRaises(CommandFailedError): + self.mount_a.write_n_mb("small_file_1", 0) + + # Clear out some space + if easy_case: + self.mount_a.run_shell(['rm', '-f', 'large_file_a']) + self.mount_a.run_shell(['rm', '-f', 'large_file_b']) + else: + # In the hard case it is the file that filled the system. + # Before the new #7317 (ENOSPC, epoch barrier) changes, this + # would fail because the last objects written would be + # stuck in the client cache as objecter operations. + self.mount_a.run_shell(['rm', '-f', 'large_file_b']) + self.mount_a.run_shell(['rm', '-f', 'large_file_a']) + + # Here we are waiting for two things to happen: + # * The MDS to purge the stray folder and execute object deletions + # * The OSDs to inform the mon that they are no longer full + self.wait_until_true(lambda: not self.is_full(), + timeout=osd_mon_report_interval * 5) + + # Wait for the MDS to see the latest OSD map so that it will reliably + # be applying the free space policy + osd_epoch = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['epoch'] + self.wait_until_true( + lambda: self.fs.rank_asok(['status'])['osdmap_epoch'] >= osd_epoch, + timeout=10) + + # Now I should be able to write again + self.mount_a.write_n_mb("large_file", 50, seek=0) + + # Ensure that the MDS keeps its OSD epoch barrier across a restart + + def test_full_different_file(self): + self._test_full(True) + + def test_full_same_file(self): + self._test_full(False) + + def _remote_write_test(self, template): + """ + Run some remote python in a way that's useful for + testing free space behaviour (see test_* methods using this) + """ + file_path = os.path.join(self.mount_a.mountpoint, "full_test_file") + + # Enough to trip the full flag + osd_mon_report_interval = int(self.fs.get_config("osd_mon_report_interval", service_type='osd')) + mon_tick_interval = int(self.fs.get_config("mon_tick_interval", service_type="mon")) + + # Sufficient data to cause RADOS cluster to go 'full' + log.info("pool capacity {0}, {1}MB should be enough to fill it".format(self.pool_capacity, self.fill_mb)) + + # Long enough for RADOS cluster to notice it is full and set flag on mons + # (report_interval for mon to learn PG stats, tick interval for it to update OSD map, + # factor of 1.5 for I/O + network latency in committing OSD map and distributing 
it + # to the OSDs) + full_wait = (osd_mon_report_interval + mon_tick_interval) * 1.5 + + # Configs for this test should bring this setting down in order to + # run reasonably quickly + if osd_mon_report_interval > 10: + log.warning("This test may run rather slowly unless you decrease" + "osd_mon_report_interval (5 is a good setting)!") + + self.mount_a.run_python(template.format( + fill_mb=self.fill_mb, + file_path=file_path, + full_wait=full_wait, + is_fuse=isinstance(self.mount_a, FuseMount) + )) + + def test_full_fclose(self): + # A remote script which opens a file handle, fills up the filesystem, and then + # checks that ENOSPC errors on buffered writes appear correctly as errors in fsync + remote_script = dedent(""" + import time + import datetime + import subprocess + import os + + # Write some buffered data through before going full, all should be well + print("writing some data through which we expect to succeed") + bytes = 0 + f = os.open("{file_path}", os.O_WRONLY | os.O_CREAT) + bytes += os.write(f, b'a' * 512 * 1024) + os.fsync(f) + print("fsync'ed data successfully, will now attempt to fill fs") + + # Okay, now we're going to fill up the filesystem, and then keep + # writing until we see an error from fsync. As long as we're doing + # buffered IO, the error should always only appear from fsync and not + # from write + full = False + + for n in range(0, int({fill_mb} * 0.9)): + bytes += os.write(f, b'x' * 1024 * 1024) + print("wrote {{0}} bytes via buffered write, may repeat".format(bytes)) + print("done writing {{0}} bytes".format(bytes)) + + # OK, now we should sneak in under the full condition + # due to the time it takes the OSDs to report to the + # mons, and get a successful fsync on our full-making data + os.fsync(f) + print("successfully fsync'ed prior to getting full state reported") + + # buffered write, add more dirty data to the buffer + print("starting buffered write") + try: + for n in range(0, int({fill_mb} * 0.2)): + bytes += os.write(f, b'x' * 1024 * 1024) + print("sleeping a bit as we've exceeded 90% of our expected full ratio") + time.sleep({full_wait}) + except OSError: + pass; + + print("wrote, now waiting 30s and then doing a close we expect to fail") + + # Wait long enough for a background flush that should fail + time.sleep(30) + + if {is_fuse}: + # ...and check that the failed background flush is reflected in fclose + try: + os.close(f) + except OSError: + print("close() returned an error as expected") + else: + raise RuntimeError("close() failed to raise error") + else: + # The kernel cephfs client does not raise errors on fclose + os.close(f) + + os.unlink("{file_path}") + """) + self._remote_write_test(remote_script) + + def test_full_fsync(self): + """ + That when the full flag is encountered during asynchronous + flushes, such that an fwrite() succeeds but an fsync/fclose() + should return the ENOSPC error. 
+ """ + + # A remote script which opens a file handle, fills up the filesystem, and then + # checks that ENOSPC errors on buffered writes appear correctly as errors in fsync + remote_script = dedent(""" + import time + import datetime + import subprocess + import os + + # Write some buffered data through before going full, all should be well + print("writing some data through which we expect to succeed") + bytes = 0 + f = os.open("{file_path}", os.O_WRONLY | os.O_CREAT) + bytes += os.write(f, b'a' * 4096) + os.fsync(f) + print("fsync'ed data successfully, will now attempt to fill fs") + + # Okay, now we're going to fill up the filesystem, and then keep + # writing until we see an error from fsync. As long as we're doing + # buffered IO, the error should always only appear from fsync and not + # from write + full = False + + for n in range(0, int({fill_mb} * 1.1)): + try: + bytes += os.write(f, b'x' * 1024 * 1024) + print("wrote bytes via buffered write, moving on to fsync") + except OSError as e: + print("Unexpected error %s from write() instead of fsync()" % e) + raise + + try: + os.fsync(f) + print("fsync'ed successfully") + except OSError as e: + print("Reached fullness after %.2f MB" % (bytes / (1024.0 * 1024.0))) + full = True + break + else: + print("Not full yet after %.2f MB" % (bytes / (1024.0 * 1024.0))) + + if n > {fill_mb} * 0.9: + # Be cautious in the last region where we expect to hit + # the full condition, so that we don't overshoot too dramatically + print("sleeping a bit as we've exceeded 90% of our expected full ratio") + time.sleep({full_wait}) + + if not full: + raise RuntimeError("Failed to reach fullness after writing %d bytes" % bytes) + + # close() should not raise an error because we already caught it in + # fsync. There shouldn't have been any more writeback errors + # since then because all IOs got cancelled on the full flag. + print("calling close") + os.close(f) + print("close() did not raise error") + + os.unlink("{file_path}") + """) + + self._remote_write_test(remote_script) + + +class TestQuotaFull(FullnessTestCase): + """ + Test per-pool fullness, which indicates quota limits exceeded + """ + pool_capacity = 1024 * 1024 * 32 # arbitrary low-ish limit + fill_mb = pool_capacity // (1024 * 1024) + + # We are only testing quota handling on the data pool, not the metadata + # pool. + data_only = True + + def setUp(self): + super(TestQuotaFull, self).setUp() + + pool_name = self.fs.get_data_pool_name() + self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", pool_name, + "max_bytes", "{0}".format(self.pool_capacity)) + + def is_full(self): + return self.fs.is_full() + + +class TestClusterFull(FullnessTestCase): + """ + Test data pool fullness, which indicates that an OSD has become too full + """ + pool_capacity = None + REQUIRE_MEMSTORE = True + + def setUp(self): + super(TestClusterFull, self).setUp() + + if self.pool_capacity is None: + max_avail = self.fs.get_pool_df(self._data_pool_name())['max_avail'] + full_ratio = float(self.fs.get_config("mon_osd_full_ratio", service_type="mon")) + TestClusterFull.pool_capacity = int(max_avail * full_ratio) + TestClusterFull.fill_mb = (self.pool_capacity // (1024 * 1024)) + + def is_full(self): + return self.fs.is_full() + +# Hide the parent class so that unittest.loader doesn't try to run it. 
+del globals()['FullnessTestCase'] diff --git a/qa/tasks/cephfs/test_journal_migration.py b/qa/tasks/cephfs/test_journal_migration.py new file mode 100644 index 00000000..8863b371 --- /dev/null +++ b/qa/tasks/cephfs/test_journal_migration.py @@ -0,0 +1,100 @@ + +from tasks.cephfs.cephfs_test_case import CephFSTestCase +from tasks.workunit import task as workunit + +JOURNAL_FORMAT_LEGACY = 0 +JOURNAL_FORMAT_RESILIENT = 1 + + +class TestJournalMigration(CephFSTestCase): + CLIENTS_REQUIRED = 1 + MDSS_REQUIRED = 2 + + def test_journal_migration(self): + old_journal_version = JOURNAL_FORMAT_LEGACY + new_journal_version = JOURNAL_FORMAT_RESILIENT + + self.mount_a.umount_wait() + self.fs.mds_stop() + + # Create a filesystem using the older journal format. + self.fs.set_ceph_conf('mds', 'mds journal format', old_journal_version) + self.fs.mds_restart() + self.fs.recreate() + + # Enable standby replay, to cover the bug case #8811 where + # a standby replay might mistakenly end up trying to rewrite + # the journal at the same time as an active daemon. + self.fs.set_allow_standby_replay(True) + + status = self.fs.wait_for_daemons() + + self.assertTrue(self.fs.get_replay(status=status) is not None) + + # Do some client work so that the log is populated with something. + with self.mount_a.mounted(): + self.mount_a.create_files() + self.mount_a.check_files() # sanity, this should always pass + + # Run a more substantial workunit so that the length of the log to be + # converted will span at least a few segments + workunit(self.ctx, { + 'clients': { + "client.{0}".format(self.mount_a.client_id): ["suites/fsstress.sh"], + }, + "timeout": "3h" + }) + + # Modify the ceph.conf to ask the MDS to use the new journal format. + self.fs.set_ceph_conf('mds', 'mds journal format', new_journal_version) + + # Restart the MDS. + self.fs.mds_fail_restart() + + # This ensures that all daemons come up into a valid state + status = self.fs.wait_for_daemons() + + # Check that files created in the initial client workload are still visible + # in a client mount. + with self.mount_a.mounted(): + self.mount_a.check_files() + + # Verify that the journal really has been rewritten. 
+ journal_version = self.fs.get_journal_version() + if journal_version != new_journal_version: + raise RuntimeError("Journal was not upgraded, version should be {0} but is {1}".format( + new_journal_version, journal_version + )) + + # Verify that cephfs-journal-tool can now read the rewritten journal + inspect_out = self.fs.journal_tool(["journal", "inspect"], 0) + if not inspect_out.endswith(": OK"): + raise RuntimeError("Unexpected journal-tool result: '{0}'".format( + inspect_out + )) + + self.fs.journal_tool(["event", "get", "json", + "--path", "/tmp/journal.json"], 0) + p = self.fs.tool_remote.sh([ + "python3", + "-c", + "import json; print(len(json.load(open('/tmp/journal.json'))))" + ]) + event_count = int(p.strip()) + if event_count < 1000: + # Approximate value of "lots", expected from having run fsstress + raise RuntimeError("Unexpectedly few journal events: {0}".format(event_count)) + + # Do some client work to check that writing the log is still working + with self.mount_a.mounted(): + workunit(self.ctx, { + 'clients': { + "client.{0}".format(self.mount_a.client_id): ["fs/misc/trivial_sync.sh"], + }, + "timeout": "3h" + }) + + # Check that both an active and a standby replay are still up + status = self.fs.status() + self.assertEqual(len(list(self.fs.get_replays(status=status))), 1) + self.assertEqual(len(list(self.fs.get_ranks(status=status))), 1) diff --git a/qa/tasks/cephfs/test_journal_repair.py b/qa/tasks/cephfs/test_journal_repair.py new file mode 100644 index 00000000..a52455d7 --- /dev/null +++ b/qa/tasks/cephfs/test_journal_repair.py @@ -0,0 +1,447 @@ + +""" +Test our tools for recovering the content of damaged journals +""" + +import json +import logging +from textwrap import dedent +import time + +from teuthology.exceptions import CommandFailedError, ConnectionLostError +from tasks.cephfs.filesystem import ObjectNotFound, ROOT_INO +from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology +from tasks.workunit import task as workunit + +log = logging.getLogger(__name__) + + +class TestJournalRepair(CephFSTestCase): + MDSS_REQUIRED = 2 + + def test_inject_to_empty(self): + """ + That when some dentries are in the journal but nothing is in + the backing store, we correctly populate the backing store + from the journalled dentries. 
+ """ + + # Inject metadata operations + self.mount_a.run_shell(["touch", "rootfile"]) + self.mount_a.run_shell(["mkdir", "subdir"]) + self.mount_a.run_shell(["touch", "subdir/subdirfile"]) + # There are several different paths for handling hardlinks, depending + # on whether an existing dentry (being overwritten) is also a hardlink + self.mount_a.run_shell(["mkdir", "linkdir"]) + + # Test inode -> remote transition for a dentry + self.mount_a.run_shell(["touch", "linkdir/link0"]) + self.mount_a.run_shell(["rm", "-f", "linkdir/link0"]) + self.mount_a.run_shell(["ln", "subdir/subdirfile", "linkdir/link0"]) + + # Test nothing -> remote transition + self.mount_a.run_shell(["ln", "subdir/subdirfile", "linkdir/link1"]) + + # Test remote -> inode transition + self.mount_a.run_shell(["ln", "subdir/subdirfile", "linkdir/link2"]) + self.mount_a.run_shell(["rm", "-f", "linkdir/link2"]) + self.mount_a.run_shell(["touch", "linkdir/link2"]) + + # Test remote -> diff remote transition + self.mount_a.run_shell(["ln", "subdir/subdirfile", "linkdir/link3"]) + self.mount_a.run_shell(["rm", "-f", "linkdir/link3"]) + self.mount_a.run_shell(["ln", "rootfile", "linkdir/link3"]) + + # Test an empty directory + self.mount_a.run_shell(["mkdir", "subdir/subsubdir"]) + self.mount_a.run_shell(["sync"]) + + # Before we unmount, make a note of the inode numbers, later we will + # check that they match what we recover from the journal + rootfile_ino = self.mount_a.path_to_ino("rootfile") + subdir_ino = self.mount_a.path_to_ino("subdir") + linkdir_ino = self.mount_a.path_to_ino("linkdir") + subdirfile_ino = self.mount_a.path_to_ino("subdir/subdirfile") + subsubdir_ino = self.mount_a.path_to_ino("subdir/subsubdir") + + self.mount_a.umount_wait() + + # Stop the MDS + self.fs.mds_stop() + self.fs.mds_fail() + + # Now, the journal should contain the operations, but the backing + # store shouldn't + with self.assertRaises(ObjectNotFound): + self.fs.list_dirfrag(subdir_ino) + self.assertEqual(self.fs.list_dirfrag(ROOT_INO), []) + + # Execute the dentry recovery, this should populate the backing store + self.fs.journal_tool(['event', 'recover_dentries', 'list'], 0) + + # Dentries in ROOT_INO are present + self.assertEqual(sorted(self.fs.list_dirfrag(ROOT_INO)), sorted(['rootfile_head', 'subdir_head', 'linkdir_head'])) + self.assertEqual(self.fs.list_dirfrag(subdir_ino), ['subdirfile_head', 'subsubdir_head']) + self.assertEqual(sorted(self.fs.list_dirfrag(linkdir_ino)), + sorted(['link0_head', 'link1_head', 'link2_head', 'link3_head'])) + + # Now check the MDS can read what we wrote: truncate the journal + # and start the mds. + self.fs.journal_tool(['journal', 'reset'], 0) + self.fs.mds_fail_restart() + self.fs.wait_for_daemons() + + # List files + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + # First ls -R to populate MDCache, such that hardlinks will + # resolve properly (recover_dentries does not create backtraces, + # so ordinarily hardlinks to inodes that happen not to have backtraces + # will be invisible in readdir). 
+ # FIXME: hook in forward scrub here to regenerate backtraces + proc = self.mount_a.run_shell(['ls', '-R']) + self.mount_a.umount_wait() # remount to clear client cache before our second ls + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + proc = self.mount_a.run_shell(['ls', '-R']) + self.assertEqual(proc.stdout.getvalue().strip(), + dedent(""" + .: + linkdir + rootfile + subdir + + ./linkdir: + link0 + link1 + link2 + link3 + + ./subdir: + subdirfile + subsubdir + + ./subdir/subsubdir: + """).strip()) + + # Check the correct inos were preserved by path + self.assertEqual(rootfile_ino, self.mount_a.path_to_ino("rootfile")) + self.assertEqual(subdir_ino, self.mount_a.path_to_ino("subdir")) + self.assertEqual(subdirfile_ino, self.mount_a.path_to_ino("subdir/subdirfile")) + self.assertEqual(subsubdir_ino, self.mount_a.path_to_ino("subdir/subsubdir")) + + # Check that the hard link handling came out correctly + self.assertEqual(self.mount_a.path_to_ino("linkdir/link0"), subdirfile_ino) + self.assertEqual(self.mount_a.path_to_ino("linkdir/link1"), subdirfile_ino) + self.assertNotEqual(self.mount_a.path_to_ino("linkdir/link2"), subdirfile_ino) + self.assertEqual(self.mount_a.path_to_ino("linkdir/link3"), rootfile_ino) + + # Create a new file, ensure it is not issued the same ino as one of the + # recovered ones + self.mount_a.run_shell(["touch", "afterwards"]) + new_ino = self.mount_a.path_to_ino("afterwards") + self.assertNotIn(new_ino, [rootfile_ino, subdir_ino, subdirfile_ino]) + + # Check that we can do metadata ops in the recovered directory + self.mount_a.run_shell(["touch", "subdir/subsubdir/subsubdirfile"]) + + @for_teuthology # 308s + def test_reset(self): + """ + That after forcibly modifying the backing store, we can get back into + a good state by resetting the MDSMap. + + The scenario is that we have two active MDSs, and we lose the journals. Once + we have completely lost confidence in the integrity of the metadata, we want to + return the system to a single-MDS state to go into a scrub to recover what we + can. + """ + + # Set max_mds to 2 + self.fs.set_max_mds(2) + + # See that we have two active MDSs + self.wait_until_equal(lambda: len(self.fs.get_active_names()), 2, 30, + reject_fn=lambda v: v > 2 or v < 1) + active_mds_names = self.fs.get_active_names() + + # Switch off any unneeded MDS daemons + for unneeded_mds in set(self.mds_cluster.mds_ids) - set(active_mds_names): + self.mds_cluster.mds_stop(unneeded_mds) + self.mds_cluster.mds_fail(unneeded_mds) + + # Create a dir on each rank + self.mount_a.run_shell(["mkdir", "alpha"]) + self.mount_a.run_shell(["mkdir", "bravo"]) + self.mount_a.setfattr("alpha/", "ceph.dir.pin", "0") + self.mount_a.setfattr("bravo/", "ceph.dir.pin", "1") + + def subtrees_assigned(): + got_subtrees = self.fs.mds_asok(["get", "subtrees"], mds_id=active_mds_names[0]) + + for s in got_subtrees: + if s['dir']['path'] == '/bravo': + if s['auth_first'] == 1: + return True + else: + # Should not happen + raise RuntimeError("/bravo is subtree but not rank 1!") + + return False + + # Ensure the pinning has taken effect and the /bravo dir is now + # migrated to rank 1. + self.wait_until_true(subtrees_assigned, 30) + + # Do some IO (this should be split across ranks according to + # the rank-pinned dirs) + self.mount_a.create_n_files("alpha/file", 1000) + self.mount_a.create_n_files("bravo/file", 1000) + + # Flush the journals so that we have some backing store data + # belonging to one MDS, and some to the other MDS. 
+ for mds_name in active_mds_names: + self.fs.mds_asok(["flush", "journal"], mds_name) + + # Stop (hard) the second MDS daemon + self.fs.mds_stop(active_mds_names[1]) + + # Wipe out the tables for MDS rank 1 so that it is broken and can't start + # (this is the simulated failure that we will demonstrate that the disaster + # recovery tools can get us back from) + self.fs.erase_metadata_objects(prefix="mds1_") + + # Try to access files from the client + blocked_ls = self.mount_a.run_shell(["ls", "-R"], wait=False) + + # Check that this "ls -R" blocked rather than completing: indicates + # it got stuck trying to access subtrees which were on the now-dead MDS. + log.info("Sleeping to check ls is blocked...") + time.sleep(60) + self.assertFalse(blocked_ls.finished) + + # This mount is now useless because it will depend on MDS rank 1, and MDS rank 1 + # is not coming back. Kill it. + log.info("Killing mount, it's blocked on the MDS we killed") + self.mount_a.kill() + self.mount_a.kill_cleanup() + try: + # Now that the mount is dead, the ls -R should error out. + blocked_ls.wait() + except (CommandFailedError, ConnectionLostError): + # The ConnectionLostError case is for kernel client, where + # killing the mount also means killing the node. + pass + + # See that the second MDS will crash when it starts and tries to + # acquire rank 1 + damaged_id = active_mds_names[1] + self.fs.mds_restart(damaged_id) + + # The daemon taking the damaged rank should start starting, then + # restart back into standby after asking the mon to mark the rank + # damaged. + def is_marked_damaged(): + mds_map = self.fs.get_mds_map() + return 1 in mds_map['damaged'] + + self.wait_until_true(is_marked_damaged, 60) + + def get_state(): + info = self.mds_cluster.get_mds_info(damaged_id) + return info['state'] if info is not None else None + + self.wait_until_equal( + get_state, + "up:standby", + timeout=60) + + self.fs.mds_stop(damaged_id) + self.fs.mds_fail(damaged_id) + + # Now give up and go through a disaster recovery procedure + self.fs.mds_stop(active_mds_names[0]) + self.fs.mds_fail(active_mds_names[0]) + # Invoke recover_dentries quietly, because otherwise log spews millions of lines + self.fs.journal_tool(["event", "recover_dentries", "summary"], 0, quiet=True) + self.fs.journal_tool(["event", "recover_dentries", "summary"], 1, quiet=True) + self.fs.table_tool(["0", "reset", "session"]) + self.fs.journal_tool(["journal", "reset"], 0) + self.fs.erase_mds_objects(1) + self.fs.mon_manager.raw_cluster_cmd('fs', 'reset', self.fs.name, + '--yes-i-really-mean-it') + + # Bring an MDS back online, mount a client, and see that we can walk the full + # filesystem tree again + self.fs.mds_fail_restart(active_mds_names[0]) + self.wait_until_equal(lambda: self.fs.get_active_names(), [active_mds_names[0]], 30, + reject_fn=lambda v: len(v) > 1) + self.mount_a.mount() + self.mount_a.run_shell(["ls", "-R"], wait=True) + + def test_table_tool(self): + active_mdss = self.fs.get_active_names() + self.assertEqual(len(active_mdss), 1) + mds_name = active_mdss[0] + + self.mount_a.run_shell(["touch", "foo"]) + self.fs.mds_asok(["flush", "journal"], mds_name) + + log.info(self.fs.table_tool(["all", "show", "inode"])) + log.info(self.fs.table_tool(["all", "show", "snap"])) + log.info(self.fs.table_tool(["all", "show", "session"])) + + # Inode table should always be the same because initial state + # and choice of inode are deterministic. 
+ # Should see one inode consumed + self.assertEqual( + json.loads(self.fs.table_tool(["all", "show", "inode"])), + {"0": { + "data": { + "version": 2, + "inotable": { + "projected_free": [ + {"start": 1099511628777, + "len": 1099511626775}], + "free": [ + {"start": 1099511628777, + "len": 1099511626775}]}}, + "result": 0}} + + ) + + # Should see one session + session_data = json.loads(self.fs.table_tool( + ["all", "show", "session"])) + self.assertEqual(len(session_data["0"]["data"]["sessions"]), 1) + self.assertEqual(session_data["0"]["result"], 0) + + # Should see no snaps + self.assertEqual( + json.loads(self.fs.table_tool(["all", "show", "snap"])), + {"version": 1, + "snapserver": {"last_snap": 1, + "last_created": 1, + "last_destroyed": 1, + "pending_noop": [], + "snaps": [], + "need_to_purge": {}, + "pending_update": [], + "pending_destroy": []}, + "result": 0} + ) + + # Reset everything + for table in ["session", "inode", "snap"]: + self.fs.table_tool(["all", "reset", table]) + + log.info(self.fs.table_tool(["all", "show", "inode"])) + log.info(self.fs.table_tool(["all", "show", "snap"])) + log.info(self.fs.table_tool(["all", "show", "session"])) + + # Should see 0 sessions + session_data = json.loads(self.fs.table_tool( + ["all", "show", "session"])) + self.assertEqual(len(session_data["0"]["data"]["sessions"]), 0) + self.assertEqual(session_data["0"]["result"], 0) + + # Should see entire inode range now marked free + self.assertEqual( + json.loads(self.fs.table_tool(["all", "show", "inode"])), + {"0": {"data": {"version": 1, + "inotable": {"projected_free": [ + {"start": 1099511627776, + "len": 1099511627776}], + "free": [ + {"start": 1099511627776, + "len": 1099511627776}]}}, + "result": 0}} + ) + + # Should see no snaps + self.assertEqual( + json.loads(self.fs.table_tool(["all", "show", "snap"])), + {"version": 1, + "snapserver": {"last_snap": 1, + "last_created": 1, + "last_destroyed": 1, + "pending_noop": [], + "snaps": [], + "need_to_purge": {}, + "pending_update": [], + "pending_destroy": []}, + "result": 0} + ) + + def test_table_tool_take_inos(self): + initial_range_start = 1099511627776 + initial_range_len = 1099511627776 + # Initially a completely clear range + self.assertEqual( + json.loads(self.fs.table_tool(["all", "show", "inode"])), + {"0": {"data": {"version": 0, + "inotable": {"projected_free": [ + {"start": initial_range_start, + "len": initial_range_len}], + "free": [ + {"start": initial_range_start, + "len": initial_range_len}]}}, + "result": 0}} + ) + + # Remove some + self.assertEqual( + json.loads(self.fs.table_tool(["all", "take_inos", "{0}".format(initial_range_start + 100)])), + {"0": {"data": {"version": 1, + "inotable": {"projected_free": [ + {"start": initial_range_start + 101, + "len": initial_range_len - 101}], + "free": [ + {"start": initial_range_start + 101, + "len": initial_range_len - 101}]}}, + "result": 0}} + ) + + @for_teuthology # Hack: "for_teuthology" because .sh doesn't work outside teuth + def test_journal_smoke(self): + workunit(self.ctx, { + 'clients': { + "client.{0}".format(self.mount_a.client_id): [ + "fs/misc/trivial_sync.sh"], + }, + "timeout": "1h" + }) + + for mount in self.mounts: + mount.umount_wait() + + self.fs.mds_stop() + self.fs.mds_fail() + + # journal tool smoke + workunit(self.ctx, { + 'clients': { + "client.{0}".format(self.mount_a.client_id): [ + "suites/cephfs_journal_tool_smoke.sh"], + }, + "timeout": "1h" + }) + + + + self.fs.mds_restart() + self.fs.wait_for_daemons() + + self.mount_a.mount() + + # trivial sync 
moutn a + workunit(self.ctx, { + 'clients': { + "client.{0}".format(self.mount_a.client_id): [ + "fs/misc/trivial_sync.sh"], + }, + "timeout": "1h" + }) + diff --git a/qa/tasks/cephfs/test_mantle.py b/qa/tasks/cephfs/test_mantle.py new file mode 100644 index 00000000..6cd86ad1 --- /dev/null +++ b/qa/tasks/cephfs/test_mantle.py @@ -0,0 +1,109 @@ +from tasks.cephfs.cephfs_test_case import CephFSTestCase +import json +import logging + +log = logging.getLogger(__name__) +failure = "using old balancer; mantle failed for balancer=" +success = "mantle balancer version changed: " + +class TestMantle(CephFSTestCase): + def start_mantle(self): + self.wait_for_health_clear(timeout=30) + self.fs.set_max_mds(2) + self.wait_until_equal(lambda: len(self.fs.get_active_names()), 2, 30, + reject_fn=lambda v: v > 2 or v < 1) + + for m in self.fs.get_active_names(): + self.fs.mds_asok(['config', 'set', 'debug_objecter', '20'], mds_id=m) + self.fs.mds_asok(['config', 'set', 'debug_ms', '0'], mds_id=m) + self.fs.mds_asok(['config', 'set', 'debug_mds', '0'], mds_id=m) + self.fs.mds_asok(['config', 'set', 'debug_mds_balancer', '5'], mds_id=m) + + def push_balancer(self, obj, lua_code, expect): + self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', obj) + self.fs.rados(["put", obj, "-"], stdin_data=lua_code) + with self.assert_cluster_log(failure + obj + " " + expect): + log.info("run a " + obj + " balancer that expects=" + expect) + + def test_version_empty(self): + self.start_mantle() + expect = " : (2) No such file or directory" + + ret = self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer') + assert(ret == 22) # EINVAL + + self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', " ") + with self.assert_cluster_log(failure + " " + expect): pass + + def test_version_not_in_rados(self): + self.start_mantle() + expect = failure + "ghost.lua : (2) No such file or directory" + self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "ghost.lua") + with self.assert_cluster_log(expect): pass + + def test_balancer_invalid(self): + self.start_mantle() + expect = ": (22) Invalid argument" + + lua_code = "this is invalid lua code!" 
+ self.push_balancer("invalid.lua", lua_code, expect) + + lua_code = "BAL_LOG()" + self.push_balancer("invalid_log.lua", lua_code, expect) + + lua_code = "BAL_LOG(0)" + self.push_balancer("invalid_log_again.lua", lua_code, expect) + + def test_balancer_valid(self): + self.start_mantle() + lua_code = "BAL_LOG(0, \"test\")\nreturn {3, 4}" + self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua") + self.fs.rados(["put", "valid.lua", "-"], stdin_data=lua_code) + with self.assert_cluster_log(success + "valid.lua"): + log.info("run a valid.lua balancer") + + def test_return_invalid(self): + self.start_mantle() + expect = ": (22) Invalid argument" + + lua_code = "return \"hello\"" + self.push_balancer("string.lua", lua_code, expect) + + lua_code = "return 3" + self.push_balancer("number.lua", lua_code, expect) + + lua_code = "return {}" + self.push_balancer("dict_empty.lua", lua_code, expect) + + lua_code = "return {\"this\", \"is\", \"a\", \"test\"}" + self.push_balancer("dict_of_strings.lua", lua_code, expect) + + lua_code = "return {3, \"test\"}" + self.push_balancer("dict_of_mixed.lua", lua_code, expect) + + lua_code = "return {3}" + self.push_balancer("not_enough_numbers.lua", lua_code, expect) + + lua_code = "return {3, 4, 5, 6, 7, 8, 9}" + self.push_balancer("too_many_numbers.lua", lua_code, expect) + + def test_dead_osd(self): + self.start_mantle() + expect = " : (110) Connection timed out" + + # kill the OSDs so that the balancer pull from RADOS times out + osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty')) + for i in range(0, len(osd_map['osds'])): + self.fs.mon_manager.raw_cluster_cmd_result('osd', 'down', str(i)) + self.fs.mon_manager.raw_cluster_cmd_result('osd', 'out', str(i)) + + # trigger a pull from RADOS + self.fs.mon_manager.raw_cluster_cmd_result('fs', 'set', self.fs.name, 'balancer', "valid.lua") + + # make the timeout a little longer since dead OSDs spam ceph -w + with self.assert_cluster_log(failure + "valid.lua" + expect, timeout=30): + log.info("run a balancer that should timeout") + + # cleanup + for i in range(0, len(osd_map['osds'])): + self.fs.mon_manager.raw_cluster_cmd_result('osd', 'in', str(i)) diff --git a/qa/tasks/cephfs/test_misc.py b/qa/tasks/cephfs/test_misc.py new file mode 100644 index 00000000..cd72ac38 --- /dev/null +++ b/qa/tasks/cephfs/test_misc.py @@ -0,0 +1,291 @@ + +from unittest import SkipTest +from tasks.cephfs.fuse_mount import FuseMount +from tasks.cephfs.cephfs_test_case import CephFSTestCase +from teuthology.orchestra.run import CommandFailedError, ConnectionLostError +import errno +import time +import json +import logging + +log = logging.getLogger(__name__) + +class TestMisc(CephFSTestCase): + CLIENTS_REQUIRED = 2 + + def test_getattr_caps(self): + """ + Check if MDS recognizes the 'mask' parameter of open request. + The parameter allows client to request caps when opening file + """ + + if not isinstance(self.mount_a, FuseMount): + raise SkipTest("Require FUSE client") + + # Enable debug. Client will requests CEPH_CAP_XATTR_SHARED + # on lookup/open + self.mount_b.umount_wait() + self.set_conf('client', 'client debug getattr caps', 'true') + self.mount_b.mount() + self.mount_b.wait_until_mounted() + + # create a file and hold it open. MDS will issue CEPH_CAP_EXCL_* + # to mount_a + p = self.mount_a.open_background("testfile") + self.mount_b.wait_for_visible("testfile") + + # this triggers a lookup request and an open request. 
The debug + # code will check if lookup/open reply contains xattrs + self.mount_b.run_shell(["cat", "testfile"]) + + self.mount_a.kill_background(p) + + def test_root_rctime(self): + """ + Check that the root inode has a non-default rctime on startup. + """ + + t = time.time() + rctime = self.mount_a.getfattr(".", "ceph.dir.rctime") + log.info("rctime = {}".format(rctime)) + self.assertGreaterEqual(float(rctime), t - 10) + + def test_fs_new(self): + self.mount_a.umount_wait() + self.mount_b.umount_wait() + + data_pool_name = self.fs.get_data_pool_name() + + self.fs.mds_stop() + self.fs.mds_fail() + + self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name, + '--yes-i-really-mean-it') + + self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete', + self.fs.metadata_pool_name, + self.fs.metadata_pool_name, + '--yes-i-really-really-mean-it') + self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', + self.fs.metadata_pool_name, + self.fs.pgs_per_fs_pool.__str__()) + + dummyfile = '/etc/fstab' + + self.fs.put_metadata_object_raw("key", dummyfile) + + def get_pool_df(fs, name): + try: + return fs.get_pool_df(name)['objects'] > 0 + except RuntimeError: + return False + + self.wait_until_true(lambda: get_pool_df(self.fs, self.fs.metadata_pool_name), timeout=30) + + try: + self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name, + self.fs.metadata_pool_name, + data_pool_name) + except CommandFailedError as e: + self.assertEqual(e.exitstatus, errno.EINVAL) + else: + raise AssertionError("Expected EINVAL") + + self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name, + self.fs.metadata_pool_name, + data_pool_name, "--force") + + self.fs.mon_manager.raw_cluster_cmd('fs', 'rm', self.fs.name, + '--yes-i-really-mean-it') + + + self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'delete', + self.fs.metadata_pool_name, + self.fs.metadata_pool_name, + '--yes-i-really-really-mean-it') + self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'create', + self.fs.metadata_pool_name, + self.fs.pgs_per_fs_pool.__str__()) + self.fs.mon_manager.raw_cluster_cmd('fs', 'new', self.fs.name, + self.fs.metadata_pool_name, + data_pool_name) + + def test_cap_revoke_nonresponder(self): + """ + Check that a client is evicted if it has not responded to cap revoke + request for configured number of seconds. + """ + session_timeout = self.fs.get_var("session_timeout") + eviction_timeout = session_timeout / 2.0 + + self.fs.mds_asok(['config', 'set', 'mds_cap_revoke_eviction_timeout', + str(eviction_timeout)]) + + cap_holder = self.mount_a.open_background() + + # Wait for the file to be visible from another client, indicating + # that mount_a has completed its network ops + self.mount_b.wait_for_visible() + + # Simulate client death + self.mount_a.kill() + + try: + # The waiter should get stuck waiting for the capability + # held on the MDS by the now-dead client A + cap_waiter = self.mount_b.write_background() + + a = time.time() + time.sleep(eviction_timeout) + cap_waiter.wait() + b = time.time() + cap_waited = b - a + log.info("cap_waiter waited {0}s".format(cap_waited)) + + # check if the cap is transferred before session timeout kicked in. + # this is a good enough check to ensure that the client got evicted + # by the cap auto evicter rather than transitioning to stale state + # and then getting evicted. 
+ self.assertLess(cap_waited, session_timeout, + "Capability handover took {0}, expected less than {1}".format( + cap_waited, session_timeout + )) + + self.assertTrue(self.mount_a.is_blacklisted()) + cap_holder.stdin.close() + try: + cap_holder.wait() + except (CommandFailedError, ConnectionLostError): + # We killed it (and possibly its node), so it raises an error + pass + finally: + self.mount_a.kill_cleanup() + + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + def test_filtered_df(self): + pool_name = self.fs.get_data_pool_name() + raw_df = self.fs.get_pool_df(pool_name) + raw_avail = float(raw_df["max_avail"]) + out = self.fs.mon_manager.raw_cluster_cmd('osd', 'pool', 'get', + pool_name, 'size', + '-f', 'json-pretty') + _ = json.loads(out) + + proc = self.mount_a.run_shell(['df', '.']) + output = proc.stdout.getvalue() + fs_avail = output.split('\n')[1].split()[3] + fs_avail = float(fs_avail) * 1024 + + ratio = raw_avail / fs_avail + assert 0.9 < ratio < 1.1 + + def test_dump_inode(self): + info = self.fs.mds_asok(['dump', 'inode', '1']) + assert(info['path'] == "/") + + def test_dump_inode_hexademical(self): + self.mount_a.run_shell(["mkdir", "-p", "foo"]) + ino = self.mount_a.path_to_ino("foo") + assert type(ino) is int + info = self.fs.mds_asok(['dump', 'inode', hex(ino)]) + assert info['path'] == "/foo" + + +class TestCacheDrop(CephFSTestCase): + CLIENTS_REQUIRED = 1 + + def _run_drop_cache_cmd(self, timeout=None): + result = None + mds_id = self.fs.get_lone_mds_id() + if timeout is not None: + result = self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id), + "cache", "drop", str(timeout)) + else: + result = self.fs.mon_manager.raw_cluster_cmd("tell", "mds.{0}".format(mds_id), + "cache", "drop") + return json.loads(result) + + def _setup(self, max_caps=20, threshold=400): + # create some files + self.mount_a.create_n_files("dc-dir/dc-file", 1000, sync=True) + + # Reduce this so the MDS doesn't rkcall the maximum for simple tests + self.fs.rank_asok(['config', 'set', 'mds_recall_max_caps', str(max_caps)]) + self.fs.rank_asok(['config', 'set', 'mds_recall_max_decay_threshold', str(threshold)]) + + def test_drop_cache_command(self): + """ + Basic test for checking drop cache command. + Confirm it halts without a timeout. + Note that the cache size post trimming is not checked here. + """ + mds_min_caps_per_client = int(self.fs.get_config("mds_min_caps_per_client")) + self._setup() + result = self._run_drop_cache_cmd() + self.assertEqual(result['client_recall']['return_code'], 0) + self.assertEqual(result['flush_journal']['return_code'], 0) + # It should take at least 1 second + self.assertGreater(result['duration'], 1) + self.assertGreaterEqual(result['trim_cache']['trimmed'], 1000-2*mds_min_caps_per_client) + + def test_drop_cache_command_timeout(self): + """ + Basic test for checking drop cache command. + Confirm recall halts early via a timeout. + Note that the cache size post trimming is not checked here. + """ + self._setup() + result = self._run_drop_cache_cmd(timeout=10) + self.assertEqual(result['client_recall']['return_code'], -errno.ETIMEDOUT) + self.assertEqual(result['flush_journal']['return_code'], 0) + self.assertGreater(result['duration'], 10) + self.assertGreaterEqual(result['trim_cache']['trimmed'], 100) # we did something, right? + + def test_drop_cache_command_dead_timeout(self): + """ + Check drop cache command with non-responding client using tell + interface. Note that the cache size post trimming is not checked + here. 
+ """ + self._setup() + self.mount_a.kill() + # Note: recall is subject to the timeout. The journal flush will + # be delayed due to the client being dead. + result = self._run_drop_cache_cmd(timeout=5) + self.assertEqual(result['client_recall']['return_code'], -errno.ETIMEDOUT) + self.assertEqual(result['flush_journal']['return_code'], 0) + self.assertGreater(result['duration'], 5) + self.assertLess(result['duration'], 120) + # Note: result['trim_cache']['trimmed'] may be >0 because dropping the + # cache now causes the Locker to drive eviction of stale clients (a + # stale session will be autoclosed at mdsmap['session_timeout']). The + # particular operation causing this is journal flush which causes the + # MDS to wait wait for cap revoke. + #self.assertEqual(0, result['trim_cache']['trimmed']) + self.mount_a.kill_cleanup() + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + def test_drop_cache_command_dead(self): + """ + Check drop cache command with non-responding client using tell + interface. Note that the cache size post trimming is not checked + here. + """ + self._setup() + self.mount_a.kill() + result = self._run_drop_cache_cmd() + self.assertEqual(result['client_recall']['return_code'], 0) + self.assertEqual(result['flush_journal']['return_code'], 0) + self.assertGreater(result['duration'], 5) + self.assertLess(result['duration'], 120) + # Note: result['trim_cache']['trimmed'] may be >0 because dropping the + # cache now causes the Locker to drive eviction of stale clients (a + # stale session will be autoclosed at mdsmap['session_timeout']). The + # particular operation causing this is journal flush which causes the + # MDS to wait wait for cap revoke. + self.mount_a.kill_cleanup() + self.mount_a.mount() + self.mount_a.wait_until_mounted() diff --git a/qa/tasks/cephfs/test_openfiletable.py b/qa/tasks/cephfs/test_openfiletable.py new file mode 100644 index 00000000..36e212d7 --- /dev/null +++ b/qa/tasks/cephfs/test_openfiletable.py @@ -0,0 +1,41 @@ +import time +from tasks.cephfs.cephfs_test_case import CephFSTestCase +from teuthology.exceptions import CommandFailedError +from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology + +class OpenFileTable(CephFSTestCase): + CLIENTS_REQUIRED = 1 + MDSS_REQUIRED = 1 + + def test_max_items_per_obj(self): + """ + The maximum number of openfiles omap objects keys are now equal to + osd_deep_scrub_large_omap_object_key_threshold option. + """ + self.set_conf("mds", "osd_deep_scrub_large_omap_object_key_threshold", "5") + + self.fs.mds_restart() + self.fs.wait_for_daemons() + + # Write some bytes to a file + size_mb = 1 + + # Hold the file open + file_count = 8 + for i in range(0, file_count): + filename = "open_file{}".format(i) + p = self.mount_a.open_background(filename) + self.mount_a.write_n_mb(filename, size_mb) + + time.sleep(10) + + """ + With osd_deep_scrub_large_omap_object_key_threshold value as 5 and + opening 8 files we should have a new rados object with name + mds0_openfiles.1 to hold the extra keys. 
+ """ + + stat_out = self.fs.rados(["stat", "mds0_openfiles.1"]) + + # Now close the file + self.mount_a.kill_background(p) diff --git a/qa/tasks/cephfs/test_pool_perm.py b/qa/tasks/cephfs/test_pool_perm.py new file mode 100644 index 00000000..a1f234a2 --- /dev/null +++ b/qa/tasks/cephfs/test_pool_perm.py @@ -0,0 +1,113 @@ +from textwrap import dedent +from teuthology.exceptions import CommandFailedError +from tasks.cephfs.cephfs_test_case import CephFSTestCase +import os + + +class TestPoolPerm(CephFSTestCase): + def test_pool_perm(self): + self.mount_a.run_shell(["touch", "test_file"]) + + file_path = os.path.join(self.mount_a.mountpoint, "test_file") + + remote_script = dedent(""" + import os + import errno + + fd = os.open("{path}", os.O_RDWR) + try: + if {check_read}: + ret = os.read(fd, 1024) + else: + os.write(fd, b'content') + except OSError as e: + if e.errno != errno.EPERM: + raise + else: + raise RuntimeError("client does not check permission of data pool") + """) + + client_name = "client.{0}".format(self.mount_a.client_id) + + # set data pool read only + self.fs.mon_manager.raw_cluster_cmd_result( + 'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r', 'osd', + 'allow r pool={0}'.format(self.fs.get_data_pool_name())) + + self.mount_a.umount_wait() + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + # write should fail + self.mount_a.run_python(remote_script.format(path=file_path, check_read=str(False))) + + # set data pool write only + self.fs.mon_manager.raw_cluster_cmd_result( + 'auth', 'caps', client_name, 'mds', 'allow', 'mon', 'allow r', 'osd', + 'allow w pool={0}'.format(self.fs.get_data_pool_name())) + + self.mount_a.umount_wait() + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + # read should fail + self.mount_a.run_python(remote_script.format(path=file_path, check_read=str(True))) + + def test_forbidden_modification(self): + """ + That a client who does not have the capability for setting + layout pools is prevented from doing so. 
+ """ + + # Set up + client_name = "client.{0}".format(self.mount_a.client_id) + new_pool_name = "data_new" + self.fs.add_data_pool(new_pool_name) + + self.mount_a.run_shell(["touch", "layoutfile"]) + self.mount_a.run_shell(["mkdir", "layoutdir"]) + + # Set MDS 'rw' perms: missing 'p' means no setting pool layouts + self.fs.mon_manager.raw_cluster_cmd_result( + 'auth', 'caps', client_name, 'mds', 'allow rw', 'mon', 'allow r', + 'osd', + 'allow rw pool={0},allow rw pool={1}'.format( + self.fs.get_data_pool_names()[0], + self.fs.get_data_pool_names()[1], + )) + + self.mount_a.umount_wait() + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + with self.assertRaises(CommandFailedError): + self.mount_a.setfattr("layoutfile", "ceph.file.layout.pool", + new_pool_name) + with self.assertRaises(CommandFailedError): + self.mount_a.setfattr("layoutdir", "ceph.dir.layout.pool", + new_pool_name) + self.mount_a.umount_wait() + + # Set MDS 'rwp' perms: should now be able to set layouts + self.fs.mon_manager.raw_cluster_cmd_result( + 'auth', 'caps', client_name, 'mds', 'allow rwp', 'mon', 'allow r', + 'osd', + 'allow rw pool={0},allow rw pool={1}'.format( + self.fs.get_data_pool_names()[0], + self.fs.get_data_pool_names()[1], + )) + self.mount_a.mount() + self.mount_a.wait_until_mounted() + self.mount_a.setfattr("layoutfile", "ceph.file.layout.pool", + new_pool_name) + self.mount_a.setfattr("layoutdir", "ceph.dir.layout.pool", + new_pool_name) + self.mount_a.umount_wait() + + def tearDown(self): + self.fs.mon_manager.raw_cluster_cmd_result( + 'auth', 'caps', "client.{0}".format(self.mount_a.client_id), + 'mds', 'allow', 'mon', 'allow r', 'osd', + 'allow rw pool={0}'.format(self.fs.get_data_pool_names()[0])) + super(TestPoolPerm, self).tearDown() + diff --git a/qa/tasks/cephfs/test_quota.py b/qa/tasks/cephfs/test_quota.py new file mode 100644 index 00000000..dcfda5e2 --- /dev/null +++ b/qa/tasks/cephfs/test_quota.py @@ -0,0 +1,106 @@ + +from tasks.cephfs.cephfs_test_case import CephFSTestCase + +from teuthology.exceptions import CommandFailedError + +class TestQuota(CephFSTestCase): + CLIENTS_REQUIRED = 2 + MDSS_REQUIRED = 1 + + def test_remote_update_getfattr(self): + """ + That quota changes made from one client are visible to another + client looking at ceph.quota xattrs + """ + self.mount_a.run_shell(["mkdir", "subdir"]) + + self.assertEqual( + self.mount_a.getfattr("./subdir", "ceph.quota.max_files"), + None) + self.assertEqual( + self.mount_b.getfattr("./subdir", "ceph.quota.max_files"), + None) + + self.mount_a.setfattr("./subdir", "ceph.quota.max_files", "10") + self.assertEqual( + self.mount_a.getfattr("./subdir", "ceph.quota.max_files"), + "10") + + # Should be visible as soon as setxattr operation completes on + # mds (we get here sooner because setfattr gets an early reply) + self.wait_until_equal( + lambda: self.mount_b.getfattr("./subdir", "ceph.quota.max_files"), + "10", timeout=10) + + def test_remote_update_df(self): + """ + That when a client modifies the quota on a directory used + as another client's root, the other client sees the change + reflected in their statfs output. 
+ """ + + self.mount_b.umount_wait() + + self.mount_a.run_shell(["mkdir", "subdir"]) + + size_before = 1024 * 1024 * 128 + self.mount_a.setfattr("./subdir", "ceph.quota.max_bytes", + "%s" % size_before) + + self.mount_b.mount(mount_path="/subdir") + + self.assertDictEqual( + self.mount_b.df(), + { + "total": size_before, + "used": 0, + "available": size_before + }) + + size_after = 1024 * 1024 * 256 + self.mount_a.setfattr("./subdir", "ceph.quota.max_bytes", + "%s" % size_after) + + # Should be visible as soon as setxattr operation completes on + # mds (we get here sooner because setfattr gets an early reply) + self.wait_until_equal( + lambda: self.mount_b.df(), + { + "total": size_after, + "used": 0, + "available": size_after + }, + timeout=10 + ) + + def test_remote_update_write(self): + """ + That when a client modifies the quota on a directory used + as another client's root, the other client sees the effect + of the change when writing data. + """ + + self.mount_a.run_shell(["mkdir", "subdir_files"]) + self.mount_a.run_shell(["mkdir", "subdir_data"]) + + # Set some nice high quotas that mount_b's initial operations + # will be well within + self.mount_a.setfattr("./subdir_files", "ceph.quota.max_files", "100") + self.mount_a.setfattr("./subdir_data", "ceph.quota.max_bytes", "104857600") + + # Do some writes within my quota + self.mount_b.create_n_files("subdir_files/file", 20) + self.mount_b.write_n_mb("subdir_data/file", 20) + + # Set quotas lower than what mount_b already wrote, it should + # refuse to write more once it's seen them + self.mount_a.setfattr("./subdir_files", "ceph.quota.max_files", "10") + self.mount_a.setfattr("./subdir_data", "ceph.quota.max_bytes", "1048576") + + # Do some writes that would have been okay within the old quota, + # but are forbidden under the new quota + with self.assertRaises(CommandFailedError): + self.mount_b.create_n_files("subdir_files/file", 40) + with self.assertRaises(CommandFailedError): + self.mount_b.write_n_mb("subdir_data/file", 40) + diff --git a/qa/tasks/cephfs/test_readahead.py b/qa/tasks/cephfs/test_readahead.py new file mode 100644 index 00000000..31e7bf18 --- /dev/null +++ b/qa/tasks/cephfs/test_readahead.py @@ -0,0 +1,31 @@ +import logging +from tasks.cephfs.fuse_mount import FuseMount +from tasks.cephfs.cephfs_test_case import CephFSTestCase + +log = logging.getLogger(__name__) + + +class TestReadahead(CephFSTestCase): + def test_flush(self): + if not isinstance(self.mount_a, FuseMount): + self.skipTest("FUSE needed for measuring op counts") + + # Create 32MB file + self.mount_a.run_shell(["dd", "if=/dev/urandom", "of=foo", "bs=1M", "count=32"]) + + # Unmount and remount the client to flush cache + self.mount_a.umount_wait() + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + initial_op_r = self.mount_a.admin_socket(['perf', 'dump', 'objecter'])['objecter']['op_r'] + self.mount_a.run_shell(["dd", "if=foo", "of=/dev/null", "bs=128k", "count=32"]) + op_r = self.mount_a.admin_socket(['perf', 'dump', 'objecter'])['objecter']['op_r'] + assert op_r >= initial_op_r + op_r -= initial_op_r + log.info("read operations: {0}".format(op_r)) + + # with exponentially increasing readahead, we should see fewer than 10 operations + # but this test simply checks if the client is doing a remote read for each local read + if op_r >= 32: + raise RuntimeError("readahead not working") diff --git a/qa/tasks/cephfs/test_recovery_pool.py b/qa/tasks/cephfs/test_recovery_pool.py new file mode 100644 index 00000000..36b4e58e --- /dev/null +++ 
b/qa/tasks/cephfs/test_recovery_pool.py @@ -0,0 +1,207 @@ +""" +Test our tools for recovering metadata from the data pool into an alternate pool +""" + +import logging +import traceback +from collections import namedtuple + +from teuthology.orchestra.run import CommandFailedError +from tasks.cephfs.cephfs_test_case import CephFSTestCase + +log = logging.getLogger(__name__) + + +ValidationError = namedtuple("ValidationError", ["exception", "backtrace"]) + + +class OverlayWorkload(object): + def __init__(self, orig_fs, recovery_fs, orig_mount, recovery_mount): + self._orig_fs = orig_fs + self._recovery_fs = recovery_fs + self._orig_mount = orig_mount + self._recovery_mount = recovery_mount + self._initial_state = None + + # Accumulate backtraces for every failed validation, and return them. Backtraces + # are rather verbose, but we only see them when something breaks, and they + # let us see which check failed without having to decorate each check with + # a string + self._errors = [] + + def assert_equal(self, a, b): + try: + if a != b: + raise AssertionError("{0} != {1}".format(a, b)) + except AssertionError as e: + self._errors.append( + ValidationError(e, traceback.format_exc(3)) + ) + + def write(self): + """ + Write the workload files to the mount + """ + raise NotImplementedError() + + def validate(self): + """ + Read from the mount and validate that the workload files are present (i.e. have + survived or been reconstructed from the test scenario) + """ + raise NotImplementedError() + + def damage(self): + """ + Damage the filesystem pools in ways that will be interesting to recover from. By + default just wipe everything in the metadata pool + """ + # Delete every object in the metadata pool + objects = self._orig_fs.rados(["ls"]).split("\n") + for o in objects: + self._orig_fs.rados(["rm", o]) + + def flush(self): + """ + Called after client unmount, after write: flush whatever you want + """ + self._orig_fs.mds_asok(["flush", "journal"]) + self._recovery_fs.mds_asok(["flush", "journal"]) + + +class SimpleOverlayWorkload(OverlayWorkload): + """ + Single file, single directory, check that it gets recovered and so does its size + """ + def write(self): + self._orig_mount.run_shell(["mkdir", "subdir"]) + self._orig_mount.write_n_mb("subdir/sixmegs", 6) + self._initial_state = self._orig_mount.stat("subdir/sixmegs") + + def validate(self): + self._recovery_mount.run_shell(["ls", "subdir"]) + st = self._recovery_mount.stat("subdir/sixmegs") + self.assert_equal(st['st_size'], self._initial_state['st_size']) + return self._errors + +class TestRecoveryPool(CephFSTestCase): + MDSS_REQUIRED = 2 + CLIENTS_REQUIRED = 2 + REQUIRE_RECOVERY_FILESYSTEM = True + + def is_marked_damaged(self, rank): + mds_map = self.fs.get_mds_map() + return rank in mds_map['damaged'] + + def _rebuild_metadata(self, workload, other_pool=None, workers=1): + """ + That when all objects in metadata pool are removed, we can rebuild a metadata pool + based on the contents of a data pool, and a client can see and read our files. 
+ """ + + # First, inject some files + + workload.write() + + # Unmount the client and flush the journal: the tool should also cope with + # situations where there is dirty metadata, but we'll test that separately + self.mount_a.umount_wait() + self.mount_b.umount_wait() + workload.flush() + + # Create the alternate pool if requested + recovery_fs = self.recovery_fs.name + recovery_pool = self.recovery_fs.get_metadata_pool_name() + self.recovery_fs.data_scan(['init', '--force-init', + '--filesystem', recovery_fs, + '--alternate-pool', recovery_pool]) + self.recovery_fs.mon_manager.raw_cluster_cmd('-s') + self.recovery_fs.table_tool([recovery_fs + ":0", "reset", "session"]) + self.recovery_fs.table_tool([recovery_fs + ":0", "reset", "snap"]) + self.recovery_fs.table_tool([recovery_fs + ":0", "reset", "inode"]) + + # Stop the MDS + self.fs.mds_stop() + self.fs.mds_fail() + + # After recovery, we need the MDS to not be strict about stats (in production these options + # are off by default, but in QA we need to explicitly disable them) + self.fs.set_ceph_conf('mds', 'mds verify scatter', False) + self.fs.set_ceph_conf('mds', 'mds debug scatterstat', False) + + # Apply any data damage the workload wants + workload.damage() + + # Reset the MDS map in case multiple ranks were in play: recovery procedure + # only understands how to rebuild metadata under rank 0 + self.fs.mon_manager.raw_cluster_cmd('fs', 'reset', self.fs.name, + '--yes-i-really-mean-it') + + self.fs.table_tool([self.fs.name + ":0", "reset", "session"]) + self.fs.table_tool([self.fs.name + ":0", "reset", "snap"]) + self.fs.table_tool([self.fs.name + ":0", "reset", "inode"]) + + # Run the recovery procedure + if False: + with self.assertRaises(CommandFailedError): + # Normal reset should fail when no objects are present, we'll use --force instead + self.fs.journal_tool(["journal", "reset"], 0) + + self.fs.mds_stop() + self.fs.data_scan(['scan_extents', '--alternate-pool', + recovery_pool, '--filesystem', self.fs.name, + self.fs.get_data_pool_name()]) + self.fs.data_scan(['scan_inodes', '--alternate-pool', + recovery_pool, '--filesystem', self.fs.name, + '--force-corrupt', '--force-init', + self.fs.get_data_pool_name()]) + self.fs.journal_tool(['event', 'recover_dentries', 'list', + '--alternate-pool', recovery_pool], 0) + + self.fs.data_scan(['init', '--force-init', '--filesystem', + self.fs.name]) + self.fs.data_scan(['scan_inodes', '--filesystem', self.fs.name, + '--force-corrupt', '--force-init', + self.fs.get_data_pool_name()]) + self.fs.journal_tool(['event', 'recover_dentries', 'list'], 0) + + self.recovery_fs.journal_tool(['journal', 'reset', '--force'], 0) + self.fs.journal_tool(['journal', 'reset', '--force'], 0) + self.fs.mon_manager.raw_cluster_cmd('mds', 'repaired', + recovery_fs + ":0") + + # Mark the MDS repaired + self.fs.mon_manager.raw_cluster_cmd('mds', 'repaired', '0') + + # Start the MDS + self.fs.mds_restart() + self.recovery_fs.mds_restart() + self.fs.wait_for_daemons() + self.recovery_fs.wait_for_daemons() + status = self.recovery_fs.status() + for rank in self.recovery_fs.get_ranks(status=status): + self.fs.mon_manager.raw_cluster_cmd('tell', "mds." 
+ rank['name'], + 'injectargs', '--debug-mds=20') + self.fs.rank_tell(['scrub', 'start', '/', 'recursive', 'repair'], rank=rank['rank'], status=status) + log.info(str(self.mds_cluster.status())) + + # Mount a client + self.mount_a.mount() + self.mount_b.mount(mount_fs_name=recovery_fs) + self.mount_a.wait_until_mounted() + self.mount_b.wait_until_mounted() + + # See that the files are present and correct + errors = workload.validate() + if errors: + log.error("Validation errors found: {0}".format(len(errors))) + for e in errors: + log.error(e.exception) + log.error(e.backtrace) + raise AssertionError("Validation failed, first error: {0}\n{1}".format( + errors[0].exception, errors[0].backtrace + )) + + def test_rebuild_simple(self): + self._rebuild_metadata(SimpleOverlayWorkload(self.fs, self.recovery_fs, + self.mount_a, self.mount_b)) diff --git a/qa/tasks/cephfs/test_scrub.py b/qa/tasks/cephfs/test_scrub.py new file mode 100644 index 00000000..1875b5f3 --- /dev/null +++ b/qa/tasks/cephfs/test_scrub.py @@ -0,0 +1,175 @@ +""" +Test CephFS scrub (distinct from OSD scrub) functionality +""" +import logging +from collections import namedtuple + +from tasks.cephfs.cephfs_test_case import CephFSTestCase + +log = logging.getLogger(__name__) + +ValidationError = namedtuple("ValidationError", ["exception", "backtrace"]) + + +class Workload(CephFSTestCase): + def __init__(self, filesystem, mount): + super().__init__() + self._mount = mount + self._filesystem = filesystem + self._initial_state = None + + # Accumulate backtraces for every failed validation, and return them. Backtraces + # are rather verbose, but we only see them when something breaks, and they + # let us see which check failed without having to decorate each check with + # a string + self._errors = [] + + def write(self): + """ + Write the workload files to the mount + """ + raise NotImplementedError() + + def validate(self): + """ + Read from the mount and validate that the workload files are present (i.e. have + survived or been reconstructed from the test scenario) + """ + raise NotImplementedError() + + def damage(self): + """ + Damage the filesystem pools in ways that will be interesting to recover from. By + default just wipe everything in the metadata pool + """ + # Delete every object in the metadata pool + objects = self._filesystem.rados(["ls"]).split("\n") + for o in objects: + self._filesystem.rados(["rm", o]) + + def flush(self): + """ + Called after client unmount, after write: flush whatever you want + """ + self._filesystem.mds_asok(["flush", "journal"]) + + +class BacktraceWorkload(Workload): + """ + Single file, single directory, wipe the backtrace and check it. + """ + def write(self): + self._mount.run_shell(["mkdir", "subdir"]) + self._mount.write_n_mb("subdir/sixmegs", 6) + + def validate(self): + st = self._mount.stat("subdir/sixmegs") + self._filesystem.mds_asok(["flush", "journal"]) + bt = self._filesystem.read_backtrace(st['st_ino']) + parent = bt['ancestors'][0]['dname'] + self.assertEqual(parent, 'sixmegs') + return self._errors + + def damage(self): + st = self._mount.stat("subdir/sixmegs") + self._filesystem.mds_asok(["flush", "journal"]) + self._filesystem._write_data_xattr(st['st_ino'], "parent", "") + + def create_files(self, nfiles=1000): + self._mount.create_n_files("scrub-new-files/file", nfiles) + + +class DupInodeWorkload(Workload): + """ + Duplicate an inode and try scrubbing it twice." 
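+    The damage step copies the omap value of the primary dentry key
+    ("parentfile_head") to a second key ("shadow_head") in the same dirfrag
+    object, so two dentries end up referencing the same inode.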
+ """ + + def write(self): + self._mount.run_shell(["mkdir", "parent"]) + self._mount.run_shell(["mkdir", "parent/child"]) + self._mount.write_n_mb("parent/parentfile", 6) + self._mount.write_n_mb("parent/child/childfile", 6) + + def damage(self): + temp_bin_path = "/tmp/10000000000.00000000_omap.bin" + self._mount.umount_wait() + self._filesystem.mds_asok(["flush", "journal"]) + self._filesystem.mds_stop() + self._filesystem.rados(["getomapval", "10000000000.00000000", + "parentfile_head", temp_bin_path]) + self._filesystem.rados(["setomapval", "10000000000.00000000", + "shadow_head"], stdin_file=temp_bin_path) + self._filesystem.set_ceph_conf('mds', 'mds hack allow loading invalid metadata', True) + self._filesystem.mds_restart() + self._filesystem.wait_for_daemons() + + def validate(self): + out_json = self._filesystem.rank_tell(["scrub", "start", "/", "recursive", "repair"]) + self.assertNotEqual(out_json, None) + self.assertTrue(self._filesystem.are_daemons_healthy()) + return self._errors + + +class TestScrub(CephFSTestCase): + MDSS_REQUIRED = 1 + + def setUp(self): + super().setUp() + + def _scrub(self, workload, workers=1): + """ + That when all objects in metadata pool are removed, we can rebuild a metadata pool + based on the contents of a data pool, and a client can see and read our files. + """ + + # First, inject some files + + workload.write() + + # are off by default, but in QA we need to explicitly disable them) + self.fs.set_ceph_conf('mds', 'mds verify scatter', False) + self.fs.set_ceph_conf('mds', 'mds debug scatterstat', False) + + # Apply any data damage the workload wants + workload.damage() + + out_json = self.fs.rank_tell(["scrub", "start", "/", "recursive", "repair"]) + self.assertNotEqual(out_json, None) + + # See that the files are present and correct + errors = workload.validate() + if errors: + log.error("Validation errors found: {0}".format(len(errors))) + for e in errors: + log.error(e.exception) + log.error(e.backtrace) + raise AssertionError("Validation failed, first error: {0}\n{1}".format( + errors[0].exception, errors[0].backtrace + )) + + def _get_damage_count(self, damage_type='backtrace'): + out_json = self.fs.rank_tell(["damage", "ls"]) + self.assertNotEqual(out_json, None) + + damage_count = 0 + for it in out_json: + if it['damage_type'] == damage_type: + damage_count += 1 + return damage_count + + def _scrub_new_files(self, workload): + """ + That scrubbing new files does not lead to errors + """ + workload.create_files(1000) + self._wait_until_scrub_complete() + self.assertEqual(self._get_damage_count(), 0) + + def test_scrub_backtrace_for_new_files(self): + self._scrub_new_files(BacktraceWorkload(self.fs, self.mount_a)) + + def test_scrub_backtrace(self): + self._scrub(BacktraceWorkload(self.fs, self.mount_a)) + + def test_scrub_dup_inode(self): + self._scrub(DupInodeWorkload(self.fs, self.mount_a)) diff --git a/qa/tasks/cephfs/test_scrub_checks.py b/qa/tasks/cephfs/test_scrub_checks.py new file mode 100644 index 00000000..54ed16ff --- /dev/null +++ b/qa/tasks/cephfs/test_scrub_checks.py @@ -0,0 +1,405 @@ +""" +MDS admin socket scrubbing-related tests. +""" +import json +import logging +import errno +import time +from teuthology.exceptions import CommandFailedError +import os +from tasks.cephfs.cephfs_test_case import CephFSTestCase + +log = logging.getLogger(__name__) + +class TestScrubControls(CephFSTestCase): + """ + Test basic scrub control operations such as abort, pause and resume. 
+ """ + + MDSS_REQUIRED = 2 + CLIENTS_REQUIRED = 1 + + def _abort_scrub(self, expected): + res = self.fs.rank_tell(["scrub", "abort"]) + self.assertEqual(res['return_code'], expected) + def _pause_scrub(self, expected): + res = self.fs.rank_tell(["scrub", "pause"]) + self.assertEqual(res['return_code'], expected) + def _resume_scrub(self, expected): + res = self.fs.rank_tell(["scrub", "resume"]) + self.assertEqual(res['return_code'], expected) + def _get_scrub_status(self): + return self.fs.rank_tell(["scrub", "status"]) + def _check_task_status(self, expected_status): + task_status = self.fs.get_task_status("scrub status") + active = self.fs.get_active_names() + log.debug("current active={0}".format(active)) + self.assertTrue(task_status[active[0]].startswith(expected_status)) + + def test_scrub_abort(self): + test_dir = "scrub_control_test_path" + abs_test_path = "/{0}".format(test_dir) + + log.info("mountpoint: {0}".format(self.mount_a.mountpoint)) + client_path = os.path.join(self.mount_a.mountpoint, test_dir) + log.info("client_path: {0}".format(client_path)) + + log.info("Cloning repo into place") + TestScrubChecks.clone_repo(self.mount_a, client_path) + + out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"]) + self.assertNotEqual(out_json, None) + + # abort and verify + self._abort_scrub(0) + out_json = self._get_scrub_status() + self.assertTrue("no active" in out_json['status']) + + # sleep enough to fetch updated task status + time.sleep(10) + self._check_task_status("idle") + + def test_scrub_pause_and_resume(self): + test_dir = "scrub_control_test_path" + abs_test_path = "/{0}".format(test_dir) + + log.info("mountpoint: {0}".format(self.mount_a.mountpoint)) + client_path = os.path.join(self.mount_a.mountpoint, test_dir) + log.info("client_path: {0}".format(client_path)) + + log.info("Cloning repo into place") + _ = TestScrubChecks.clone_repo(self.mount_a, client_path) + + out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"]) + self.assertNotEqual(out_json, None) + + # pause and verify + self._pause_scrub(0) + out_json = self._get_scrub_status() + self.assertTrue("PAUSED" in out_json['status']) + + # sleep enough to fetch updated task status + time.sleep(10) + self._check_task_status("paused") + + # resume and verify + self._resume_scrub(0) + out_json = self._get_scrub_status() + self.assertFalse("PAUSED" in out_json['status']) + + def test_scrub_pause_and_resume_with_abort(self): + test_dir = "scrub_control_test_path" + abs_test_path = "/{0}".format(test_dir) + + log.info("mountpoint: {0}".format(self.mount_a.mountpoint)) + client_path = os.path.join(self.mount_a.mountpoint, test_dir) + log.info("client_path: {0}".format(client_path)) + + log.info("Cloning repo into place") + _ = TestScrubChecks.clone_repo(self.mount_a, client_path) + + out_json = self.fs.rank_tell(["scrub", "start", abs_test_path, "recursive"]) + self.assertNotEqual(out_json, None) + + # pause and verify + self._pause_scrub(0) + out_json = self._get_scrub_status() + self.assertTrue("PAUSED" in out_json['status']) + + # sleep enough to fetch updated task status + time.sleep(10) + self._check_task_status("paused") + + # abort and verify + self._abort_scrub(0) + out_json = self._get_scrub_status() + self.assertTrue("PAUSED" in out_json['status']) + self.assertTrue("0 inodes" in out_json['status']) + + # sleep enough to fetch updated task status + time.sleep(10) + self._check_task_status("paused") + + # resume and verify + self._resume_scrub(0) + out_json = 
self._get_scrub_status() + self.assertTrue("no active" in out_json['status']) + + # sleep enough to fetch updated task status + time.sleep(10) + self._check_task_status("idle") + + def test_scrub_task_status_on_mds_failover(self): + # sleep enough to fetch updated task status + time.sleep(10) + + (original_active, ) = self.fs.get_active_names() + original_standbys = self.mds_cluster.get_standby_daemons() + self._check_task_status("idle") + + # Kill the rank 0 + self.fs.mds_stop(original_active) + + grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon")) + + def promoted(): + active = self.fs.get_active_names() + return active and active[0] in original_standbys + + log.info("Waiting for promotion of one of the original standbys {0}".format( + original_standbys)) + self.wait_until_true(promoted, timeout=grace*2) + + mgr_beacon_grace = float(self.fs.get_config("mgr_service_beacon_grace", service_type="mon")) + + def status_check(): + task_status = self.fs.get_task_status("scrub status") + return original_active not in task_status + self.wait_until_true(status_check, timeout=mgr_beacon_grace*2) + +class TestScrubChecks(CephFSTestCase): + """ + Run flush and scrub commands on the specified files in the filesystem. This + task will run through a sequence of operations, but it is not comprehensive + on its own -- it doesn't manipulate the mds cache state to test on both + in- and out-of-memory parts of the hierarchy. So it's designed to be run + multiple times within a single test run, so that the test can manipulate + memory state. + + Usage: + mds_scrub_checks: + mds_rank: 0 + path: path/to/test/dir + client: 0 + run_seq: [0-9]+ + + Increment the run_seq on subsequent invocations within a single test run; + it uses that value to generate unique folder and file names. 
+ """ + + MDSS_REQUIRED = 1 + CLIENTS_REQUIRED = 1 + + def test_scrub_checks(self): + self._checks(0) + self._checks(1) + + def _checks(self, run_seq): + mds_rank = 0 + test_dir = "scrub_test_path" + + abs_test_path = "/{0}".format(test_dir) + + log.info("mountpoint: {0}".format(self.mount_a.mountpoint)) + client_path = os.path.join(self.mount_a.mountpoint, test_dir) + log.info("client_path: {0}".format(client_path)) + + log.info("Cloning repo into place") + repo_path = TestScrubChecks.clone_repo(self.mount_a, client_path) + + log.info("Initiating mds_scrub_checks on mds.{id_}, " + + "test_path {path}, run_seq {seq}".format( + id_=mds_rank, path=abs_test_path, seq=run_seq) + ) + + + success_validator = lambda j, r: self.json_validator(j, r, "return_code", 0) + + nep = "{test_path}/i/dont/exist".format(test_path=abs_test_path) + self.asok_command(mds_rank, "flush_path {nep}".format(nep=nep), + lambda j, r: self.json_validator(j, r, "return_code", -errno.ENOENT)) + self.tell_command(mds_rank, "scrub start {nep}".format(nep=nep), + lambda j, r: self.json_validator(j, r, "return_code", -errno.ENOENT)) + + test_repo_path = "{test_path}/ceph-qa-suite".format(test_path=abs_test_path) + dirpath = "{repo_path}/suites".format(repo_path=test_repo_path) + + if run_seq == 0: + log.info("First run: flushing {dirpath}".format(dirpath=dirpath)) + command = "flush_path {dirpath}".format(dirpath=dirpath) + self.asok_command(mds_rank, command, success_validator) + command = "scrub start {dirpath}".format(dirpath=dirpath) + self.tell_command(mds_rank, command, success_validator) + + filepath = "{repo_path}/suites/fs/verify/validater/valgrind.yaml".format( + repo_path=test_repo_path) + if run_seq == 0: + log.info("First run: flushing {filepath}".format(filepath=filepath)) + command = "flush_path {filepath}".format(filepath=filepath) + self.asok_command(mds_rank, command, success_validator) + command = "scrub start {filepath}".format(filepath=filepath) + self.tell_command(mds_rank, command, success_validator) + + filepath = "{repo_path}/suites/fs/basic/clusters/fixed-3-cephfs.yaml". 
\ + format(repo_path=test_repo_path) + command = "scrub start {filepath}".format(filepath=filepath) + self.tell_command(mds_rank, command, + lambda j, r: self.json_validator(j, r, "performed_validation", + False)) + + if run_seq == 0: + log.info("First run: flushing base dir /") + command = "flush_path /" + self.asok_command(mds_rank, command, success_validator) + command = "scrub start /" + self.tell_command(mds_rank, command, success_validator) + + new_dir = "{repo_path}/new_dir_{i}".format(repo_path=repo_path, i=run_seq) + test_new_dir = "{repo_path}/new_dir_{i}".format(repo_path=test_repo_path, + i=run_seq) + self.mount_a.run_shell(["mkdir", new_dir]) + command = "flush_path {dir}".format(dir=test_new_dir) + self.asok_command(mds_rank, command, success_validator) + + new_file = "{repo_path}/new_file_{i}".format(repo_path=repo_path, + i=run_seq) + test_new_file = "{repo_path}/new_file_{i}".format(repo_path=test_repo_path, + i=run_seq) + self.mount_a.write_n_mb(new_file, 1) + + command = "flush_path {file}".format(file=test_new_file) + self.asok_command(mds_rank, command, success_validator) + + # check that scrub fails on errors + ino = self.mount_a.path_to_ino(new_file) + rados_obj_name = "{ino:x}.00000000".format(ino=ino) + command = "scrub start {file}".format(file=test_new_file) + + # Missing parent xattr -> ENODATA + self.fs.rados(["rmxattr", rados_obj_name, "parent"], pool=self.fs.get_data_pool_name()) + self.tell_command(mds_rank, command, + lambda j, r: self.json_validator(j, r, "return_code", -errno.ENODATA)) + + # Missing object -> ENOENT + self.fs.rados(["rm", rados_obj_name], pool=self.fs.get_data_pool_name()) + self.tell_command(mds_rank, command, + lambda j, r: self.json_validator(j, r, "return_code", -errno.ENOENT)) + + command = "flush_path /" + self.asok_command(mds_rank, command, success_validator) + + def test_scrub_repair(self): + mds_rank = 0 + test_dir = "scrub_repair_path" + + self.mount_a.run_shell(["sudo", "mkdir", test_dir]) + self.mount_a.run_shell(["sudo", "touch", "{0}/file".format(test_dir)]) + dir_objname = "{:x}.00000000".format(self.mount_a.path_to_ino(test_dir)) + + self.mount_a.umount_wait() + + # flush journal entries to dirfrag objects, and expire journal + self.fs.mds_asok(['flush', 'journal']) + self.fs.mds_stop() + + # remove the dentry from dirfrag, cause incorrect fragstat/rstat + self.fs.rados(["rmomapkey", dir_objname, "file_head"], + pool=self.fs.get_metadata_pool_name()) + + self.fs.mds_fail_restart() + self.fs.wait_for_daemons() + + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + # fragstat indicates the directory is not empty, rmdir should fail + with self.assertRaises(CommandFailedError) as ar: + self.mount_a.run_shell(["sudo", "rmdir", test_dir]) + self.assertEqual(ar.exception.exitstatus, 1) + + self.tell_command(mds_rank, "scrub start /{0} repair".format(test_dir), + lambda j, r: self.json_validator(j, r, "return_code", 0)) + + # wait a few second for background repair + time.sleep(10) + + # fragstat should be fixed + self.mount_a.run_shell(["sudo", "rmdir", test_dir]) + + @staticmethod + def json_validator(json_out, rc, element, expected_value): + if rc != 0: + return False, "asok command returned error {rc}".format(rc=rc) + element_value = json_out.get(element) + if element_value != expected_value: + return False, "unexpectedly got {jv} instead of {ev}!".format( + jv=element_value, ev=expected_value) + return True, "Succeeded" + + def tell_command(self, mds_rank, command, validator): + log.info("Running command 
'{command}'".format(command=command)) + + command_list = command.split() + jout = self.fs.rank_tell(command_list, mds_rank) + + log.info("command '{command}' returned '{jout}'".format( + command=command, jout=jout)) + + success, errstring = validator(jout, 0) + if not success: + raise AsokCommandFailedError(command, 0, jout, errstring) + return jout + + def asok_command(self, mds_rank, command, validator): + log.info("Running command '{command}'".format(command=command)) + + command_list = command.split() + + # we just assume there's an active mds for every rank + mds_id = self.fs.get_active_names()[mds_rank] + proc = self.fs.mon_manager.admin_socket('mds', mds_id, + command_list, check_status=False) + rout = proc.exitstatus + sout = proc.stdout.getvalue() + + if sout.strip(): + jout = json.loads(sout) + else: + jout = None + + log.info("command '{command}' got response code " + + "'{rout}' and stdout '{sout}'".format( + command=command, rout=rout, sout=sout)) + + success, errstring = validator(jout, rout) + + if not success: + raise AsokCommandFailedError(command, rout, jout, errstring) + + return jout + + @staticmethod + def clone_repo(client_mount, path): + repo = "ceph-qa-suite" + repo_path = os.path.join(path, repo) + client_mount.run_shell(["mkdir", "-p", path]) + + try: + client_mount.stat(repo_path) + except CommandFailedError: + client_mount.run_shell([ + "git", "clone", '--branch', 'giant', + "http://github.com/ceph/{repo}".format(repo=repo), + "{path}/{repo}".format(path=path, repo=repo) + ]) + + return repo_path + + +class AsokCommandFailedError(Exception): + """ + Exception thrown when we get an unexpected response + on an admin socket command + """ + + def __init__(self, command, rc, json_out, errstring): + self.command = command + self.rc = rc + self.json = json_out + self.errstring = errstring + + def __str__(self): + return "Admin socket: {command} failed with rc={rc}," + \ + "json output={json}, because '{es}'".format( + command=self.command, rc=self.rc, + json=self.json, es=self.errstring) diff --git a/qa/tasks/cephfs/test_sessionmap.py b/qa/tasks/cephfs/test_sessionmap.py new file mode 100644 index 00000000..b44991f5 --- /dev/null +++ b/qa/tasks/cephfs/test_sessionmap.py @@ -0,0 +1,236 @@ +import time +import json +import logging +from unittest import SkipTest + +from tasks.cephfs.fuse_mount import FuseMount +from teuthology.exceptions import CommandFailedError +from tasks.cephfs.cephfs_test_case import CephFSTestCase +from teuthology.misc import sudo_write_file + +log = logging.getLogger(__name__) + + +class TestSessionMap(CephFSTestCase): + CLIENTS_REQUIRED = 2 + MDSS_REQUIRED = 2 + + def test_tell_session_drop(self): + """ + That when a `tell` command is sent using the python CLI, + its MDS session is gone after it terminates + """ + self.mount_a.umount_wait() + self.mount_b.umount_wait() + + status = self.fs.status() + self.fs.rank_tell(["session", "ls"], status=status) + + ls_data = self.fs.rank_asok(['session', 'ls'], status=status) + self.assertEqual(len(ls_data), 0) + + def _get_connection_count(self, status=None): + perf = self.fs.rank_asok(["perf", "dump"], status=status) + conn = 0 + for module, dump in perf.items(): + if "AsyncMessenger::Worker" in module: + conn += dump['msgr_active_connections'] + return conn + + def test_tell_conn_close(self): + """ + That when a `tell` command is sent using the python CLI, + the conn count goes back to where it started (i.e. 
we aren't + leaving connections open) + """ + self.mount_a.umount_wait() + self.mount_b.umount_wait() + + status = self.fs.status() + s = self._get_connection_count(status=status) + self.fs.rank_tell(["session", "ls"], status=status) + e = self._get_connection_count(status=status) + + self.assertEqual(s, e) + + def test_mount_conn_close(self): + """ + That when a client unmounts, the thread count on the MDS goes back + to what it was before the client mounted + """ + self.mount_a.umount_wait() + self.mount_b.umount_wait() + + status = self.fs.status() + s = self._get_connection_count(status=status) + self.mount_a.mount() + self.mount_a.wait_until_mounted() + self.assertGreater(self._get_connection_count(status=status), s) + self.mount_a.umount_wait() + e = self._get_connection_count(status=status) + + self.assertEqual(s, e) + + def test_version_splitting(self): + """ + That when many sessions are updated, they are correctly + split into multiple versions to obey mds_sessionmap_keys_per_op + """ + + # Start umounted + self.mount_a.umount_wait() + self.mount_b.umount_wait() + + # Configure MDS to write one OMAP key at once + self.set_conf('mds', 'mds_sessionmap_keys_per_op', 1) + self.fs.mds_fail_restart() + self.fs.wait_for_daemons() + + # I would like two MDSs, so that I can do an export dir later + self.fs.set_max_mds(2) + self.fs.wait_for_daemons() + + status = self.fs.status() + + # Bring the clients back + self.mount_a.mount() + self.mount_b.mount() + self.mount_a.create_files() # Kick the client into opening sessions + self.mount_b.create_files() + + # See that they've got sessions + self.assert_session_count(2, mds_id=self.fs.get_rank(status=status)['name']) + + # See that we persist their sessions + self.fs.rank_asok(["flush", "journal"], rank=0, status=status) + table_json = json.loads(self.fs.table_tool(["0", "show", "session"])) + log.info("SessionMap: {0}".format(json.dumps(table_json, indent=2))) + self.assertEqual(table_json['0']['result'], 0) + self.assertEqual(len(table_json['0']['data']['sessions']), 2) + + # Now, induce a "force_open_sessions" event by exporting a dir + self.mount_a.run_shell(["mkdir", "bravo"]) + self.mount_a.run_shell(["touch", "bravo/file"]) + self.mount_b.run_shell(["ls", "-l", "bravo/file"]) + + def get_omap_wrs(): + return self.fs.rank_asok(['perf', 'dump', 'objecter'], rank=1, status=status)['objecter']['omap_wr'] + + # Flush so that there are no dirty sessions on rank 1 + self.fs.rank_asok(["flush", "journal"], rank=1, status=status) + + # Export so that we get a force_open to rank 1 for the two sessions from rank 0 + initial_omap_wrs = get_omap_wrs() + self.fs.rank_asok(['export', 'dir', '/bravo', '1'], rank=0, status=status) + + # This is the critical (if rather subtle) check: that in the process of doing an export dir, + # we hit force_open_sessions, and as a result we end up writing out the sessionmap. There + # will be two sessions dirtied here, and because we have set keys_per_op to 1, we should see + # a single session get written out (the first of the two, triggered by the second getting marked + # dirty) + # The number of writes is two per session, because the header (sessionmap version) update and + # KV write both count. Also, multiply by 2 for each openfile table update. 
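+        # Worked out for this test: 1 session flushed x 2 writes (header + KV)
+        # x 2 for the matching openfile table update = 4 omap writes, which is
+        # the 2*2 delta waited for below.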
+ self.wait_until_true( + lambda: get_omap_wrs() - initial_omap_wrs == 2*2, + timeout=30 # Long enough for an export to get acked + ) + + # Now end our sessions and check the backing sessionmap is updated correctly + self.mount_a.umount_wait() + self.mount_b.umount_wait() + + # In-memory sessionmap check + self.assert_session_count(0, mds_id=self.fs.get_rank(status=status)['name']) + + # On-disk sessionmap check + self.fs.rank_asok(["flush", "journal"], rank=0, status=status) + table_json = json.loads(self.fs.table_tool(["0", "show", "session"])) + log.info("SessionMap: {0}".format(json.dumps(table_json, indent=2))) + self.assertEqual(table_json['0']['result'], 0) + self.assertEqual(len(table_json['0']['data']['sessions']), 0) + + def _configure_auth(self, mount, id_name, mds_caps, osd_caps=None, mon_caps=None): + """ + Set up auth credentials for a client mount, and write out the keyring + for the client to use. + """ + + if osd_caps is None: + osd_caps = "allow rw" + + if mon_caps is None: + mon_caps = "allow r" + + out = self.fs.mon_manager.raw_cluster_cmd( + "auth", "get-or-create", "client.{name}".format(name=id_name), + "mds", mds_caps, + "osd", osd_caps, + "mon", mon_caps + ) + mount.client_id = id_name + sudo_write_file(mount.client_remote, mount.get_keyring_path(), out) + self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path()) + + def test_session_reject(self): + if not isinstance(self.mount_a, FuseMount): + raise SkipTest("Requires FUSE client to inject client metadata") + + self.mount_a.run_shell(["mkdir", "foo"]) + self.mount_a.run_shell(["mkdir", "foo/bar"]) + self.mount_a.umount_wait() + + # Mount B will be my rejected client + self.mount_b.umount_wait() + + # Configure a client that is limited to /foo/bar + self._configure_auth(self.mount_b, "badguy", "allow rw path=/foo/bar") + # Check he can mount that dir and do IO + self.mount_b.mount(mount_path="/foo/bar") + self.mount_b.wait_until_mounted() + self.mount_b.create_destroy() + self.mount_b.umount_wait() + + # Configure the client to claim that its mount point metadata is /baz + self.set_conf("client.badguy", "client_metadata", "root=/baz") + # Try to mount the client, see that it fails + with self.assert_cluster_log("client session with non-allowable root '/baz' denied"): + with self.assertRaises(CommandFailedError): + self.mount_b.mount(mount_path="/foo/bar") + + def test_session_evict_blacklisted(self): + """ + Check that mds evicts blacklisted client + """ + if not isinstance(self.mount_a, FuseMount): + self.skipTest("Requires FUSE client to use is_blacklisted()") + + self.fs.set_max_mds(2) + self.fs.wait_for_daemons() + status = self.fs.status() + + self.mount_a.run_shell(["mkdir", "d0", "d1"]) + self.mount_a.setfattr("d0", "ceph.dir.pin", "0") + self.mount_a.setfattr("d1", "ceph.dir.pin", "1") + self._wait_subtrees(status, 0, [('/d0', 0), ('/d1', 1)]) + + self.mount_a.run_shell(["touch", "d0/f0"]) + self.mount_a.run_shell(["touch", "d1/f0"]) + self.mount_b.run_shell(["touch", "d0/f1"]) + self.mount_b.run_shell(["touch", "d1/f1"]) + + self.assert_session_count(2, mds_id=self.fs.get_rank(rank=0, status=status)['name']) + self.assert_session_count(2, mds_id=self.fs.get_rank(rank=1, status=status)['name']) + + mount_a_client_id = self.mount_a.get_global_id() + self.fs.mds_asok(['session', 'evict', "%s" % mount_a_client_id], + mds_id=self.fs.get_rank(rank=0, status=status)['name']) + self.wait_until_true(lambda: self.mount_a.is_blacklisted(), timeout=30) + + # 10 seconds should be enough 
for evicting client + time.sleep(10) + self.assert_session_count(1, mds_id=self.fs.get_rank(rank=0, status=status)['name']) + self.assert_session_count(1, mds_id=self.fs.get_rank(rank=1, status=status)['name']) + + self.mount_a.kill_cleanup() + self.mount_a.mount() + self.mount_a.wait_until_mounted() diff --git a/qa/tasks/cephfs/test_snapshots.py b/qa/tasks/cephfs/test_snapshots.py new file mode 100644 index 00000000..f09b645c --- /dev/null +++ b/qa/tasks/cephfs/test_snapshots.py @@ -0,0 +1,530 @@ +import sys +import logging +import signal +from textwrap import dedent +from tasks.cephfs.fuse_mount import FuseMount +from tasks.cephfs.cephfs_test_case import CephFSTestCase +from teuthology.orchestra.run import CommandFailedError, Raw +from unittest import SkipTest + +log = logging.getLogger(__name__) + +MDS_RESTART_GRACE = 60 + +class TestSnapshots(CephFSTestCase): + MDSS_REQUIRED = 3 + LOAD_SETTINGS = ["mds_max_snaps_per_dir"] + + def _check_subtree(self, rank, path, status=None): + got_subtrees = self.fs.rank_asok(["get", "subtrees"], rank=rank, status=status) + for s in got_subtrees: + if s['dir']['path'] == path and s['auth_first'] == rank: + return True + return False + + def _get_snapclient_dump(self, rank=0, status=None): + return self.fs.rank_asok(["dump", "snaps"], rank=rank, status=status) + + def _get_snapserver_dump(self, rank=0, status=None): + return self.fs.rank_asok(["dump", "snaps", "--server"], rank=rank, status=status) + + def _get_last_created_snap(self, rank=0, status=None): + return int(self._get_snapserver_dump(rank,status=status)["last_created"]) + + def _get_last_destroyed_snap(self, rank=0, status=None): + return int(self._get_snapserver_dump(rank,status=status)["last_destroyed"]) + + def _get_pending_snap_update(self, rank=0, status=None): + return self._get_snapserver_dump(rank,status=status)["pending_update"] + + def _get_pending_snap_destroy(self, rank=0, status=None): + return self._get_snapserver_dump(rank,status=status)["pending_destroy"] + + def test_kill_mdstable(self): + """ + check snaptable transcation + """ + if not isinstance(self.mount_a, FuseMount): + raise SkipTest("Require FUSE client to forcibly kill mount") + + self.fs.set_allow_new_snaps(True); + self.fs.set_max_mds(2) + status = self.fs.wait_for_daemons() + + grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon")) + + # setup subtrees + self.mount_a.run_shell(["mkdir", "-p", "d1/dir"]) + self.mount_a.setfattr("d1", "ceph.dir.pin", "1") + self.wait_until_true(lambda: self._check_subtree(1, '/d1', status=status), timeout=30) + + last_created = self._get_last_created_snap(rank=0,status=status) + + # mds_kill_mdstable_at: + # 1: MDSTableServer::handle_prepare + # 2: MDSTableServer::_prepare_logged + # 5: MDSTableServer::handle_commit + # 6: MDSTableServer::_commit_logged + for i in [1,2,5,6]: + log.info("testing snapserver mds_kill_mdstable_at={0}".format(i)) + + status = self.fs.status() + rank0 = self.fs.get_rank(rank=0, status=status) + self.fs.rank_freeze(True, rank=0) + self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=0, status=status) + proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s1{0}".format(i)], wait=False) + self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=grace*2); + self.delete_mds_coredump(rank0['name']); + + self.fs.rank_fail(rank=0) + self.fs.mds_restart(rank0['name']) + self.wait_for_daemon_start([rank0['name']]) + status = self.fs.wait_for_daemons() + + proc.wait() + last_created += 1 + 
self.wait_until_true(lambda: self._get_last_created_snap(rank=0) == last_created, timeout=30) + + self.set_conf("mds", "mds_reconnect_timeout", "5") + + self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")]) + + # set mds_kill_mdstable_at, also kill snapclient + for i in [2,5,6]: + log.info("testing snapserver mds_kill_mdstable_at={0}, also kill snapclient".format(i)) + status = self.fs.status() + last_created = self._get_last_created_snap(rank=0, status=status) + + rank0 = self.fs.get_rank(rank=0, status=status) + rank1 = self.fs.get_rank(rank=1, status=status) + self.fs.rank_freeze(True, rank=0) # prevent failover... + self.fs.rank_freeze(True, rank=1) # prevent failover... + self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=0, status=status) + proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s2{0}".format(i)], wait=False) + self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=grace*2); + self.delete_mds_coredump(rank0['name']); + + self.fs.rank_signal(signal.SIGKILL, rank=1) + + self.mount_a.kill() + self.mount_a.kill_cleanup() + + self.fs.rank_fail(rank=0) + self.fs.mds_restart(rank0['name']) + self.wait_for_daemon_start([rank0['name']]) + + self.fs.wait_for_state('up:resolve', rank=0, timeout=MDS_RESTART_GRACE) + if i in [2,5]: + self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1) + elif i == 6: + self.assertEqual(len(self._get_pending_snap_update(rank=0)), 0) + self.assertGreater(self._get_last_created_snap(rank=0), last_created) + + self.fs.rank_fail(rank=1) + self.fs.mds_restart(rank1['name']) + self.wait_for_daemon_start([rank1['name']]) + self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE) + + if i in [2,5]: + self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30) + if i == 2: + self.assertEqual(self._get_last_created_snap(rank=0), last_created) + else: + self.assertGreater(self._get_last_created_snap(rank=0), last_created) + + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")]) + + # mds_kill_mdstable_at: + # 3: MDSTableClient::handle_request (got agree) + # 4: MDSTableClient::commit + # 7: MDSTableClient::handle_request (got ack) + for i in [3,4,7]: + log.info("testing snapclient mds_kill_mdstable_at={0}".format(i)) + last_created = self._get_last_created_snap(rank=0) + + status = self.fs.status() + rank1 = self.fs.get_rank(rank=1, status=status) + self.fs.rank_freeze(True, rank=1) # prevent failover... 
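+            # Expected outcome, matching the assertions further down: for kill
+            # points 3 and 4 the server should still hold one pending table
+            # update, while at 7 the update was already committed, so nothing
+            # is pending and last_created has advanced.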
+ self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "{0}".format(i)], rank=1, status=status) + proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s3{0}".format(i)], wait=False) + self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=1), timeout=grace*2); + self.delete_mds_coredump(rank1['name']); + + self.mount_a.kill() + self.mount_a.kill_cleanup() + + if i in [3,4]: + self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1) + elif i == 7: + self.assertEqual(len(self._get_pending_snap_update(rank=0)), 0) + self.assertGreater(self._get_last_created_snap(rank=0), last_created) + + self.fs.rank_fail(rank=1) + self.fs.mds_restart(rank1['name']) + self.wait_for_daemon_start([rank1['name']]) + status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE) + + if i in [3,4]: + self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30) + if i == 3: + self.assertEqual(self._get_last_created_snap(rank=0), last_created) + else: + self.assertGreater(self._get_last_created_snap(rank=0), last_created) + + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + self.mount_a.run_shell(["rmdir", Raw("d1/dir/.snap/*")]) + + # mds_kill_mdstable_at: + # 3: MDSTableClient::handle_request (got agree) + # 8: MDSTableServer::handle_rollback + log.info("testing snapclient mds_kill_mdstable_at=3, snapserver mds_kill_mdstable_at=8") + last_created = self._get_last_created_snap(rank=0) + + status = self.fs.status() + rank0 = self.fs.get_rank(rank=0, status=status) + rank1 = self.fs.get_rank(rank=1, status=status) + self.fs.rank_freeze(True, rank=0) + self.fs.rank_freeze(True, rank=1) + self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "8".format(i)], rank=0, status=status) + self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "3".format(i)], rank=1, status=status) + proc = self.mount_a.run_shell(["mkdir", "d1/dir/.snap/s4".format(i)], wait=False) + self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=1), timeout=grace*2); + self.delete_mds_coredump(rank1['name']); + + self.mount_a.kill() + self.mount_a.kill_cleanup() + + self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1) + + self.fs.rank_fail(rank=1) + self.fs.mds_restart(rank1['name']) + self.wait_for_daemon_start([rank1['name']]) + + # rollback triggers assertion + self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=0), timeout=grace*2); + self.delete_mds_coredump(rank0['name']); + self.fs.rank_fail(rank=0) + self.fs.mds_restart(rank0['name']) + self.wait_for_daemon_start([rank0['name']]) + self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE) + + # mds.1 should re-send rollback message + self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30) + self.assertEqual(self._get_last_created_snap(rank=0), last_created) + + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + def test_snapclient_cache(self): + """ + check if snapclient cache gets synced properly + """ + self.fs.set_allow_new_snaps(True); + self.fs.set_max_mds(3) + status = self.fs.wait_for_daemons() + + grace = float(self.fs.get_config("mds_beacon_grace", service_type="mon")) + + self.mount_a.run_shell(["mkdir", "-p", "d0/d1/dir"]) + self.mount_a.run_shell(["mkdir", "-p", "d0/d2/dir"]) + self.mount_a.setfattr("d0", "ceph.dir.pin", "0") + self.mount_a.setfattr("d0/d1", "ceph.dir.pin", "1") + self.mount_a.setfattr("d0/d2", "ceph.dir.pin", "2") + self.wait_until_true(lambda: self._check_subtree(2, '/d0/d2', status=status), 
timeout=30) + self.wait_until_true(lambda: self._check_subtree(1, '/d0/d1', status=status), timeout=5) + self.wait_until_true(lambda: self._check_subtree(0, '/d0', status=status), timeout=5) + + def _check_snapclient_cache(snaps_dump, cache_dump=None, rank=0): + if cache_dump is None: + cache_dump = self._get_snapclient_dump(rank=rank) + for key, value in cache_dump.items(): + if value != snaps_dump[key]: + return False + return True; + + # sync after mksnap + last_created = self._get_last_created_snap(rank=0) + self.mount_a.run_shell(["mkdir", "d0/d1/dir/.snap/s1", "d0/d1/dir/.snap/s2"]) + self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30) + self.assertGreater(self._get_last_created_snap(rank=0), last_created) + + snaps_dump = self._get_snapserver_dump(rank=0) + self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0)); + self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1)); + self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2)); + + # sync after rmsnap + last_destroyed = self._get_last_destroyed_snap(rank=0) + self.mount_a.run_shell(["rmdir", "d0/d1/dir/.snap/s1"]) + self.wait_until_true(lambda: len(self._get_pending_snap_destroy(rank=0)) == 0, timeout=30) + self.assertGreater(self._get_last_destroyed_snap(rank=0), last_destroyed) + + snaps_dump = self._get_snapserver_dump(rank=0) + self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0)); + self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1)); + self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2)); + + # sync during mds recovers + self.fs.rank_fail(rank=2) + status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE) + self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2)); + + self.fs.rank_fail(rank=0) + self.fs.rank_fail(rank=1) + status = self.fs.wait_for_daemons() + self.fs.wait_for_state('up:active', rank=0, timeout=MDS_RESTART_GRACE) + self.assertTrue(_check_snapclient_cache(snaps_dump, rank=0)); + self.assertTrue(_check_snapclient_cache(snaps_dump, rank=1)); + self.assertTrue(_check_snapclient_cache(snaps_dump, rank=2)); + + # kill at MDSTableClient::handle_notify_prep + status = self.fs.status() + rank2 = self.fs.get_rank(rank=2, status=status) + self.fs.rank_freeze(True, rank=2) + self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "9"], rank=2, status=status) + proc = self.mount_a.run_shell(["mkdir", "d0/d1/dir/.snap/s3"], wait=False) + self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=2), timeout=grace*2); + self.delete_mds_coredump(rank2['name']); + + # mksnap should wait for notify ack from mds.2 + self.assertFalse(proc.finished); + + # mksnap should proceed after mds.2 fails + self.fs.rank_fail(rank=2) + self.wait_until_true(lambda: proc.finished, timeout=30); + + self.fs.mds_restart(rank2['name']) + self.wait_for_daemon_start([rank2['name']]) + status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE) + + self.mount_a.run_shell(["rmdir", Raw("d0/d1/dir/.snap/*")]) + + # kill at MDSTableClient::commit + # the recovering mds should sync all mds' cache when it enters resolve stage + self.set_conf("mds", "mds_reconnect_timeout", "5") + for i in range(1, 4): + status = self.fs.status() + rank2 = self.fs.get_rank(rank=2, status=status) + self.fs.rank_freeze(True, rank=2) + self.fs.rank_asok(['config', 'set', "mds_kill_mdstable_at", "4"], rank=2, status=status) + last_created = self._get_last_created_snap(rank=0) + proc = self.mount_a.run_shell(["mkdir", "d0/d2/dir/.snap/s{0}".format(i)], wait=False) + 
self.wait_until_true(lambda: "laggy_since" in self.fs.get_rank(rank=2), timeout=grace*2); + self.delete_mds_coredump(rank2['name']); + + self.mount_a.kill() + self.mount_a.kill_cleanup() + + self.assertEqual(len(self._get_pending_snap_update(rank=0)), 1) + + if i in [2,4]: + self.fs.rank_fail(rank=0) + if i in [3,4]: + self.fs.rank_fail(rank=1) + + self.fs.rank_fail(rank=2) + self.fs.mds_restart(rank2['name']) + self.wait_for_daemon_start([rank2['name']]) + status = self.fs.wait_for_daemons(timeout=MDS_RESTART_GRACE) + + rank0_cache = self._get_snapclient_dump(rank=0) + rank1_cache = self._get_snapclient_dump(rank=1) + rank2_cache = self._get_snapclient_dump(rank=2) + + self.assertGreater(int(rank0_cache["last_created"]), last_created) + self.assertEqual(rank0_cache, rank1_cache); + self.assertEqual(rank0_cache, rank2_cache); + + self.wait_until_true(lambda: len(self._get_pending_snap_update(rank=0)) == 0, timeout=30) + + snaps_dump = self._get_snapserver_dump(rank=0) + self.assertEqual(snaps_dump["last_created"], rank0_cache["last_created"]) + self.assertTrue(_check_snapclient_cache(snaps_dump, cache_dump=rank0_cache)); + + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + self.mount_a.run_shell(["rmdir", Raw("d0/d2/dir/.snap/*")]) + + def test_multimds_mksnap(self): + """ + check if snapshot takes effect across authority subtrees + """ + self.fs.set_allow_new_snaps(True); + self.fs.set_max_mds(2) + status = self.fs.wait_for_daemons() + + self.mount_a.run_shell(["mkdir", "-p", "d0/d1"]) + self.mount_a.setfattr("d0", "ceph.dir.pin", "0") + self.mount_a.setfattr("d0/d1", "ceph.dir.pin", "1") + self.wait_until_true(lambda: self._check_subtree(1, '/d0/d1', status=status), timeout=30) + self.wait_until_true(lambda: self._check_subtree(0, '/d0', status=status), timeout=5) + + self.mount_a.write_test_pattern("d0/d1/file_a", 8 * 1024 * 1024) + self.mount_a.run_shell(["mkdir", "d0/.snap/s1"]) + self.mount_a.run_shell(["rm", "-f", "d0/d1/file_a"]) + self.mount_a.validate_test_pattern("d0/.snap/s1/d1/file_a", 8 * 1024 * 1024) + + self.mount_a.run_shell(["rmdir", "d0/.snap/s1"]) + self.mount_a.run_shell(["rm", "-rf", "d0"]) + + def test_multimds_past_parents(self): + """ + check if past parents are properly recorded during across authority rename + """ + self.fs.set_allow_new_snaps(True); + self.fs.set_max_mds(2) + status = self.fs.wait_for_daemons() + + self.mount_a.run_shell(["mkdir", "d0", "d1"]) + self.mount_a.setfattr("d0", "ceph.dir.pin", "0") + self.mount_a.setfattr("d1", "ceph.dir.pin", "1") + self.wait_until_true(lambda: self._check_subtree(1, '/d1', status=status), timeout=30) + self.wait_until_true(lambda: self._check_subtree(0, '/d0', status=status), timeout=5) + + self.mount_a.run_shell(["mkdir", "d0/d3"]) + self.mount_a.run_shell(["mkdir", "d0/.snap/s1"]) + snap_name = self.mount_a.run_shell(["ls", "d0/d3/.snap"]).stdout.getvalue() + + self.mount_a.run_shell(["mv", "d0/d3", "d1/d3"]) + snap_name1 = self.mount_a.run_shell(["ls", "d1/d3/.snap"]).stdout.getvalue() + self.assertEqual(snap_name1, snap_name); + + self.mount_a.run_shell(["rmdir", "d0/.snap/s1"]) + snap_name1 = self.mount_a.run_shell(["ls", "d1/d3/.snap"]).stdout.getvalue() + self.assertEqual(snap_name1, ""); + + self.mount_a.run_shell(["rm", "-rf", "d0", "d1"]) + + def test_multimds_hardlink(self): + """ + check if hardlink snapshot works in multimds setup + """ + self.fs.set_allow_new_snaps(True); + self.fs.set_max_mds(2) + status = self.fs.wait_for_daemons() + + self.mount_a.run_shell(["mkdir", "d0", "d1"]) + + 
self.mount_a.setfattr("d0", "ceph.dir.pin", "0") + self.mount_a.setfattr("d1", "ceph.dir.pin", "1") + self.wait_until_true(lambda: self._check_subtree(1, '/d1', status=status), timeout=30) + self.wait_until_true(lambda: self._check_subtree(0, '/d0', status=status), timeout=5) + + self.mount_a.run_python(dedent(""" + import os + open(os.path.join("{path}", "d0/file1"), 'w').write("asdf") + open(os.path.join("{path}", "d0/file2"), 'w').write("asdf") + """.format(path=self.mount_a.mountpoint) + )) + + self.mount_a.run_shell(["ln", "d0/file1", "d1/file1"]) + self.mount_a.run_shell(["ln", "d0/file2", "d1/file2"]) + + self.mount_a.run_shell(["mkdir", "d1/.snap/s1"]) + + self.mount_a.run_python(dedent(""" + import os + open(os.path.join("{path}", "d0/file1"), 'w').write("qwer") + """.format(path=self.mount_a.mountpoint) + )) + + self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file1"]) + + self.mount_a.run_shell(["rm", "-f", "d0/file2"]) + self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file2"]) + + self.mount_a.run_shell(["rm", "-f", "d1/file2"]) + self.mount_a.run_shell(["grep", "asdf", "d1/.snap/s1/file2"]) + + self.mount_a.run_shell(["rmdir", "d1/.snap/s1"]) + self.mount_a.run_shell(["rm", "-rf", "d0", "d1"]) + + class SnapLimitViolationException(Exception): + failed_snapshot_number = -1 + + def __init__(self, num): + self.failed_snapshot_number = num + + def get_snap_name(self, dir_name, sno): + sname = "{dir_name}/.snap/s_{sno}".format(dir_name=dir_name, sno=sno) + return sname + + def create_snap_dir(self, sname): + self.mount_a.run_shell(["mkdir", sname]) + + def delete_dir_and_snaps(self, dir_name, snaps): + for sno in range(1, snaps+1, 1): + sname = self.get_snap_name(dir_name, sno) + self.mount_a.run_shell(["rmdir", sname]) + self.mount_a.run_shell(["rmdir", dir_name]) + + def create_dir_and_snaps(self, dir_name, snaps): + self.mount_a.run_shell(["mkdir", dir_name]) + + for sno in range(1, snaps+1, 1): + sname = self.get_snap_name(dir_name, sno) + try: + self.create_snap_dir(sname) + except CommandFailedError as e: + # failing at the last mkdir beyond the limit is expected + if sno == snaps: + log.info("failed while creating snap #{}: {}".format(sno, repr(e))) + raise TestSnapshots.SnapLimitViolationException(sno) + + def test_mds_max_snaps_per_dir_default_limit(self): + """ + Test the newly introudced option named mds_max_snaps_per_dir + Default snaps limit is 100 + Test if the default number of snapshot directories can be created + """ + self.create_dir_and_snaps("accounts", int(self.mds_max_snaps_per_dir)) + self.delete_dir_and_snaps("accounts", int(self.mds_max_snaps_per_dir)) + + def test_mds_max_snaps_per_dir_with_increased_limit(self): + """ + Test the newly introudced option named mds_max_snaps_per_dir + First create 101 directories and ensure that the 101st directory + creation fails. 
Then increase the default by one and see if the + additional directory creation succeeds + """ + # first test the default limit + new_limit = int(self.mds_max_snaps_per_dir) + self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)]) + try: + self.create_dir_and_snaps("accounts", new_limit + 1) + except TestSnapshots.SnapLimitViolationException as e: + if e.failed_snapshot_number == (new_limit + 1): + pass + # then increase the limit by one and test + new_limit = new_limit + 1 + self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)]) + sname = self.get_snap_name("accounts", new_limit) + self.create_snap_dir(sname) + self.delete_dir_and_snaps("accounts", new_limit) + + def test_mds_max_snaps_per_dir_with_reduced_limit(self): + """ + Test the newly introudced option named mds_max_snaps_per_dir + First create 99 directories. Then reduce the limit to 98. Then try + creating another directory and ensure that additional directory + creation fails. + """ + # first test the new limit + new_limit = int(self.mds_max_snaps_per_dir) - 1 + self.create_dir_and_snaps("accounts", new_limit) + sname = self.get_snap_name("accounts", new_limit + 1) + # then reduce the limit by one and test + new_limit = new_limit - 1 + self.fs.rank_asok(['config', 'set', 'mds_max_snaps_per_dir', repr(new_limit)]) + try: + self.create_snap_dir(sname) + except CommandFailedError: + # after reducing limit we expect the new snapshot creation to fail + pass + self.delete_dir_and_snaps("accounts", new_limit + 1) diff --git a/qa/tasks/cephfs/test_strays.py b/qa/tasks/cephfs/test_strays.py new file mode 100644 index 00000000..f518afe7 --- /dev/null +++ b/qa/tasks/cephfs/test_strays.py @@ -0,0 +1,973 @@ +import json +import time +import logging +from textwrap import dedent +import datetime +import gevent + +from teuthology.orchestra.run import CommandFailedError, Raw +from tasks.cephfs.cephfs_test_case import CephFSTestCase, for_teuthology + +log = logging.getLogger(__name__) + + +class TestStrays(CephFSTestCase): + MDSS_REQUIRED = 2 + + OPS_THROTTLE = 1 + FILES_THROTTLE = 2 + + # Range of different file sizes used in throttle test's workload + throttle_workload_size_range = 16 + + @for_teuthology + def test_ops_throttle(self): + self._test_throttling(self.OPS_THROTTLE) + + @for_teuthology + def test_files_throttle(self): + self._test_throttling(self.FILES_THROTTLE) + + def test_dir_deletion(self): + """ + That when deleting a bunch of dentries and the containing + directory, everything gets purged. + Catches cases where the client might e.g. fail to trim + the unlinked dir from its cache. 
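+        The check below follows the mds_cache strays_created/strays_enqueued
+        counters and the purge_queue pq_executed counter, then verifies that
+        the dirfrag object and the data objects are gone.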
+ """ + file_count = 1000 + create_script = dedent(""" + import os + + mount_path = "{mount_path}" + subdir = "delete_me" + size = {size} + file_count = {file_count} + os.mkdir(os.path.join(mount_path, subdir)) + for i in range(0, file_count): + filename = "{{0}}_{{1}}.bin".format(i, size) + with open(os.path.join(mount_path, subdir, filename), 'w') as f: + f.write(size * 'x') + """.format( + mount_path=self.mount_a.mountpoint, + size=1024, + file_count=file_count + )) + + self.mount_a.run_python(create_script) + + # That the dirfrag object is created + self.fs.mds_asok(["flush", "journal"]) + dir_ino = self.mount_a.path_to_ino("delete_me") + self.assertTrue(self.fs.dirfrag_exists(dir_ino, 0)) + + # Remove everything + self.mount_a.run_shell(["rm", "-rf", "delete_me"]) + self.fs.mds_asok(["flush", "journal"]) + + # That all the removed files get created as strays + strays = self.get_mdc_stat("strays_created") + self.assertEqual(strays, file_count + 1) + + # That the strays all get enqueued for purge + self.wait_until_equal( + lambda: self.get_mdc_stat("strays_enqueued"), + strays, + timeout=600 + + ) + + # That all the purge operations execute + self.wait_until_equal( + lambda: self.get_stat("purge_queue", "pq_executed"), + strays, + timeout=600 + ) + + # That finally, the directory metadata object is gone + self.assertFalse(self.fs.dirfrag_exists(dir_ino, 0)) + + # That finally, the data objects are all gone + self.await_data_pool_empty() + + def _test_throttling(self, throttle_type): + self.data_log = [] + try: + return self._do_test_throttling(throttle_type) + except: + for l in self.data_log: + log.info(",".join([l_.__str__() for l_ in l])) + raise + + def _do_test_throttling(self, throttle_type): + """ + That the mds_max_purge_ops setting is respected + """ + + def set_throttles(files, ops): + """ + Helper for updating ops/files limits, and calculating effective + ops_per_pg setting to give the same ops limit. + """ + self.set_conf('mds', 'mds_max_purge_files', "%d" % files) + self.set_conf('mds', 'mds_max_purge_ops', "%d" % ops) + + pgs = self.fs.mon_manager.get_pool_property( + self.fs.get_data_pool_name(), + "pg_num" + ) + ops_per_pg = float(ops) / pgs + self.set_conf('mds', 'mds_max_purge_ops_per_pg', "%s" % ops_per_pg) + + # Test conditions depend on what we're going to be exercising. + # * Lift the threshold on whatever throttle we are *not* testing, so + # that the throttle of interest is the one that will be the bottleneck + # * Create either many small files (test file count throttling) or fewer + # large files (test op throttling) + if throttle_type == self.OPS_THROTTLE: + set_throttles(files=100000000, ops=16) + size_unit = 1024 * 1024 # big files, generate lots of ops + file_multiplier = 100 + elif throttle_type == self.FILES_THROTTLE: + # The default value of file limit is pretty permissive, so to avoid + # the test running too fast, create lots of files and set the limit + # pretty low. 
+ set_throttles(ops=100000000, files=6) + size_unit = 1024 # small, numerous files + file_multiplier = 200 + else: + raise NotImplementedError(throttle_type) + + # Pick up config changes + self.fs.mds_fail_restart() + self.fs.wait_for_daemons() + + create_script = dedent(""" + import os + + mount_path = "{mount_path}" + subdir = "delete_me" + size_unit = {size_unit} + file_multiplier = {file_multiplier} + os.mkdir(os.path.join(mount_path, subdir)) + for i in range(0, file_multiplier): + for size in range(0, {size_range}*size_unit, size_unit): + filename = "{{0}}_{{1}}.bin".format(i, size // size_unit) + with open(os.path.join(mount_path, subdir, filename), 'w') as f: + f.write(size * 'x') + """.format( + mount_path=self.mount_a.mountpoint, + size_unit=size_unit, + file_multiplier=file_multiplier, + size_range=self.throttle_workload_size_range + )) + + self.mount_a.run_python(create_script) + + # We will run the deletion in the background, to reduce the risk of it completing before + # we have started monitoring the stray statistics. + def background(): + self.mount_a.run_shell(["rm", "-rf", "delete_me"]) + self.fs.mds_asok(["flush", "journal"]) + + background_thread = gevent.spawn(background) + + total_inodes = file_multiplier * self.throttle_workload_size_range + 1 + mds_max_purge_ops = int(self.fs.get_config("mds_max_purge_ops", 'mds')) + mds_max_purge_files = int(self.fs.get_config("mds_max_purge_files", 'mds')) + + # During this phase we look for the concurrent ops to exceed half + # the limit (a heuristic) and not exceed the limit (a correctness + # condition). + purge_timeout = 600 + elapsed = 0 + files_high_water = 0 + ops_high_water = 0 + + while True: + stats = self.fs.mds_asok(['perf', 'dump']) + mdc_stats = stats['mds_cache'] + pq_stats = stats['purge_queue'] + if elapsed >= purge_timeout: + raise RuntimeError("Timeout waiting for {0} inodes to purge, stats:{1}".format(total_inodes, mdc_stats)) + + num_strays = mdc_stats['num_strays'] + num_strays_purging = pq_stats['pq_executing'] + num_purge_ops = pq_stats['pq_executing_ops'] + files_high_water = pq_stats['pq_executing_high_water'] + ops_high_water = pq_stats['pq_executing_ops_high_water'] + + self.data_log.append([datetime.datetime.now(), num_strays, num_strays_purging, num_purge_ops, files_high_water, ops_high_water]) + + total_strays_created = mdc_stats['strays_created'] + total_strays_purged = pq_stats['pq_executed'] + + if total_strays_purged == total_inodes: + log.info("Complete purge in {0} seconds".format(elapsed)) + break + elif total_strays_purged > total_inodes: + raise RuntimeError("Saw more strays than expected, mdc stats: {0}".format(mdc_stats)) + else: + if throttle_type == self.OPS_THROTTLE: + # 11 is filer_max_purge_ops plus one for the backtrace: + # limit is allowed to be overshot by this much. + if num_purge_ops > mds_max_purge_ops + 11: + raise RuntimeError("num_purge_ops violates threshold {0}/{1}".format( + num_purge_ops, mds_max_purge_ops + )) + elif throttle_type == self.FILES_THROTTLE: + if num_strays_purging > mds_max_purge_files: + raise RuntimeError("num_strays_purging violates threshold {0}/{1}".format( + num_strays_purging, mds_max_purge_files + )) + else: + raise NotImplementedError(throttle_type) + + log.info("Waiting for purge to complete {0}/{1}, {2}/{3}".format( + num_strays_purging, num_strays, + total_strays_purged, total_strays_created + )) + time.sleep(1) + elapsed += 1 + + background_thread.join() + + # Check that we got up to a respectable rate during the purge. 
This is totally + # racy, but should be safeish unless the cluster is pathologically slow, or + # insanely fast such that the deletions all pass before we have polled the + # statistics. + if throttle_type == self.OPS_THROTTLE: + if ops_high_water < mds_max_purge_ops // 2: + raise RuntimeError("Ops in flight high water is unexpectedly low ({0} / {1})".format( + ops_high_water, mds_max_purge_ops + )) + # The MDS may go over mds_max_purge_ops for some items, like a + # heavily fragmented directory. The throttle does not kick in + # until *after* we reach or exceed the limit. This is expected + # because we don't want to starve the PQ or never purge a + # particularly large file/directory. + self.assertLessEqual(ops_high_water, mds_max_purge_ops+64) + elif throttle_type == self.FILES_THROTTLE: + if files_high_water < mds_max_purge_files // 2: + raise RuntimeError("Files in flight high water is unexpectedly low ({0} / {1})".format( + files_high_water, mds_max_purge_files + )) + self.assertLessEqual(files_high_water, mds_max_purge_files) + + # Sanity check all MDC stray stats + stats = self.fs.mds_asok(['perf', 'dump']) + mdc_stats = stats['mds_cache'] + pq_stats = stats['purge_queue'] + self.assertEqual(mdc_stats['num_strays'], 0) + self.assertEqual(mdc_stats['num_strays_delayed'], 0) + self.assertEqual(pq_stats['pq_executing'], 0) + self.assertEqual(pq_stats['pq_executing_ops'], 0) + self.assertEqual(mdc_stats['strays_created'], total_inodes) + self.assertEqual(mdc_stats['strays_enqueued'], total_inodes) + self.assertEqual(pq_stats['pq_executed'], total_inodes) + + def get_mdc_stat(self, name, mds_id=None): + return self.get_stat("mds_cache", name, mds_id) + + def get_stat(self, subsys, name, mds_id=None): + return self.fs.mds_asok(['perf', 'dump', subsys, name], + mds_id=mds_id)[subsys][name] + + def _wait_for_counter(self, subsys, counter, expect_val, timeout=60, + mds_id=None): + self.wait_until_equal( + lambda: self.get_stat(subsys, counter, mds_id), + expect_val=expect_val, timeout=timeout, + reject_fn=lambda x: x > expect_val + ) + + def test_open_inode(self): + """ + That the case of a dentry unlinked while a client holds an + inode open is handled correctly. + + The inode should be moved into a stray dentry, while the original + dentry and directory should be purged. + + The inode's data should be purged when the client eventually closes + it. 
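+ In perf-counter terms: num_strays and strays_created should tick as soon
+ as the dentry is unlinked, while strays_enqueued and the purge queue's
+ pq_executed only advance after the client drops its caps by closing the file.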
+ """ + mount_a_client_id = self.mount_a.get_global_id() + + # Write some bytes to a file + size_mb = 8 + + # Hold the file open + p = self.mount_a.open_background("open_file") + self.mount_a.write_n_mb("open_file", size_mb) + open_file_ino = self.mount_a.path_to_ino("open_file") + + self.assertEqual(self.get_session(mount_a_client_id)['num_caps'], 2) + + # Unlink the dentry + self.mount_a.run_shell(["rm", "-f", "open_file"]) + + # Wait to see the stray count increment + self.wait_until_equal( + lambda: self.get_mdc_stat("num_strays"), + expect_val=1, timeout=60, reject_fn=lambda x: x > 1) + + # See that while the stray count has incremented, none have passed + # on to the purge queue + self.assertEqual(self.get_mdc_stat("strays_created"), 1) + self.assertEqual(self.get_mdc_stat("strays_enqueued"), 0) + + # See that the client still holds 2 caps + self.assertEqual(self.get_session(mount_a_client_id)['num_caps'], 2) + + # See that the data objects remain in the data pool + self.assertTrue(self.fs.data_objects_present(open_file_ino, size_mb * 1024 * 1024)) + + # Now close the file + self.mount_a.kill_background(p) + + # Wait to see the client cap count decrement + self.wait_until_equal( + lambda: self.get_session(mount_a_client_id)['num_caps'], + expect_val=1, timeout=60, reject_fn=lambda x: x > 2 or x < 1 + ) + # Wait to see the purge counter increment, stray count go to zero + self._wait_for_counter("mds_cache", "strays_enqueued", 1) + self.wait_until_equal( + lambda: self.get_mdc_stat("num_strays"), + expect_val=0, timeout=6, reject_fn=lambda x: x > 1 + ) + self._wait_for_counter("purge_queue", "pq_executed", 1) + + # See that the data objects no longer exist + self.assertTrue(self.fs.data_objects_absent(open_file_ino, size_mb * 1024 * 1024)) + + self.await_data_pool_empty() + + def test_hardlink_reintegration(self): + """ + That removal of primary dentry of hardlinked inode results + in reintegration of inode into the previously-remote dentry, + rather than lingering as a stray indefinitely. + """ + # Write some bytes to file_a + size_mb = 8 + self.mount_a.run_shell(["mkdir", "dir_1"]) + self.mount_a.write_n_mb("dir_1/file_a", size_mb) + ino = self.mount_a.path_to_ino("dir_1/file_a") + + # Create a hardlink named file_b + self.mount_a.run_shell(["mkdir", "dir_2"]) + self.mount_a.run_shell(["ln", "dir_1/file_a", "dir_2/file_b"]) + self.assertEqual(self.mount_a.path_to_ino("dir_2/file_b"), ino) + + # Flush journal + self.fs.mds_asok(['flush', 'journal']) + + # See that backtrace for the file points to the file_a path + pre_unlink_bt = self.fs.read_backtrace(ino) + self.assertEqual(pre_unlink_bt['ancestors'][0]['dname'], "file_a") + + # empty mds cache. otherwise mds reintegrates stray when unlink finishes + self.mount_a.umount_wait() + self.fs.mds_asok(['flush', 'journal']) + self.fs.mds_fail_restart() + self.fs.wait_for_daemons() + self.mount_a.mount() + + # Unlink file_a + self.mount_a.run_shell(["rm", "-f", "dir_1/file_a"]) + + # See that a stray was created + self.assertEqual(self.get_mdc_stat("num_strays"), 1) + self.assertEqual(self.get_mdc_stat("strays_created"), 1) + + # Wait, see that data objects are still present (i.e. 
that the + # stray did not advance to purging given time) + time.sleep(30) + self.assertTrue(self.fs.data_objects_present(ino, size_mb * 1024 * 1024)) + self.assertEqual(self.get_mdc_stat("strays_enqueued"), 0) + + # See that before reintegration, the inode's backtrace points to a stray dir + self.fs.mds_asok(['flush', 'journal']) + self.assertTrue(self.get_backtrace_path(ino).startswith("stray")) + + last_reintegrated = self.get_mdc_stat("strays_reintegrated") + + # Do a metadata operation on the remaining link (mv is heavy handed, but + # others like touch may be satisfied from caps without poking MDS) + self.mount_a.run_shell(["mv", "dir_2/file_b", "dir_2/file_c"]) + + # Stray reintegration should happen as a result of the eval_remote call + # on responding to a client request. + self.wait_until_equal( + lambda: self.get_mdc_stat("num_strays"), + expect_val=0, + timeout=60 + ) + + # See the reintegration counter increment + curr_reintegrated = self.get_mdc_stat("strays_reintegrated") + self.assertGreater(curr_reintegrated, last_reintegrated) + last_reintegrated = curr_reintegrated + + # Flush the journal + self.fs.mds_asok(['flush', 'journal']) + + # See that the backtrace for the file points to the remaining link's path + post_reint_bt = self.fs.read_backtrace(ino) + self.assertEqual(post_reint_bt['ancestors'][0]['dname'], "file_c") + + # mds should reintegrates stray when unlink finishes + self.mount_a.run_shell(["ln", "dir_2/file_c", "dir_2/file_d"]) + self.mount_a.run_shell(["rm", "-f", "dir_2/file_c"]) + + # Stray reintegration should happen as a result of the notify_stray call + # on completion of unlink + self.wait_until_equal( + lambda: self.get_mdc_stat("num_strays"), + expect_val=0, + timeout=60 + ) + + # See the reintegration counter increment + curr_reintegrated = self.get_mdc_stat("strays_reintegrated") + self.assertGreater(curr_reintegrated, last_reintegrated) + last_reintegrated = curr_reintegrated + + # Flush the journal + self.fs.mds_asok(['flush', 'journal']) + + # See that the backtrace for the file points to the newest link's path + post_reint_bt = self.fs.read_backtrace(ino) + self.assertEqual(post_reint_bt['ancestors'][0]['dname'], "file_d") + + # Now really delete it + self.mount_a.run_shell(["rm", "-f", "dir_2/file_d"]) + self._wait_for_counter("mds_cache", "strays_enqueued", 1) + self._wait_for_counter("purge_queue", "pq_executed", 1) + + self.assert_purge_idle() + self.assertTrue(self.fs.data_objects_absent(ino, size_mb * 1024 * 1024)) + + # We caused the inode to go stray 3 times + self.assertEqual(self.get_mdc_stat("strays_created"), 3) + # We purged it at the last + self.assertEqual(self.get_mdc_stat("strays_enqueued"), 1) + + def test_mv_hardlink_cleanup(self): + """ + That when doing a rename from A to B, and B has hardlinks, + then we make a stray for B which is then reintegrated + into one of his hardlinks. 
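+ Expected sequence: the rename turns file_b's primary dentry into a stray,
+ notify_stray on rename completion finds the surviving remote dentry
+ (linkto_b), and the inode is reintegrated there rather than being enqueued
+ for purge, so the data objects of both files must remain present.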
+ """ + # Create file_a, file_b, and a hardlink to file_b + size_mb = 8 + self.mount_a.write_n_mb("file_a", size_mb) + file_a_ino = self.mount_a.path_to_ino("file_a") + + self.mount_a.write_n_mb("file_b", size_mb) + file_b_ino = self.mount_a.path_to_ino("file_b") + + self.mount_a.run_shell(["ln", "file_b", "linkto_b"]) + self.assertEqual(self.mount_a.path_to_ino("linkto_b"), file_b_ino) + + # mv file_a file_b + self.mount_a.run_shell(["mv", "file_a", "file_b"]) + + # Stray reintegration should happen as a result of the notify_stray call on + # completion of rename + self.wait_until_equal( + lambda: self.get_mdc_stat("num_strays"), + expect_val=0, + timeout=60 + ) + + self.assertEqual(self.get_mdc_stat("strays_created"), 1) + self.assertGreaterEqual(self.get_mdc_stat("strays_reintegrated"), 1) + + # No data objects should have been deleted, as both files still have linkage. + self.assertTrue(self.fs.data_objects_present(file_a_ino, size_mb * 1024 * 1024)) + self.assertTrue(self.fs.data_objects_present(file_b_ino, size_mb * 1024 * 1024)) + + self.fs.mds_asok(['flush', 'journal']) + + post_reint_bt = self.fs.read_backtrace(file_b_ino) + self.assertEqual(post_reint_bt['ancestors'][0]['dname'], "linkto_b") + + def _setup_two_ranks(self): + # Set up two MDSs + self.fs.set_max_mds(2) + + # See that we have two active MDSs + self.wait_until_equal(lambda: len(self.fs.get_active_names()), 2, 30, + reject_fn=lambda v: v > 2 or v < 1) + + active_mds_names = self.fs.get_active_names() + rank_0_id = active_mds_names[0] + rank_1_id = active_mds_names[1] + log.info("Ranks 0 and 1 are {0} and {1}".format( + rank_0_id, rank_1_id)) + + # Get rid of other MDS daemons so that it's easier to know which + # daemons to expect in which ranks after restarts + for unneeded_mds in set(self.mds_cluster.mds_ids) - {rank_0_id, rank_1_id}: + self.mds_cluster.mds_stop(unneeded_mds) + self.mds_cluster.mds_fail(unneeded_mds) + + return rank_0_id, rank_1_id + + def _force_migrate(self, to_id, path, watch_ino): + """ + :param to_id: MDS id to move it to + :param path: Filesystem path (string) to move + :param watch_ino: Inode number to look for at destination to confirm move + :return: None + """ + self.mount_a.run_shell(["setfattr", "-n", "ceph.dir.pin", "-v", "1", path]) + + # Poll the MDS cache dump to watch for the export completing + migrated = False + migrate_timeout = 60 + migrate_elapsed = 0 + while not migrated: + data = self.fs.mds_asok(["dump", "cache"], to_id) + for inode_data in data: + if inode_data['ino'] == watch_ino: + log.debug("Found ino in cache: {0}".format(json.dumps(inode_data, indent=2))) + if inode_data['is_auth'] is True: + migrated = True + break + + if not migrated: + if migrate_elapsed > migrate_timeout: + raise RuntimeError("Migration hasn't happened after {0}s!".format(migrate_elapsed)) + else: + migrate_elapsed += 1 + time.sleep(1) + + def _is_stopped(self, rank): + mds_map = self.fs.get_mds_map() + return rank not in [i['rank'] for i in mds_map['info'].values()] + + def test_purge_on_shutdown(self): + """ + That when an MDS rank is shut down, its purge queue is + drained in the process. 
+ """ + rank_0_id, rank_1_id = self._setup_two_ranks() + + self.set_conf("mds.{0}".format(rank_1_id), 'mds_max_purge_files', "0") + self.mds_cluster.mds_fail_restart(rank_1_id) + self.fs.wait_for_daemons() + + file_count = 5 + + self.mount_a.create_n_files("delete_me/file", file_count) + + self._force_migrate(rank_1_id, "delete_me", + self.mount_a.path_to_ino("delete_me/file_0")) + + self.mount_a.run_shell(["rm", "-rf", Raw("delete_me/*")]) + self.mount_a.umount_wait() + + # See all the strays go into purge queue + self._wait_for_counter("mds_cache", "strays_created", file_count, mds_id=rank_1_id) + self._wait_for_counter("mds_cache", "strays_enqueued", file_count, mds_id=rank_1_id) + self.assertEqual(self.get_stat("mds_cache", "num_strays", mds_id=rank_1_id), 0) + + # See nothing get purged from the purge queue (yet) + time.sleep(10) + self.assertEqual(self.get_stat("purge_queue", "pq_executed", mds_id=rank_1_id), 0) + + # Shut down rank 1 + self.fs.set_max_mds(1) + + # It shouldn't proceed past stopping because its still not allowed + # to purge + time.sleep(10) + self.assertEqual(self.get_stat("purge_queue", "pq_executed", mds_id=rank_1_id), 0) + self.assertFalse(self._is_stopped(1)) + + # Permit the daemon to start purging again + self.fs.mon_manager.raw_cluster_cmd('tell', 'mds.{0}'.format(rank_1_id), + 'injectargs', + "--mds_max_purge_files 100") + + # It should now proceed through shutdown + self.fs.wait_for_daemons(timeout=120) + + # ...and in the process purge all that data + self.await_data_pool_empty() + + def test_migration_on_shutdown(self): + """ + That when an MDS rank is shut down, any non-purgeable strays + get migrated to another rank. + """ + + rank_0_id, rank_1_id = self._setup_two_ranks() + + # Create a non-purgeable stray in a ~mds1 stray directory + # by doing a hard link and deleting the original file + self.mount_a.run_shell(["mkdir", "dir_1", "dir_2"]) + self.mount_a.run_shell(["touch", "dir_1/original"]) + self.mount_a.run_shell(["ln", "dir_1/original", "dir_2/linkto"]) + + self._force_migrate(rank_1_id, "dir_1", + self.mount_a.path_to_ino("dir_1/original")) + + # empty mds cache. otherwise mds reintegrates stray when unlink finishes + self.mount_a.umount_wait() + self.fs.mds_asok(['flush', 'journal'], rank_0_id) + self.fs.mds_asok(['flush', 'journal'], rank_1_id) + self.fs.mds_fail_restart() + self.fs.wait_for_daemons() + + active_mds_names = self.fs.get_active_names() + rank_0_id = active_mds_names[0] + rank_1_id = active_mds_names[1] + + self.mount_a.mount() + + self.mount_a.run_shell(["rm", "-f", "dir_1/original"]) + self.mount_a.umount_wait() + + self._wait_for_counter("mds_cache", "strays_created", 1, + mds_id=rank_1_id) + + # Shut down rank 1 + self.fs.set_max_mds(1) + self.fs.wait_for_daemons(timeout=120) + + # See that the stray counter on rank 0 has incremented + self.assertEqual(self.get_mdc_stat("strays_created", rank_0_id), 1) + + def assert_backtrace(self, ino, expected_path): + """ + Assert that the backtrace in the data pool for an inode matches + an expected /foo/bar path. 
+ """ + expected_elements = expected_path.strip("/").split("/") + bt = self.fs.read_backtrace(ino) + actual_elements = list(reversed([dn['dname'] for dn in bt['ancestors']])) + self.assertListEqual(expected_elements, actual_elements) + + def get_backtrace_path(self, ino): + bt = self.fs.read_backtrace(ino) + elements = reversed([dn['dname'] for dn in bt['ancestors']]) + return "/".join(elements) + + def assert_purge_idle(self): + """ + Assert that the MDS perf counters indicate no strays exist and + no ongoing purge activity. Sanity check for when PurgeQueue should + be idle. + """ + mdc_stats = self.fs.mds_asok(['perf', 'dump', "mds_cache"])['mds_cache'] + pq_stats = self.fs.mds_asok(['perf', 'dump', "purge_queue"])['purge_queue'] + self.assertEqual(mdc_stats["num_strays"], 0) + self.assertEqual(mdc_stats["num_strays_delayed"], 0) + self.assertEqual(pq_stats["pq_executing"], 0) + self.assertEqual(pq_stats["pq_executing_ops"], 0) + + def test_mv_cleanup(self): + """ + That when doing a rename from A to B, and B has no hardlinks, + then we make a stray for B and purge him. + """ + # Create file_a and file_b, write some to both + size_mb = 8 + self.mount_a.write_n_mb("file_a", size_mb) + file_a_ino = self.mount_a.path_to_ino("file_a") + self.mount_a.write_n_mb("file_b", size_mb) + file_b_ino = self.mount_a.path_to_ino("file_b") + + self.fs.mds_asok(['flush', 'journal']) + self.assert_backtrace(file_a_ino, "file_a") + self.assert_backtrace(file_b_ino, "file_b") + + # mv file_a file_b + self.mount_a.run_shell(['mv', 'file_a', 'file_b']) + + # See that stray counter increments + self.assertEqual(self.get_mdc_stat("strays_created"), 1) + # Wait for purge counter to increment + self._wait_for_counter("mds_cache", "strays_enqueued", 1) + self._wait_for_counter("purge_queue", "pq_executed", 1) + + self.assert_purge_idle() + + # file_b should have been purged + self.assertTrue(self.fs.data_objects_absent(file_b_ino, size_mb * 1024 * 1024)) + + # Backtrace should have updated from file_a to file_b + self.fs.mds_asok(['flush', 'journal']) + self.assert_backtrace(file_a_ino, "file_b") + + # file_a's data should still exist + self.assertTrue(self.fs.data_objects_present(file_a_ino, size_mb * 1024 * 1024)) + + def _pool_df(self, pool_name): + """ + Return a dict like + { + "kb_used": 0, + "bytes_used": 0, + "max_avail": 19630292406, + "objects": 0 + } + + :param pool_name: Which pool (must exist) + """ + out = self.fs.mon_manager.raw_cluster_cmd("df", "--format=json-pretty") + for p in json.loads(out)['pools']: + if p['name'] == pool_name: + return p['stats'] + + raise RuntimeError("Pool '{0}' not found".format(pool_name)) + + def await_data_pool_empty(self): + self.wait_until_true( + lambda: self._pool_df( + self.fs.get_data_pool_name() + )['objects'] == 0, + timeout=60) + + def test_snapshot_remove(self): + """ + That removal of a snapshot that references a now-unlinked file results + in purging on the stray for the file. 
+ """ + # Enable snapshots + self.fs.set_allow_new_snaps(True) + + # Create a dir with a file in it + size_mb = 8 + self.mount_a.run_shell(["mkdir", "snapdir"]) + self.mount_a.run_shell(["mkdir", "snapdir/subdir"]) + self.mount_a.write_test_pattern("snapdir/subdir/file_a", size_mb * 1024 * 1024) + file_a_ino = self.mount_a.path_to_ino("snapdir/subdir/file_a") + + # Snapshot the dir + self.mount_a.run_shell(["mkdir", "snapdir/.snap/snap1"]) + + # Cause the head revision to deviate from the snapshot + self.mount_a.write_n_mb("snapdir/subdir/file_a", size_mb) + + # Flush the journal so that backtraces, dirfrag objects will actually be written + self.fs.mds_asok(["flush", "journal"]) + + # Unlink the file + self.mount_a.run_shell(["rm", "-f", "snapdir/subdir/file_a"]) + self.mount_a.run_shell(["rmdir", "snapdir/subdir"]) + + # Unmount the client because when I come back to check the data is still + # in the file I don't want to just see what's in the page cache. + self.mount_a.umount_wait() + + self.assertEqual(self.get_mdc_stat("strays_created"), 2) + + # FIXME: at this stage we see a purge and the stray count drops to + # zero, but there's actually still a stray, so at the very + # least the StrayManager stats code is slightly off + + self.mount_a.mount() + + # See that the data from the snapshotted revision of the file is still present + # and correct + self.mount_a.validate_test_pattern("snapdir/.snap/snap1/subdir/file_a", size_mb * 1024 * 1024) + + # Remove the snapshot + self.mount_a.run_shell(["rmdir", "snapdir/.snap/snap1"]) + + # Purging file_a doesn't happen until after we've flushed the journal, because + # it is referenced by the snapshotted subdir, and the snapshot isn't really + # gone until the journal references to it are gone + self.fs.mds_asok(["flush", "journal"]) + + # Wait for purging to complete, which requires the OSDMap to propagate to the OSDs. + # See also: http://tracker.ceph.com/issues/20072 + self.wait_until_true( + lambda: self.fs.data_objects_absent(file_a_ino, size_mb * 1024 * 1024), + timeout=60 + ) + + # See that a purge happens now + self._wait_for_counter("mds_cache", "strays_enqueued", 2) + self._wait_for_counter("purge_queue", "pq_executed", 2) + + self.await_data_pool_empty() + + def test_fancy_layout(self): + """ + purge stray file with fancy layout + """ + + file_name = "fancy_layout_file" + self.mount_a.run_shell(["touch", file_name]) + + file_layout = "stripe_unit=1048576 stripe_count=4 object_size=8388608" + self.mount_a.setfattr(file_name, "ceph.file.layout", file_layout) + + # 35MB requires 7 objects + size_mb = 35 + self.mount_a.write_n_mb(file_name, size_mb) + + self.mount_a.run_shell(["rm", "-f", file_name]) + self.fs.mds_asok(["flush", "journal"]) + + # can't use self.fs.data_objects_absent here, it does not support fancy layout + self.await_data_pool_empty() + + def test_dirfrag_limit(self): + """ + That the directory fragment size cannot exceed mds_bal_fragment_size_max (using a limit of 50 in all configurations). + + That fragmentation (forced) will allow more entries to be created. + + That unlinking fails when the stray directory fragment becomes too large and that unlinking may continue once those strays are purged. 
+ """ + + LOW_LIMIT = 50 + for mds in self.fs.get_daemon_names(): + self.fs.mds_asok(["config", "set", "mds_bal_fragment_size_max", str(LOW_LIMIT)], mds) + + try: + self.mount_a.run_python(dedent(""" + import os + path = os.path.join("{path}", "subdir") + os.mkdir(path) + for n in range(0, {file_count}): + with open(os.path.join(path, "%s" % n), 'w') as f: + f.write(str(n)) + """.format( + path=self.mount_a.mountpoint, + file_count=LOW_LIMIT+1 + ))) + except CommandFailedError: + pass # ENOSPAC + else: + raise RuntimeError("fragment size exceeded") + + # Now test that we can go beyond the limit if we fragment the directory + + self.mount_a.run_python(dedent(""" + import os + path = os.path.join("{path}", "subdir2") + os.mkdir(path) + for n in range(0, {file_count}): + with open(os.path.join(path, "%s" % n), 'w') as f: + f.write(str(n)) + dfd = os.open(path, os.O_DIRECTORY) + os.fsync(dfd) + """.format( + path=self.mount_a.mountpoint, + file_count=LOW_LIMIT + ))) + + # Ensure that subdir2 is fragmented + mds_id = self.fs.get_active_names()[0] + self.fs.mds_asok(["dirfrag", "split", "/subdir2", "0/0", "1"], mds_id) + + # remount+flush (release client caps) + self.mount_a.umount_wait() + self.fs.mds_asok(["flush", "journal"], mds_id) + self.mount_a.mount() + self.mount_a.wait_until_mounted() + + # Create 50% more files than the current fragment limit + self.mount_a.run_python(dedent(""" + import os + path = os.path.join("{path}", "subdir2") + for n in range({file_count}, ({file_count}*3)//2): + with open(os.path.join(path, "%s" % n), 'w') as f: + f.write(str(n)) + """.format( + path=self.mount_a.mountpoint, + file_count=LOW_LIMIT + ))) + + # Now test the stray directory size is limited and recovers + strays_before = self.get_mdc_stat("strays_created") + try: + self.mount_a.run_python(dedent(""" + import os + path = os.path.join("{path}", "subdir3") + os.mkdir(path) + for n in range({file_count}): + fpath = os.path.join(path, "%s" % n) + with open(fpath, 'w') as f: + f.write(str(n)) + os.unlink(fpath) + """.format( + path=self.mount_a.mountpoint, + file_count=LOW_LIMIT*10 # 10 stray directories, should collide before this count + ))) + except CommandFailedError: + pass # ENOSPAC + else: + raise RuntimeError("fragment size exceeded") + + strays_after = self.get_mdc_stat("strays_created") + self.assertGreaterEqual(strays_after-strays_before, LOW_LIMIT) + + self._wait_for_counter("mds_cache", "strays_enqueued", strays_after) + self._wait_for_counter("purge_queue", "pq_executed", strays_after) + + self.mount_a.run_python(dedent(""" + import os + path = os.path.join("{path}", "subdir4") + os.mkdir(path) + for n in range({file_count}): + fpath = os.path.join(path, "%s" % n) + with open(fpath, 'w') as f: + f.write(str(n)) + os.unlink(fpath) + """.format( + path=self.mount_a.mountpoint, + file_count=LOW_LIMIT + ))) + + def test_purge_queue_upgrade(self): + """ + That when starting on a system with no purge queue in the metadata + pool, we silently create one. 
+ :return: + """ + + self.mds_cluster.mds_stop() + self.mds_cluster.mds_fail() + self.fs.rados(["rm", "500.00000000"]) + self.mds_cluster.mds_restart() + self.fs.wait_for_daemons() + + def test_replicated_delete_speed(self): + """ + That deletions of replicated metadata are not pathologically slow + """ + rank_0_id, rank_1_id = self._setup_two_ranks() + + self.set_conf("mds.{0}".format(rank_1_id), 'mds_max_purge_files', "0") + self.mds_cluster.mds_fail_restart(rank_1_id) + self.fs.wait_for_daemons() + + file_count = 10 + + self.mount_a.create_n_files("delete_me/file", file_count) + + self._force_migrate(rank_1_id, "delete_me", + self.mount_a.path_to_ino("delete_me/file_0")) + + begin = datetime.datetime.now() + self.mount_a.run_shell(["rm", "-rf", Raw("delete_me/*")]) + end = datetime.datetime.now() + + # What we're really checking here is that we are completing client + # operations immediately rather than delaying until the next tick. + tick_period = float(self.fs.get_config("mds_tick_interval", + service_type="mds")) + + duration = (end - begin).total_seconds() + self.assertLess(duration, (file_count * tick_period) * 0.25) + diff --git a/qa/tasks/cephfs/test_volume_client.py b/qa/tasks/cephfs/test_volume_client.py new file mode 100644 index 00000000..7f66218c --- /dev/null +++ b/qa/tasks/cephfs/test_volume_client.py @@ -0,0 +1,1765 @@ +import json +import logging +import os +from textwrap import dedent +from tasks.cephfs.cephfs_test_case import CephFSTestCase +from tasks.cephfs.fuse_mount import FuseMount +from teuthology.exceptions import CommandFailedError +from teuthology.misc import sudo_write_file + +log = logging.getLogger(__name__) + + +class TestVolumeClient(CephFSTestCase): + # One for looking at the global filesystem, one for being + # the VolumeClient, two for mounting the created shares + CLIENTS_REQUIRED = 4 + + def setUp(self): + CephFSTestCase.setUp(self) + + def _volume_client_python(self, client, script, vol_prefix=None, ns_prefix=None): + # Can't dedent this *and* the script we pass in, because they might have different + # levels of indentation to begin with, so leave this string zero-indented + if vol_prefix: + vol_prefix = "\"" + vol_prefix + "\"" + if ns_prefix: + ns_prefix = "\"" + ns_prefix + "\"" + return client.run_python(""" +from __future__ import print_function +from ceph_volume_client import CephFSVolumeClient, VolumePath +from sys import version_info as sys_version_info +from rados import OSError as rados_OSError +import logging +log = logging.getLogger("ceph_volume_client") +log.addHandler(logging.StreamHandler()) +log.setLevel(logging.DEBUG) +vc = CephFSVolumeClient("manila", "{conf_path}", "ceph", {vol_prefix}, {ns_prefix}) +vc.connect() +{payload} +vc.disconnect() + """.format(payload=script, conf_path=client.config_path, + vol_prefix=vol_prefix, ns_prefix=ns_prefix)) + + def _configure_vc_auth(self, mount, id_name): + """ + Set up auth credentials for the VolumeClient user + """ + out = self.fs.mon_manager.raw_cluster_cmd( + "auth", "get-or-create", "client.{name}".format(name=id_name), + "mds", "allow *", + "osd", "allow rw", + "mon", "allow *" + ) + mount.client_id = id_name + sudo_write_file(mount.client_remote, mount.get_keyring_path(), out) + self.set_conf("client.{name}".format(name=id_name), "keyring", mount.get_keyring_path()) + + def _configure_guest_auth(self, volumeclient_mount, guest_mount, + guest_entity, mount_path, + namespace_prefix=None, readonly=False, + tenant_id=None, allow_existing_id=False): + """ + Set up auth credentials for 
the guest client to mount a volume. + + :param volumeclient_mount: mount used as the handle for driving + volumeclient. + :param guest_mount: mount used by the guest client. + :param guest_entity: auth ID used by the guest client. + :param mount_path: path of the volume. + :param namespace_prefix: name prefix of the RADOS namespace, which + is used for the volume's layout. + :param readonly: defaults to False. If set to 'True' only read-only + mount access is granted to the guest. + :param tenant_id: (OpenStack) tenant ID of the guest client. + """ + + head, volume_id = os.path.split(mount_path) + head, group_id = os.path.split(head) + head, volume_prefix = os.path.split(head) + volume_prefix = "/" + volume_prefix + + # Authorize the guest client's auth ID to mount the volume. + key = self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + auth_result = vc.authorize(vp, "{guest_entity}", readonly={readonly}, + tenant_id="{tenant_id}", + allow_existing_id="{allow_existing_id}") + print(auth_result['auth_key']) + """.format( + group_id=group_id, + volume_id=volume_id, + guest_entity=guest_entity, + readonly=readonly, + tenant_id=tenant_id, + allow_existing_id=allow_existing_id)), volume_prefix, namespace_prefix + ) + + # CephFSVolumeClient's authorize() does not return the secret + # key to a caller who isn't multi-tenant aware. Explicitly + # query the key for such a client. + if not tenant_id: + key = self.fs.mon_manager.raw_cluster_cmd( + "auth", "get-key", "client.{name}".format(name=guest_entity), + ) + + # The guest auth ID should exist. + existing_ids = [a['entity'] for a in self.auth_list()] + self.assertIn("client.{0}".format(guest_entity), existing_ids) + + # Create keyring file for the guest client. + keyring_txt = dedent(""" + [client.{guest_entity}] + key = {key} + + """.format( + guest_entity=guest_entity, + key=key + )) + guest_mount.client_id = guest_entity + sudo_write_file(guest_mount.client_remote, + guest_mount.get_keyring_path(), keyring_txt) + + # Add a guest client section to the ceph config file. 
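+ # The resulting section looks roughly like this (the keyring path is
+ # whatever get_keyring_path() returns for the guest mount):
+ #
+ # [client.<guest_entity>]
+ # client quota = True
+ # debug client = 20
+ # debug objecter = 20
+ # keyring = <guest keyring path>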
+ self.set_conf("client.{0}".format(guest_entity), "client quota", "True") + self.set_conf("client.{0}".format(guest_entity), "debug client", "20") + self.set_conf("client.{0}".format(guest_entity), "debug objecter", "20") + self.set_conf("client.{0}".format(guest_entity), + "keyring", guest_mount.get_keyring_path()) + + def test_default_prefix(self): + group_id = "grpid" + volume_id = "volid" + DEFAULT_VOL_PREFIX = "volumes" + DEFAULT_NS_PREFIX = "fsvolumens_" + + self.mount_b.umount_wait() + self._configure_vc_auth(self.mount_b, "manila") + + #create a volume with default prefix + self._volume_client_python(self.mount_b, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.create_volume(vp, 10, data_isolated=True) + """.format( + group_id=group_id, + volume_id=volume_id, + ))) + + # The dir should be created + self.mount_a.stat(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id)) + + #namespace should be set + ns_in_attr = self.mount_a.getfattr(os.path.join(DEFAULT_VOL_PREFIX, group_id, volume_id), "ceph.dir.layout.pool_namespace") + namespace = "{0}{1}".format(DEFAULT_NS_PREFIX, volume_id) + self.assertEqual(namespace, ns_in_attr) + + + def test_lifecycle(self): + """ + General smoke test for create, extend, destroy + """ + + # I'm going to use mount_c later as a guest for mounting the created + # shares + self.mounts[2].umount_wait() + + # I'm going to leave mount_b unmounted and just use it as a handle for + # driving volumeclient. It's a little hacky but we don't have a more + # general concept for librados/libcephfs clients as opposed to full + # blown mounting clients. + self.mount_b.umount_wait() + self._configure_vc_auth(self.mount_b, "manila") + + guest_entity = "guest" + group_id = "grpid" + volume_id = "volid" + + volume_prefix = "/myprefix" + namespace_prefix = "mynsprefix_" + + # Create a 100MB volume + volume_size = 100 + mount_path = self._volume_client_python(self.mount_b, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + create_result = vc.create_volume(vp, 1024*1024*{volume_size}) + print(create_result['mount_path']) + """.format( + group_id=group_id, + volume_id=volume_id, + volume_size=volume_size + )), volume_prefix, namespace_prefix) + + # The dir should be created + self.mount_a.stat(os.path.join("myprefix", group_id, volume_id)) + + # Authorize and configure credentials for the guest to mount the + # the volume. + self._configure_guest_auth(self.mount_b, self.mounts[2], guest_entity, + mount_path, namespace_prefix) + self.mounts[2].mount(mount_path=mount_path) + + # The kernel client doesn't have the quota-based df behaviour, + # or quotas at all, so only exercise the client behaviour when + # running fuse. + if isinstance(self.mounts[2], FuseMount): + # df should see volume size, same as the quota set on volume's dir + self.assertEqual(self.mounts[2].df()['total'], + volume_size * 1024 * 1024) + self.assertEqual( + self.mount_a.getfattr( + os.path.join(volume_prefix.strip("/"), group_id, volume_id), + "ceph.quota.max_bytes"), + "%s" % (volume_size * 1024 * 1024)) + + # df granularity is 4MB block so have to write at least that much + data_bin_mb = 4 + self.mounts[2].write_n_mb("data.bin", data_bin_mb) + + # Write something outside volume to check this space usage is + # not reported in the volume's DF. + other_bin_mb = 8 + self.mount_a.write_n_mb("other.bin", other_bin_mb) + + # global: df should see all the writes (data + other). 
This is a >
+ # rather than a == because the global space used includes all pools
+ def check_df():
+ used = self.mount_a.df()['used']
+ return used >= (other_bin_mb * 1024 * 1024)
+
+ self.wait_until_true(check_df, timeout=30)
+
+ # Hack: do a metadata IO to kick rstats
+ self.mounts[2].run_shell(["touch", "foo"])
+
+ # volume: df should see the data_bin_mb consumed from quota, same
+ # as the rbytes for the volume's dir
+ self.wait_until_equal(
+ lambda: self.mounts[2].df()['used'],
+ data_bin_mb * 1024 * 1024, timeout=60)
+ self.wait_until_equal(
+ lambda: self.mount_a.getfattr(
+ os.path.join(volume_prefix.strip("/"), group_id, volume_id),
+ "ceph.dir.rbytes"),
+ "%s" % (data_bin_mb * 1024 * 1024), timeout=60)
+
+ # sync so that file data is persisted to rados
+ self.mounts[2].run_shell(["sync"])
+
+ # Our data should stay in the particular rados namespace
+ pool_name = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool")
+ namespace = "{0}{1}".format(namespace_prefix, volume_id)
+ ns_in_attr = self.mount_a.getfattr(os.path.join("myprefix", group_id, volume_id), "ceph.dir.layout.pool_namespace")
+ self.assertEqual(namespace, ns_in_attr)
+
+ objects_in_ns = set(self.fs.rados(["ls"], pool=pool_name, namespace=namespace).split("\n"))
+ self.assertNotEqual(objects_in_ns, set())
+
+ # De-authorize the guest
+ self._volume_client_python(self.mount_b, dedent("""
+ vp = VolumePath("{group_id}", "{volume_id}")
+ vc.deauthorize(vp, "{guest_entity}")
+ vc.evict("{guest_entity}")
+ """.format(
+ group_id=group_id,
+ volume_id=volume_id,
+ guest_entity=guest_entity
+ )), volume_prefix, namespace_prefix)
+
+ # Once deauthorized, the client should be unable to do any more metadata ops
+ # The way that the client currently behaves here is to block (it acts like
+ # it has lost network, because there is nothing to tell it that its messages
+ # are being dropped because its identity is gone)
+ background = self.mounts[2].write_n_mb("rogue.bin", 1, wait=False)
+ try:
+ background.wait()
+ except CommandFailedError:
+ # command failed with EBLACKLISTED?
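+ # The exact failure text depends on the client implementation; here we
+ # accept a "transport endpoint shutdown" message on stderr as evidence
+ # that the write died because the session was blacklisted, and re-raise
+ # anything else.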
+ if "transport endpoint shutdown" in background.stderr.getvalue(): + pass + else: + raise + + # After deauthorisation, the client ID should be gone (this was the only + # volume it was authorised for) + self.assertNotIn("client.{0}".format(guest_entity), [e['entity'] for e in self.auth_list()]) + + # Clean up the dead mount (ceph-fuse's behaviour here is a bit undefined) + self.mounts[2].umount_wait() + + self._volume_client_python(self.mount_b, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.delete_volume(vp) + vc.purge_volume(vp) + """.format( + group_id=group_id, + volume_id=volume_id, + )), volume_prefix, namespace_prefix) + + def test_idempotency(self): + """ + That the volumeclient interface works when calling everything twice + """ + self.mount_b.umount_wait() + self._configure_vc_auth(self.mount_b, "manila") + + guest_entity = "guest" + group_id = "grpid" + volume_id = "volid" + self._volume_client_python(self.mount_b, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.create_volume(vp, 10) + vc.create_volume(vp, 10) + vc.authorize(vp, "{guest_entity}") + vc.authorize(vp, "{guest_entity}") + vc.deauthorize(vp, "{guest_entity}") + vc.deauthorize(vp, "{guest_entity}") + vc.delete_volume(vp) + vc.delete_volume(vp) + vc.purge_volume(vp) + vc.purge_volume(vp) + + vc.create_volume(vp, 10, data_isolated=True) + vc.create_volume(vp, 10, data_isolated=True) + vc.authorize(vp, "{guest_entity}") + vc.authorize(vp, "{guest_entity}") + vc.deauthorize(vp, "{guest_entity}") + vc.deauthorize(vp, "{guest_entity}") + vc.evict("{guest_entity}") + vc.evict("{guest_entity}") + vc.delete_volume(vp, data_isolated=True) + vc.delete_volume(vp, data_isolated=True) + vc.purge_volume(vp, data_isolated=True) + vc.purge_volume(vp, data_isolated=True) + + vc.create_volume(vp, 10, namespace_isolated=False) + vc.create_volume(vp, 10, namespace_isolated=False) + vc.authorize(vp, "{guest_entity}") + vc.authorize(vp, "{guest_entity}") + vc.deauthorize(vp, "{guest_entity}") + vc.deauthorize(vp, "{guest_entity}") + vc.evict("{guest_entity}") + vc.evict("{guest_entity}") + vc.delete_volume(vp) + vc.delete_volume(vp) + vc.purge_volume(vp) + vc.purge_volume(vp) + """.format( + group_id=group_id, + volume_id=volume_id, + guest_entity=guest_entity + ))) + + def test_data_isolated(self): + """ + That data isolated shares get their own pool + :return: + """ + + # Because the teuthology config template sets mon_max_pg_per_osd to + # 10000 (i.e. it just tries to ignore health warnings), reset it to something + # sane before using volume_client, to avoid creating pools with absurdly large + # numbers of PGs. 
+ self.set_conf("global", "mon max pg per osd", "300") + for mon_daemon_state in self.ctx.daemons.iter_daemons_of_role('mon'): + mon_daemon_state.restart() + + self.mount_b.umount_wait() + self._configure_vc_auth(self.mount_b, "manila") + + # Calculate how many PGs we'll expect the new volume pool to have + osd_map = json.loads(self.fs.mon_manager.raw_cluster_cmd('osd', 'dump', '--format=json-pretty')) + max_per_osd = int(self.fs.get_config('mon_max_pg_per_osd')) + osd_count = len(osd_map['osds']) + max_overall = osd_count * max_per_osd + + existing_pg_count = 0 + for p in osd_map['pools']: + existing_pg_count += p['pg_num'] + + expected_pg_num = (max_overall - existing_pg_count) // 10 + log.info("max_per_osd {0}".format(max_per_osd)) + log.info("osd_count {0}".format(osd_count)) + log.info("max_overall {0}".format(max_overall)) + log.info("existing_pg_count {0}".format(existing_pg_count)) + log.info("expected_pg_num {0}".format(expected_pg_num)) + + pools_a = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools'] + + group_id = "grpid" + volume_id = "volid" + self._volume_client_python(self.mount_b, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.create_volume(vp, 10, data_isolated=True) + """.format( + group_id=group_id, + volume_id=volume_id, + ))) + + pools_b = json.loads(self.fs.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools'] + + # Should have created one new pool + new_pools = set(p['pool_name'] for p in pools_b) - set([p['pool_name'] for p in pools_a]) + self.assertEqual(len(new_pools), 1) + + # It should have followed the heuristic for PG count + # (this is an overly strict test condition, so we may want to remove + # it at some point as/when the logic gets fancier) + created_pg_num = self.fs.mon_manager.get_pool_property(list(new_pools)[0], "pg_num") + self.assertEqual(expected_pg_num, created_pg_num) + + def test_15303(self): + """ + Reproducer for #15303 "Client holds incorrect complete flag on dir + after losing caps" (http://tracker.ceph.com/issues/15303) + """ + for m in self.mounts: + m.umount_wait() + + # Create a dir on mount A + self.mount_a.mount() + self.mount_a.run_shell(["mkdir", "parent1"]) + self.mount_a.run_shell(["mkdir", "parent2"]) + self.mount_a.run_shell(["mkdir", "parent1/mydir"]) + + # Put some files in it from mount B + self.mount_b.mount() + self.mount_b.run_shell(["touch", "parent1/mydir/afile"]) + self.mount_b.umount_wait() + + # List the dir's contents on mount A + self.assertListEqual(self.mount_a.ls("parent1/mydir"), + ["afile"]) + + def test_evict_client(self): + """ + That a volume client can be evicted based on its auth ID and the volume + path it has mounted. + """ + + if not isinstance(self.mount_a, FuseMount): + self.skipTest("Requires FUSE client to inject client metadata") + + # mounts[1] would be used as handle for driving VolumeClient. mounts[2] + # and mounts[3] would be used as guests to mount the volumes/shares. + + for i in range(1, 4): + self.mounts[i].umount_wait() + + volumeclient_mount = self.mounts[1] + self._configure_vc_auth(volumeclient_mount, "manila") + guest_mounts = (self.mounts[2], self.mounts[3]) + + guest_entity = "guest" + group_id = "grpid" + mount_paths = [] + volume_ids = [] + + # Create two volumes. Authorize 'guest' auth ID to mount the two + # volumes. Mount the two volumes. Write data to the volumes. + for i in range(2): + # Create volume. 
+ volume_ids.append("volid_{0}".format(str(i))) + mount_paths.append( + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + create_result = vc.create_volume(vp, 10 * 1024 * 1024) + print(create_result['mount_path']) + """.format( + group_id=group_id, + volume_id=volume_ids[i] + )))) + + # Authorize 'guest' auth ID to mount the volume. + self._configure_guest_auth(volumeclient_mount, guest_mounts[i], + guest_entity, mount_paths[i]) + + # Mount the volume. + guest_mounts[i].mountpoint_dir_name = 'mnt.{id}.{suffix}'.format( + id=guest_entity, suffix=str(i)) + guest_mounts[i].mount(mount_path=mount_paths[i]) + guest_mounts[i].write_n_mb("data.bin", 1) + + + # Evict client, guest_mounts[0], using auth ID 'guest' and has mounted + # one volume. + self._volume_client_python(self.mount_b, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.deauthorize(vp, "{guest_entity}") + vc.evict("{guest_entity}", volume_path=vp) + """.format( + group_id=group_id, + volume_id=volume_ids[0], + guest_entity=guest_entity + ))) + + # Evicted guest client, guest_mounts[0], should not be able to do + # anymore metadata ops. It should start failing all operations + # when it sees that its own address is in the blacklist. + try: + guest_mounts[0].write_n_mb("rogue.bin", 1) + except CommandFailedError: + pass + else: + raise RuntimeError("post-eviction write should have failed!") + + # The blacklisted guest client should now be unmountable + guest_mounts[0].umount_wait() + + # Guest client, guest_mounts[1], using the same auth ID 'guest', but + # has mounted the other volume, should be able to use its volume + # unaffected. + guest_mounts[1].write_n_mb("data.bin.1", 1) + + # Cleanup. + for i in range(2): + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.deauthorize(vp, "{guest_entity}") + vc.delete_volume(vp) + vc.purge_volume(vp) + """.format( + group_id=group_id, + volume_id=volume_ids[i], + guest_entity=guest_entity + ))) + + + def test_purge(self): + """ + Reproducer for #15266, exception trying to purge volumes that + contain non-ascii filenames. + + Additionally test any other purge corner cases here. + """ + # I'm going to leave mount_b unmounted and just use it as a handle for + # driving volumeclient. It's a little hacky but we don't have a more + # general concept for librados/libcephfs clients as opposed to full + # blown mounting clients. 
+ self.mount_b.umount_wait() + self._configure_vc_auth(self.mount_b, "manila") + + group_id = "grpid" + # Use a unicode volume ID (like Manila), to reproduce #15266 + volume_id = u"volid" + + # Create + mount_path = self._volume_client_python(self.mount_b, dedent(""" + vp = VolumePath("{group_id}", u"{volume_id}") + create_result = vc.create_volume(vp, 10) + print(create_result['mount_path']) + """.format( + group_id=group_id, + volume_id=volume_id + ))) + + # Strip leading "/" + mount_path = mount_path[1:] + + # A file with non-ascii characters + self.mount_a.run_shell(["touch", os.path.join(mount_path, u"b\u00F6b")]) + + # A file with no permissions to do anything + self.mount_a.run_shell(["touch", os.path.join(mount_path, "noperms")]) + self.mount_a.run_shell(["chmod", "0000", os.path.join(mount_path, "noperms")]) + + self._volume_client_python(self.mount_b, dedent(""" + vp = VolumePath("{group_id}", u"{volume_id}") + vc.delete_volume(vp) + vc.purge_volume(vp) + """.format( + group_id=group_id, + volume_id=volume_id + ))) + + # Check it's really gone + self.assertEqual(self.mount_a.ls("volumes/_deleting"), []) + self.assertEqual(self.mount_a.ls("volumes/"), ["_deleting", group_id]) + + def test_readonly_authorization(self): + """ + That guest clients can be restricted to read-only mounts of volumes. + """ + + volumeclient_mount = self.mounts[1] + guest_mount = self.mounts[2] + volumeclient_mount.umount_wait() + guest_mount.umount_wait() + + # Configure volumeclient_mount as the handle for driving volumeclient. + self._configure_vc_auth(volumeclient_mount, "manila") + + guest_entity = "guest" + group_id = "grpid" + volume_id = "volid" + + # Create a volume. + mount_path = self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + create_result = vc.create_volume(vp, 1024*1024*10) + print(create_result['mount_path']) + """.format( + group_id=group_id, + volume_id=volume_id, + ))) + + # Authorize and configure credentials for the guest to mount the + # the volume with read-write access. + self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity, + mount_path, readonly=False) + + # Mount the volume, and write to it. + guest_mount.mount(mount_path=mount_path) + guest_mount.write_n_mb("data.bin", 1) + + # Change the guest auth ID's authorization to read-only mount access. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.deauthorize(vp, "{guest_entity}") + """.format( + group_id=group_id, + volume_id=volume_id, + guest_entity=guest_entity + ))) + self._configure_guest_auth(volumeclient_mount, guest_mount, guest_entity, + mount_path, readonly=True) + + # The effect of the change in access level to read-only is not + # immediate. The guest sees the change only after a remount of + # the volume. + guest_mount.umount_wait() + guest_mount.mount(mount_path=mount_path) + + # Read existing content of the volume. + self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"]) + # Cannot write into read-only volume. + try: + guest_mount.write_n_mb("rogue.bin", 1) + except CommandFailedError: + pass + + def test_get_authorized_ids(self): + """ + That for a volume, the authorized IDs and their access levels + can be obtained using CephFSVolumeClient's get_authorized_ids(). + """ + volumeclient_mount = self.mounts[1] + volumeclient_mount.umount_wait() + + # Configure volumeclient_mount as the handle for driving volumeclient. 
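+ # For reference: get_authorized_ids() is expected to print None when no
+ # IDs are authorized, and otherwise a list of (auth_id, access_level)
+ # tuples such as [('guest1', 'rw'), ('guest2', 'r')], which is what the
+ # assertions below check.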
+ self._configure_vc_auth(volumeclient_mount, "manila") + + group_id = "grpid" + volume_id = "volid" + guest_entity_1 = "guest1" + guest_entity_2 = "guest2" + + log.info("print(group ID: {0})".format(group_id)) + + # Create a volume. + auths = self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.create_volume(vp, 1024*1024*10) + auths = vc.get_authorized_ids(vp) + print(auths) + """.format( + group_id=group_id, + volume_id=volume_id, + ))) + # Check the list of authorized IDs for the volume. + self.assertEqual('None', auths) + + # Allow two auth IDs access to the volume. + auths = self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.authorize(vp, "{guest_entity_1}", readonly=False) + vc.authorize(vp, "{guest_entity_2}", readonly=True) + auths = vc.get_authorized_ids(vp) + print(auths) + """.format( + group_id=group_id, + volume_id=volume_id, + guest_entity_1=guest_entity_1, + guest_entity_2=guest_entity_2, + ))) + # Check the list of authorized IDs and their access levels. + expected_result = [('guest1', 'rw'), ('guest2', 'r')] + self.assertCountEqual(str(expected_result), auths) + + # Disallow both the auth IDs' access to the volume. + auths = self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.deauthorize(vp, "{guest_entity_1}") + vc.deauthorize(vp, "{guest_entity_2}") + auths = vc.get_authorized_ids(vp) + print(auths) + """.format( + group_id=group_id, + volume_id=volume_id, + guest_entity_1=guest_entity_1, + guest_entity_2=guest_entity_2, + ))) + # Check the list of authorized IDs for the volume. + self.assertEqual('None', auths) + + def test_multitenant_volumes(self): + """ + That volume access can be restricted to a tenant. + + That metadata used to enforce tenant isolation of + volumes is stored as a two-way mapping between auth + IDs and volumes that they're authorized to access. + """ + volumeclient_mount = self.mounts[1] + volumeclient_mount.umount_wait() + + # Configure volumeclient_mount as the handle for driving volumeclient. + self._configure_vc_auth(volumeclient_mount, "manila") + + group_id = "groupid" + volume_id = "volumeid" + + # Guest clients belonging to different tenants, but using the same + # auth ID. + auth_id = "guest" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + guestclient_2 = { + "auth_id": auth_id, + "tenant_id": "tenant2", + } + + # Create a volume. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.create_volume(vp, 1024*1024*10) + """.format( + group_id=group_id, + volume_id=volume_id, + ))) + + # Check that volume metadata file is created on volume creation. + vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id) + self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes")) + + # Authorize 'guestclient_1', using auth ID 'guest' and belonging to + # 'tenant1', with 'rw' access to the volume. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") + """.format( + group_id=group_id, + volume_id=volume_id, + auth_id=guestclient_1["auth_id"], + tenant_id=guestclient_1["tenant_id"] + ))) + + # Check that auth metadata file for auth ID 'guest', is + # created on authorizing 'guest' access to the volume. 
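+ # Naming convention used by these tests: per-auth-ID metadata is stored
+ # as "$<auth_id>.meta" and per-volume metadata as
+ # "_<group_id>:<volume_id>.meta", both directly under "volumes/".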
+ auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"]) + self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes")) + + # Verify that the auth metadata file stores the tenant ID that the + # auth ID belongs to, the auth ID's authorized access levels + # for different volumes, versioning details, etc. + expected_auth_metadata = { + "version": 2, + "compat_version": 6, + "dirty": False, + "tenant_id": "tenant1", + "subvolumes": { + "groupid/volumeid": { + "dirty": False, + "access_level": "rw" + } + } + } + + auth_metadata = self._volume_client_python(volumeclient_mount, dedent(""" + import json + vp = VolumePath("{group_id}", "{volume_id}") + auth_metadata = vc._auth_metadata_get("{auth_id}") + print(json.dumps(auth_metadata)) + """.format( + group_id=group_id, + volume_id=volume_id, + auth_id=guestclient_1["auth_id"], + ))) + auth_metadata = json.loads(auth_metadata) + + self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"]) + del expected_auth_metadata["version"] + del auth_metadata["version"] + self.assertEqual(expected_auth_metadata, auth_metadata) + + # Verify that the volume metadata file stores info about auth IDs + # and their access levels to the volume, versioning details, etc. + expected_vol_metadata = { + "version": 2, + "compat_version": 1, + "auths": { + "guest": { + "dirty": False, + "access_level": "rw" + } + } + } + + vol_metadata = self._volume_client_python(volumeclient_mount, dedent(""" + import json + vp = VolumePath("{group_id}", "{volume_id}") + volume_metadata = vc._volume_metadata_get(vp) + print(json.dumps(volume_metadata)) + """.format( + group_id=group_id, + volume_id=volume_id, + ))) + vol_metadata = json.loads(vol_metadata) + + self.assertGreaterEqual(vol_metadata["version"], expected_vol_metadata["version"]) + del expected_vol_metadata["version"] + del vol_metadata["version"] + self.assertEqual(expected_vol_metadata, vol_metadata) + + # Cannot authorize 'guestclient_2' to access the volume. + # It uses auth ID 'guest', which has already been used by a + # 'guestclient_1' belonging to an another tenant for accessing + # the volume. + with self.assertRaises(CommandFailedError): + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") + """.format( + group_id=group_id, + volume_id=volume_id, + auth_id=guestclient_2["auth_id"], + tenant_id=guestclient_2["tenant_id"] + ))) + + # Check that auth metadata file is cleaned up on removing + # auth ID's only access to a volume. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.deauthorize(vp, "{guest_entity}") + """.format( + group_id=group_id, + volume_id=volume_id, + guest_entity=guestclient_1["auth_id"] + ))) + + self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes")) + + # Check that volume metadata file is cleaned up on volume deletion. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.delete_volume(vp) + """.format( + group_id=group_id, + volume_id=volume_id, + ))) + self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes")) + + def test_authorize_auth_id_not_created_by_ceph_volume_client(self): + """ + If the auth_id already exists and is not created by + ceph_volume_client, it's not allowed to authorize + the auth-id by default. 
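+ The authorize() call is expected to fail, surfacing as a
+ CommandFailedError from the volume client helper script, because
+ allow_existing_id is not passed here.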
+ """ + volumeclient_mount = self.mounts[1] + volumeclient_mount.umount_wait() + + # Configure volumeclient_mount as the handle for driving volumeclient. + self._configure_vc_auth(volumeclient_mount, "manila") + + group_id = "groupid" + volume_id = "volumeid" + + # Create auth_id + self.fs.mon_manager.raw_cluster_cmd( + "auth", "get-or-create", "client.guest1", + "mds", "allow *", + "osd", "allow rw", + "mon", "allow *" + ) + + auth_id = "guest1" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # Create a volume. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.create_volume(vp, 1024*1024*10) + """.format( + group_id=group_id, + volume_id=volume_id, + ))) + + # Cannot authorize 'guestclient_1' to access the volume. + # It uses auth ID 'guest1', which already exists and not + # created by ceph_volume_client + with self.assertRaises(CommandFailedError): + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") + """.format( + group_id=group_id, + volume_id=volume_id, + auth_id=guestclient_1["auth_id"], + tenant_id=guestclient_1["tenant_id"] + ))) + + # Delete volume + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.delete_volume(vp) + """.format( + group_id=group_id, + volume_id=volume_id, + ))) + + def test_authorize_allow_existing_id_option(self): + """ + If the auth_id already exists and is not created by + ceph_volume_client, it's not allowed to authorize + the auth-id by default but is allowed with option + allow_existing_id. + """ + volumeclient_mount = self.mounts[1] + volumeclient_mount.umount_wait() + + # Configure volumeclient_mount as the handle for driving volumeclient. + self._configure_vc_auth(volumeclient_mount, "manila") + + group_id = "groupid" + volume_id = "volumeid" + + # Create auth_id + self.fs.mon_manager.raw_cluster_cmd( + "auth", "get-or-create", "client.guest1", + "mds", "allow *", + "osd", "allow rw", + "mon", "allow *" + ) + + auth_id = "guest1" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # Create a volume. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.create_volume(vp, 1024*1024*10) + """.format( + group_id=group_id, + volume_id=volume_id, + ))) + + # Cannot authorize 'guestclient_1' to access the volume + # by default, which already exists and not created by + # ceph_volume_client but is allowed with option 'allow_existing_id'. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}", + allow_existing_id="{allow_existing_id}") + """.format( + group_id=group_id, + volume_id=volume_id, + auth_id=guestclient_1["auth_id"], + tenant_id=guestclient_1["tenant_id"], + allow_existing_id=True + ))) + + # Delete volume + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.delete_volume(vp) + """.format( + group_id=group_id, + volume_id=volume_id, + ))) + + def test_deauthorize_auth_id_after_out_of_band_update(self): + """ + If the auth_id authorized by ceph_volume_client is updated + out of band, the auth_id should not be deleted after a + deauthorize. It should only remove caps associated it. 
+ """ + volumeclient_mount = self.mounts[1] + volumeclient_mount.umount_wait() + + # Configure volumeclient_mount as the handle for driving volumeclient. + self._configure_vc_auth(volumeclient_mount, "manila") + + group_id = "groupid" + volume_id = "volumeid" + + + auth_id = "guest1" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # Create a volume. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.create_volume(vp, 1024*1024*10) + """.format( + group_id=group_id, + volume_id=volume_id, + ))) + + # Authorize 'guestclient_1' to access the volume. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") + """.format( + group_id=group_id, + volume_id=volume_id, + auth_id=guestclient_1["auth_id"], + tenant_id=guestclient_1["tenant_id"] + ))) + + # Update caps for guestclient_1 out of band + out = self.fs.mon_manager.raw_cluster_cmd( + "auth", "caps", "client.guest1", + "mds", "allow rw path=/volumes/groupid, allow rw path=/volumes/groupid/volumeid", + "osd", "allow rw pool=cephfs_data namespace=fsvolumens_volumeid", + "mon", "allow r", + "mgr", "allow *" + ) + + # Deauthorize guestclient_1 + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.deauthorize(vp, "{guest_entity}") + """.format( + group_id=group_id, + volume_id=volume_id, + guest_entity=guestclient_1["auth_id"] + ))) + + # Validate the caps of guestclient_1 after deauthorize. It should not have deleted + # guestclient_1. The mgr and mds caps should be present which was updated out of band. + out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty")) + + self.assertEqual("client.guest1", out[0]["entity"]) + self.assertEqual("allow rw path=/volumes/groupid", out[0]["caps"]["mds"]) + self.assertEqual("allow *", out[0]["caps"]["mgr"]) + self.assertNotIn("osd", out[0]["caps"]) + + # Delete volume + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.delete_volume(vp) + """.format( + group_id=group_id, + volume_id=volume_id, + ))) + + def test_recover_metadata(self): + """ + That volume client can recover from partial auth updates using + metadata files, which store auth info and its update status info. + """ + volumeclient_mount = self.mounts[1] + volumeclient_mount.umount_wait() + + # Configure volumeclient_mount as the handle for driving volumeclient. + self._configure_vc_auth(volumeclient_mount, "manila") + + group_id = "groupid" + volume_id = "volumeid" + + guestclient = { + "auth_id": "guest", + "tenant_id": "tenant", + } + + # Create a volume. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.create_volume(vp, 1024*1024*10) + """.format( + group_id=group_id, + volume_id=volume_id, + ))) + + # Authorize 'guestclient' access to the volume. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") + """.format( + group_id=group_id, + volume_id=volume_id, + auth_id=guestclient["auth_id"], + tenant_id=guestclient["tenant_id"] + ))) + + # Check that auth metadata file for auth ID 'guest' is created. 
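+        # The volume client stores one "$<auth_id>.meta" file per auth ID and one
+        # "_<group_id>:<volume_id>.meta" file per volume directly under the volume
+        # prefix, which is the "volumes" directory listed below.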
+ auth_metadata_filename = "${0}.meta".format(guestclient["auth_id"]) + self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes")) + + # Induce partial auth update state by modifying the auth metadata file, + # and then run recovery procedure. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + auth_metadata = vc._auth_metadata_get("{auth_id}") + auth_metadata['dirty'] = True + vc._auth_metadata_set("{auth_id}", auth_metadata) + vc.recover() + """.format( + group_id=group_id, + volume_id=volume_id, + auth_id=guestclient["auth_id"], + ))) + + def test_update_old_style_auth_metadata_to_new_during_recover(self): + """ + From nautilus onwards 'volumes' created by ceph_volume_client were + renamed and used as CephFS subvolumes accessed via the ceph-mgr + interface. Hence it makes sense to store the subvolume data in + auth-metadata file with 'subvolumes' key instead of 'volumes' key. + This test validates the transparent update of 'volumes' key to + 'subvolumes' key in auth metadata file during recover. + """ + volumeclient_mount = self.mounts[1] + volumeclient_mount.umount_wait() + + # Configure volumeclient_mount as the handle for driving volumeclient. + self._configure_vc_auth(volumeclient_mount, "manila") + + group_id = "groupid" + volume_id = "volumeid" + + guestclient = { + "auth_id": "guest", + "tenant_id": "tenant", + } + + # Create a volume. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.create_volume(vp, 1024*1024*10) + """.format( + group_id=group_id, + volume_id=volume_id, + ))) + + # Check that volume metadata file is created on volume creation. + vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id) + self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes")) + + # Authorize 'guestclient' access to the volume. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") + """.format( + group_id=group_id, + volume_id=volume_id, + auth_id=guestclient["auth_id"], + tenant_id=guestclient["tenant_id"] + ))) + + # Check that auth metadata file for auth ID 'guest' is created. + auth_metadata_filename = "${0}.meta".format(guestclient["auth_id"]) + self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes")) + + # Replace 'subvolumes' to 'volumes', old style auth-metadata file + self.mounts[0].run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)]) + + # Verify that the auth metadata file stores the tenant ID that the + # auth ID belongs to, the auth ID's authorized access levels + # for different volumes, versioning details, etc. + expected_auth_metadata = { + "version": 2, + "compat_version": 6, + "dirty": False, + "tenant_id": "tenant", + "subvolumes": { + "groupid/volumeid": { + "dirty": False, + "access_level": "rw" + } + } + } + + # Induce partial auth update state by modifying the auth metadata file, + # and then run recovery procedure. This should also update 'volumes' key + # to 'subvolumes'. 
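+        # Flipping 'dirty' to True mimics an update that was interrupted midway;
+        # recover() is expected to finish the pending update and rewrite the file,
+        # which also upgrades the old 'volumes' key to 'subvolumes'.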
+ self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + auth_metadata = vc._auth_metadata_get("{auth_id}") + auth_metadata['dirty'] = True + vc._auth_metadata_set("{auth_id}", auth_metadata) + vc.recover() + """.format( + group_id=group_id, + volume_id=volume_id, + auth_id=guestclient["auth_id"], + ))) + + auth_metadata = self._volume_client_python(volumeclient_mount, dedent(""" + import json + auth_metadata = vc._auth_metadata_get("{auth_id}") + print(json.dumps(auth_metadata)) + """.format( + auth_id=guestclient["auth_id"], + ))) + auth_metadata = json.loads(auth_metadata) + + self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"]) + del expected_auth_metadata["version"] + del auth_metadata["version"] + self.assertEqual(expected_auth_metadata, auth_metadata) + + # Check that auth metadata file is cleaned up on removing + # auth ID's access to volumes 'volumeid'. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.deauthorize(vp, "{guest_entity}") + """.format( + group_id=group_id, + volume_id=volume_id, + guest_entity=guestclient["auth_id"] + ))) + self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes")) + + # Check that volume metadata file is cleaned up on volume deletion. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.delete_volume(vp) + """.format( + group_id=group_id, + volume_id=volume_id, + ))) + self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes")) + + def test_update_old_style_auth_metadata_to_new_during_authorize(self): + """ + From nautilus onwards 'volumes' created by ceph_volume_client were + renamed and used as CephFS subvolumes accessed via the ceph-mgr + interface. Hence it makes sense to store the subvolume data in + auth-metadata file with 'subvolumes' key instead of 'volumes' key. + This test validates the transparent update of 'volumes' key to + 'subvolumes' key in auth metadata file during authorize. + """ + volumeclient_mount = self.mounts[1] + volumeclient_mount.umount_wait() + + # Configure volumeclient_mount as the handle for driving volumeclient. + self._configure_vc_auth(volumeclient_mount, "manila") + + group_id = "groupid" + volume_id1 = "volumeid1" + volume_id2 = "volumeid2" + + auth_id = "guest" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # Create a volume volumeid1. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + create_result = vc.create_volume(vp, 10*1024*1024) + print(create_result['mount_path']) + """.format( + group_id=group_id, + volume_id=volume_id1, + ))) + + # Create a volume volumeid2. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + create_result = vc.create_volume(vp, 10*1024*1024) + print(create_result['mount_path']) + """.format( + group_id=group_id, + volume_id=volume_id2, + ))) + + # Check that volume metadata file is created on volume creation. + vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id1) + self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes")) + vol_metadata_filename2 = "_{0}:{1}.meta".format(group_id, volume_id2) + self.assertIn(vol_metadata_filename2, self.mounts[0].ls("volumes")) + + # Authorize 'guestclient_1', using auth ID 'guest' and belonging to + # 'tenant1', with 'rw' access to the volume 'volumeid1'. 
+ self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") + """.format( + group_id=group_id, + volume_id=volume_id1, + auth_id=guestclient_1["auth_id"], + tenant_id=guestclient_1["tenant_id"] + ))) + + # Check that auth metadata file for auth ID 'guest', is + # created on authorizing 'guest' access to the volume. + auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"]) + self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes")) + + # Replace 'subvolumes' to 'volumes', old style auth-metadata file + self.mounts[0].run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)]) + + # Authorize 'guestclient_1', using auth ID 'guest' and belonging to + # 'tenant1', with 'rw' access to the volume 'volumeid2'. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") + """.format( + group_id=group_id, + volume_id=volume_id2, + auth_id=guestclient_1["auth_id"], + tenant_id=guestclient_1["tenant_id"] + ))) + + # Verify that the auth metadata file stores the tenant ID that the + # auth ID belongs to, the auth ID's authorized access levels + # for different volumes, versioning details, etc. + expected_auth_metadata = { + "version": 2, + "compat_version": 6, + "dirty": False, + "tenant_id": "tenant1", + "subvolumes": { + "groupid/volumeid1": { + "dirty": False, + "access_level": "rw" + }, + "groupid/volumeid2": { + "dirty": False, + "access_level": "rw" + } + } + } + + auth_metadata = self._volume_client_python(volumeclient_mount, dedent(""" + import json + auth_metadata = vc._auth_metadata_get("{auth_id}") + print(json.dumps(auth_metadata)) + """.format( + auth_id=guestclient_1["auth_id"], + ))) + auth_metadata = json.loads(auth_metadata) + + self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"]) + del expected_auth_metadata["version"] + del auth_metadata["version"] + self.assertEqual(expected_auth_metadata, auth_metadata) + + # Check that auth metadata file is cleaned up on removing + # auth ID's access to volumes 'volumeid1' and 'volumeid2'. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.deauthorize(vp, "{guest_entity}") + """.format( + group_id=group_id, + volume_id=volume_id1, + guest_entity=guestclient_1["auth_id"] + ))) + + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.deauthorize(vp, "{guest_entity}") + """.format( + group_id=group_id, + volume_id=volume_id2, + guest_entity=guestclient_1["auth_id"] + ))) + self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes")) + + # Check that volume metadata file is cleaned up on volume deletion. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.delete_volume(vp) + """.format( + group_id=group_id, + volume_id=volume_id1, + ))) + self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes")) + + # Check that volume metadata file is cleaned up on volume deletion. 
+ self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.delete_volume(vp) + """.format( + group_id=group_id, + volume_id=volume_id2, + ))) + self.assertNotIn(vol_metadata_filename2, self.mounts[0].ls("volumes")) + + def test_update_old_style_auth_metadata_to_new_during_deauthorize(self): + """ + From nautilus onwards 'volumes' created by ceph_volume_client were + renamed and used as CephFS subvolumes accessed via the ceph-mgr + interface. Hence it makes sense to store the subvolume data in + auth-metadata file with 'subvolumes' key instead of 'volumes' key. + This test validates the transparent update of 'volumes' key to + 'subvolumes' key in auth metadata file during de-authorize. + """ + volumeclient_mount = self.mounts[1] + volumeclient_mount.umount_wait() + + # Configure volumeclient_mount as the handle for driving volumeclient. + self._configure_vc_auth(volumeclient_mount, "manila") + + group_id = "groupid" + volume_id1 = "volumeid1" + volume_id2 = "volumeid2" + + auth_id = "guest" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # Create a volume volumeid1. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + create_result = vc.create_volume(vp, 10*1024*1024) + print(create_result['mount_path']) + """.format( + group_id=group_id, + volume_id=volume_id1, + ))) + + # Create a volume volumeid2. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + create_result = vc.create_volume(vp, 10*1024*1024) + print(create_result['mount_path']) + """.format( + group_id=group_id, + volume_id=volume_id2, + ))) + + # Check that volume metadata file is created on volume creation. + vol_metadata_filename = "_{0}:{1}.meta".format(group_id, volume_id1) + self.assertIn(vol_metadata_filename, self.mounts[0].ls("volumes")) + vol_metadata_filename2 = "_{0}:{1}.meta".format(group_id, volume_id2) + self.assertIn(vol_metadata_filename2, self.mounts[0].ls("volumes")) + + # Authorize 'guestclient_1', using auth ID 'guest' and belonging to + # 'tenant1', with 'rw' access to the volume 'volumeid1'. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") + """.format( + group_id=group_id, + volume_id=volume_id1, + auth_id=guestclient_1["auth_id"], + tenant_id=guestclient_1["tenant_id"] + ))) + + # Authorize 'guestclient_1', using auth ID 'guest' and belonging to + # 'tenant1', with 'rw' access to the volume 'volumeid2'. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.authorize(vp, "{auth_id}", tenant_id="{tenant_id}") + """.format( + group_id=group_id, + volume_id=volume_id2, + auth_id=guestclient_1["auth_id"], + tenant_id=guestclient_1["tenant_id"] + ))) + + # Check that auth metadata file for auth ID 'guest', is + # created on authorizing 'guest' access to the volume. + auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"]) + self.assertIn(auth_metadata_filename, self.mounts[0].ls("volumes")) + + # Replace 'subvolumes' to 'volumes', old style auth-metadata file + self.mounts[0].run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)]) + + # Deauthorize 'guestclient_1' to access 'volumeid2'. 
This should update + # 'volumes' key to 'subvolumes' + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.deauthorize(vp, "{guest_entity}") + """.format( + group_id=group_id, + volume_id=volume_id2, + guest_entity=guestclient_1["auth_id"], + ))) + + # Verify that the auth metadata file stores the tenant ID that the + # auth ID belongs to, the auth ID's authorized access levels + # for different volumes, versioning details, etc. + expected_auth_metadata = { + "version": 2, + "compat_version": 6, + "dirty": False, + "tenant_id": "tenant1", + "subvolumes": { + "groupid/volumeid1": { + "dirty": False, + "access_level": "rw" + } + } + } + + auth_metadata = self._volume_client_python(volumeclient_mount, dedent(""" + import json + auth_metadata = vc._auth_metadata_get("{auth_id}") + print(json.dumps(auth_metadata)) + """.format( + auth_id=guestclient_1["auth_id"], + ))) + auth_metadata = json.loads(auth_metadata) + + self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"]) + del expected_auth_metadata["version"] + del auth_metadata["version"] + self.assertEqual(expected_auth_metadata, auth_metadata) + + # Check that auth metadata file is cleaned up on removing + # auth ID's access to volumes 'volumeid1' and 'volumeid2' + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.deauthorize(vp, "{guest_entity}") + """.format( + group_id=group_id, + volume_id=volume_id1, + guest_entity=guestclient_1["auth_id"] + ))) + self.assertNotIn(auth_metadata_filename, self.mounts[0].ls("volumes")) + + # Check that volume metadata file is cleaned up on 'volumeid1' deletion. + self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.delete_volume(vp) + """.format( + group_id=group_id, + volume_id=volume_id1, + ))) + self.assertNotIn(vol_metadata_filename, self.mounts[0].ls("volumes")) + + # Check that volume metadata file is cleaned up on 'volumeid2' deletion. 
+ self._volume_client_python(volumeclient_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.delete_volume(vp) + """.format( + group_id=group_id, + volume_id=volume_id2, + ))) + self.assertNotIn(vol_metadata_filename2, self.mounts[0].ls("volumes")) + + def test_put_object(self): + vc_mount = self.mounts[1] + vc_mount.umount_wait() + self._configure_vc_auth(vc_mount, "manila") + + obj_data = 'test data' + obj_name = 'test_vc_obj_1' + pool_name = self.fs.get_data_pool_names()[0] + + self._volume_client_python(vc_mount, dedent(""" + vc.put_object("{pool_name}", "{obj_name}", b"{obj_data}") + """.format( + pool_name = pool_name, + obj_name = obj_name, + obj_data = obj_data + ))) + + read_data = self.fs.rados(['get', obj_name, '-'], pool=pool_name) + self.assertEqual(obj_data, read_data) + + def test_get_object(self): + vc_mount = self.mounts[1] + vc_mount.umount_wait() + self._configure_vc_auth(vc_mount, "manila") + + obj_data = 'test_data' + obj_name = 'test_vc_ob_2' + pool_name = self.fs.get_data_pool_names()[0] + + self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data) + + self._volume_client_python(vc_mount, dedent(""" + data_read = vc.get_object("{pool_name}", "{obj_name}") + assert data_read == b"{obj_data}" + """.format( + pool_name = pool_name, + obj_name = obj_name, + obj_data = obj_data + ))) + + def test_put_object_versioned(self): + vc_mount = self.mounts[1] + vc_mount.umount_wait() + self._configure_vc_auth(vc_mount, "manila") + + obj_data = 'test_data' + obj_name = 'test_vc_obj' + pool_name = self.fs.get_data_pool_names()[0] + self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data) + + self._volume_client_python(vc_mount, dedent(""" + data, version_before = vc.get_object_and_version("{pool_name}", "{obj_name}") + + if sys_version_info.major < 3: + data = data + 'modification1' + elif sys_version_info.major > 3: + data = str.encode(data.decode() + 'modification1') + + vc.put_object_versioned("{pool_name}", "{obj_name}", data, version_before) + data, version_after = vc.get_object_and_version("{pool_name}", "{obj_name}") + assert version_after == version_before + 1 + """).format(pool_name=pool_name, obj_name=obj_name)) + + def test_version_check_for_put_object_versioned(self): + vc_mount = self.mounts[1] + vc_mount.umount_wait() + self._configure_vc_auth(vc_mount, "manila") + + obj_data = 'test_data' + obj_name = 'test_vc_ob_2' + pool_name = self.fs.get_data_pool_names()[0] + self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data) + + # Test if put_object_versioned() crosschecks the version of the + # given object. Being a negative test, an exception is expected. 
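+        # The guest script bumps the object out of band with put_object(), then
+        # retries put_object_versioned() with the now-stale version; the resulting
+        # rados OSError is caught and echoed on stdout for the assertion below.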
+ expected_exception = 'rados_OSError' + output = self._volume_client_python(vc_mount, dedent(""" + data, version = vc.get_object_and_version("{pool_name}", "{obj_name}") + + if sys_version_info.major < 3: + data = data + 'm1' + elif sys_version_info.major > 3: + data = str.encode(data.decode('utf-8') + 'm1') + + vc.put_object("{pool_name}", "{obj_name}", data) + + if sys_version_info.major < 3: + data = data + 'm2' + elif sys_version_info.major > 3: + data = str.encode(data.decode('utf-8') + 'm2') + + try: + vc.put_object_versioned("{pool_name}", "{obj_name}", data, version) + except {expected_exception}: + print('{expected_exception} raised') + """).format(pool_name=pool_name, obj_name=obj_name, + expected_exception=expected_exception)) + self.assertEqual(expected_exception + ' raised', output) + + + def test_delete_object(self): + vc_mount = self.mounts[1] + vc_mount.umount_wait() + self._configure_vc_auth(vc_mount, "manila") + + obj_data = 'test data' + obj_name = 'test_vc_obj_3' + pool_name = self.fs.get_data_pool_names()[0] + + self.fs.rados(['put', obj_name, '-'], pool=pool_name, stdin_data=obj_data) + + self._volume_client_python(vc_mount, dedent(""" + data_read = vc.delete_object("{pool_name}", "{obj_name}") + """.format( + pool_name = pool_name, + obj_name = obj_name, + ))) + + with self.assertRaises(CommandFailedError): + self.fs.rados(['stat', obj_name], pool=pool_name) + + # Check idempotency -- no error raised trying to delete non-existent + # object + self._volume_client_python(vc_mount, dedent(""" + data_read = vc.delete_object("{pool_name}", "{obj_name}") + """.format( + pool_name = pool_name, + obj_name = obj_name, + ))) + + def test_21501(self): + """ + Reproducer for #21501 "ceph_volume_client: sets invalid caps for + existing IDs with no caps" (http://tracker.ceph.com/issues/21501) + """ + + vc_mount = self.mounts[1] + vc_mount.umount_wait() + + # Configure vc_mount as the handle for driving volumeclient + self._configure_vc_auth(vc_mount, "manila") + + # Create a volume + group_id = "grpid" + volume_id = "volid" + mount_path = self._volume_client_python(vc_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + create_result = vc.create_volume(vp, 1024*1024*10) + print(create_result['mount_path']) + """.format( + group_id=group_id, + volume_id=volume_id + ))) + + # Create an auth ID with no caps + guest_id = '21501' + self.fs.mon_manager.raw_cluster_cmd_result( + 'auth', 'get-or-create', 'client.{0}'.format(guest_id)) + + guest_mount = self.mounts[2] + guest_mount.umount_wait() + + # Set auth caps for the auth ID using the volumeclient + self._configure_guest_auth(vc_mount, guest_mount, guest_id, mount_path, allow_existing_id=True) + + # Mount the volume in the guest using the auth ID to assert that the + # auth caps are valid + guest_mount.mount(mount_path=mount_path) + + def test_volume_without_namespace_isolation(self): + """ + That volume client can create volumes that do not have separate RADOS + namespace layouts. 
+ """ + vc_mount = self.mounts[1] + vc_mount.umount_wait() + + # Configure vc_mount as the handle for driving volumeclient + self._configure_vc_auth(vc_mount, "manila") + + # Create a volume + volume_prefix = "/myprefix" + group_id = "grpid" + volume_id = "volid" + self._volume_client_python(vc_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False) + print(create_result['mount_path']) + """.format( + group_id=group_id, + volume_id=volume_id + )), volume_prefix) + + # The CephFS volume should be created + self.mounts[0].stat(os.path.join("myprefix", group_id, volume_id)) + vol_namespace = self.mounts[0].getfattr( + os.path.join("myprefix", group_id, volume_id), + "ceph.dir.layout.pool_namespace") + assert not vol_namespace + + self._volume_client_python(vc_mount, dedent(""" + vp = VolumePath("{group_id}", "{volume_id}") + vc.delete_volume(vp) + vc.purge_volume(vp) + """.format( + group_id=group_id, + volume_id=volume_id, + )), volume_prefix) diff --git a/qa/tasks/cephfs/test_volumes.py b/qa/tasks/cephfs/test_volumes.py new file mode 100644 index 00000000..67f138f8 --- /dev/null +++ b/qa/tasks/cephfs/test_volumes.py @@ -0,0 +1,4435 @@ +import os +import json +import time +import errno +import random +import logging +import collections +import uuid +import unittest +from hashlib import md5 +from textwrap import dedent + +from tasks.cephfs.cephfs_test_case import CephFSTestCase +from teuthology.exceptions import CommandFailedError +from teuthology.misc import sudo_write_file + +log = logging.getLogger(__name__) + +class TestVolumes(CephFSTestCase): + TEST_VOLUME_PREFIX = "volume" + TEST_SUBVOLUME_PREFIX="subvolume" + TEST_GROUP_PREFIX="group" + TEST_SNAPSHOT_PREFIX="snapshot" + TEST_CLONE_PREFIX="clone" + TEST_FILE_NAME_PREFIX="subvolume_file" + + # for filling subvolume with data + CLIENTS_REQUIRED = 2 + + # io defaults + DEFAULT_FILE_SIZE = 1 # MB + DEFAULT_NUMBER_OF_FILES = 1024 + + def _fs_cmd(self, *args): + return self.mgr_cluster.mon_manager.raw_cluster_cmd("fs", *args) + + def _raw_cmd(self, *args): + return self.mgr_cluster.mon_manager.raw_cluster_cmd(*args) + + def __check_clone_state(self, state, clone, clone_group=None, timo=120): + check = 0 + args = ["clone", "status", self.volname, clone] + if clone_group: + args.append(clone_group) + args = tuple(args) + while check < timo: + result = json.loads(self._fs_cmd(*args)) + if result["status"]["state"] == state: + break + check += 1 + time.sleep(1) + self.assertTrue(check < timo) + + def _wait_for_clone_to_complete(self, clone, clone_group=None, timo=120): + self.__check_clone_state("complete", clone, clone_group, timo) + + def _wait_for_clone_to_fail(self, clone, clone_group=None, timo=120): + self.__check_clone_state("failed", clone, clone_group, timo) + + def _check_clone_canceled(self, clone, clone_group=None): + self.__check_clone_state("canceled", clone, clone_group, timo=1) + + def _get_subvolume_snapshot_path(self, subvolume, snapshot, source_group, subvol_path, source_version): + if source_version == 2: + # v2 + if subvol_path is not None: + (base_path, uuid_str) = os.path.split(subvol_path) + else: + (base_path, uuid_str) = os.path.split(self._get_subvolume_path(self.volname, subvolume, group_name=source_group)) + return os.path.join(base_path, ".snap", snapshot, uuid_str) + + # v1 + base_path = self._get_subvolume_path(self.volname, subvolume, group_name=source_group) + return os.path.join(base_path, ".snap", snapshot) + + def 
_verify_clone_attrs(self, source_path, clone_path): + path1 = source_path + path2 = clone_path + + p = self.mount_a.run_shell(["find", path1]) + paths = p.stdout.getvalue().strip().split() + + # for each entry in source and clone (sink) verify certain inode attributes: + # inode type, mode, ownership, [am]time. + for source_path in paths: + sink_entry = source_path[len(path1)+1:] + sink_path = os.path.join(path2, sink_entry) + + # mode+type + sval = int(self.mount_a.run_shell(['stat', '-c' '%f', source_path]).stdout.getvalue().strip(), 16) + cval = int(self.mount_a.run_shell(['stat', '-c' '%f', sink_path]).stdout.getvalue().strip(), 16) + self.assertEqual(sval, cval) + + # ownership + sval = int(self.mount_a.run_shell(['stat', '-c' '%u', source_path]).stdout.getvalue().strip()) + cval = int(self.mount_a.run_shell(['stat', '-c' '%u', sink_path]).stdout.getvalue().strip()) + self.assertEqual(sval, cval) + + sval = int(self.mount_a.run_shell(['stat', '-c' '%g', source_path]).stdout.getvalue().strip()) + cval = int(self.mount_a.run_shell(['stat', '-c' '%g', sink_path]).stdout.getvalue().strip()) + self.assertEqual(sval, cval) + + # inode timestamps + sval = int(self.mount_a.run_shell(['stat', '-c' '%X', source_path]).stdout.getvalue().strip()) + cval = int(self.mount_a.run_shell(['stat', '-c' '%X', sink_path]).stdout.getvalue().strip()) + self.assertEqual(sval, cval) + + sval = int(self.mount_a.run_shell(['stat', '-c' '%Y', source_path]).stdout.getvalue().strip()) + cval = int(self.mount_a.run_shell(['stat', '-c' '%Y', sink_path]).stdout.getvalue().strip()) + self.assertEqual(sval, cval) + + def _verify_clone_root(self, source_path, clone_path, clone, clone_group, clone_pool): + # verifies following clone root attrs quota, data_pool and pool_namespace + # remaining attributes of clone root are validated in _verify_clone_attrs + + clone_info = json.loads(self._get_subvolume_info(self.volname, clone, clone_group)) + + # verify quota is inherited from source snapshot + src_quota = self.mount_a.getfattr(source_path, "ceph.quota.max_bytes") + self.assertEqual(clone_info["bytes_quota"], "infinite" if src_quota is None else int(src_quota)) + + if clone_pool: + # verify pool is set as per request + self.assertEqual(clone_info["data_pool"], clone_pool) + else: + # verify pool and pool namespace are inherited from snapshot + self.assertEqual(clone_info["data_pool"], + self.mount_a.getfattr(source_path, "ceph.dir.layout.pool")) + self.assertEqual(clone_info["pool_namespace"], + self.mount_a.getfattr(source_path, "ceph.dir.layout.pool_namespace")) + + def _verify_clone(self, subvolume, snapshot, clone, + source_group=None, clone_group=None, clone_pool=None, + subvol_path=None, source_version=2, timo=120): + # pass in subvol_path (subvolume path when snapshot was taken) when subvolume is removed + # but snapshots are retained for clone verification + path1 = self._get_subvolume_snapshot_path(subvolume, snapshot, source_group, subvol_path, source_version) + path2 = self._get_subvolume_path(self.volname, clone, group_name=clone_group) + + check = 0 + # TODO: currently snapshot rentries are not stable if snapshot source entries + # are removed, https://tracker.ceph.com/issues/46747 + while check < timo and subvol_path is None: + val1 = int(self.mount_a.getfattr(path1, "ceph.dir.rentries")) + val2 = int(self.mount_a.getfattr(path2, "ceph.dir.rentries")) + if val1 == val2: + break + check += 1 + time.sleep(1) + self.assertTrue(check < timo) + + self._verify_clone_root(path1, path2, clone, clone_group, 
clone_pool) + self._verify_clone_attrs(path1, path2) + + def _generate_random_volume_name(self, count=1): + n = self.volume_start + volumes = [f"{TestVolumes.TEST_VOLUME_PREFIX}_{i:016}" for i in range(n, n+count)] + self.volume_start += count + return volumes[0] if count == 1 else volumes + + def _generate_random_subvolume_name(self, count=1): + n = self.subvolume_start + subvolumes = [f"{TestVolumes.TEST_SUBVOLUME_PREFIX}_{i:016}" for i in range(n, n+count)] + self.subvolume_start += count + return subvolumes[0] if count == 1 else subvolumes + + def _generate_random_group_name(self, count=1): + n = self.group_start + groups = [f"{TestVolumes.TEST_GROUP_PREFIX}_{i:016}" for i in range(n, n+count)] + self.group_start += count + return groups[0] if count == 1 else groups + + def _generate_random_snapshot_name(self, count=1): + n = self.snapshot_start + snaps = [f"{TestVolumes.TEST_SNAPSHOT_PREFIX}_{i:016}" for i in range(n, n+count)] + self.snapshot_start += count + return snaps[0] if count == 1 else snaps + + def _generate_random_clone_name(self, count=1): + n = self.clone_start + clones = [f"{TestVolumes.TEST_CLONE_PREFIX}_{i:016}" for i in range(n, n+count)] + self.clone_start += count + return clones[0] if count == 1 else clones + + def _enable_multi_fs(self): + self._fs_cmd("flag", "set", "enable_multiple", "true", "--yes-i-really-mean-it") + + def _create_or_reuse_test_volume(self): + result = json.loads(self._fs_cmd("volume", "ls")) + if len(result) == 0: + self.vol_created = True + self.volname = self._generate_random_volume_name() + self._fs_cmd("volume", "create", self.volname) + else: + self.volname = result[0]['name'] + + def _get_subvolume_group_path(self, vol_name, group_name): + args = ("subvolumegroup", "getpath", vol_name, group_name) + path = self._fs_cmd(*args) + # remove the leading '/', and trailing whitespaces + return path[1:].rstrip() + + def _get_subvolume_path(self, vol_name, subvol_name, group_name=None): + args = ["subvolume", "getpath", vol_name, subvol_name] + if group_name: + args.append(group_name) + args = tuple(args) + path = self._fs_cmd(*args) + # remove the leading '/', and trailing whitespaces + return path[1:].rstrip() + + def _get_subvolume_info(self, vol_name, subvol_name, group_name=None): + args = ["subvolume", "info", vol_name, subvol_name] + if group_name: + args.append(group_name) + args = tuple(args) + subvol_md = self._fs_cmd(*args) + return subvol_md + + def _get_subvolume_snapshot_info(self, vol_name, subvol_name, snapname, group_name=None): + args = ["subvolume", "snapshot", "info", vol_name, subvol_name, snapname] + if group_name: + args.append(group_name) + args = tuple(args) + snap_md = self._fs_cmd(*args) + return snap_md + + def _delete_test_volume(self): + self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it") + + def _do_subvolume_pool_and_namespace_update(self, subvolume, pool=None, pool_namespace=None, subvolume_group=None): + subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group) + + if pool is not None: + self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool', pool) + + if pool_namespace is not None: + self.mount_a.setfattr(subvolpath, 'ceph.dir.layout.pool_namespace', pool_namespace) + + def _do_subvolume_attr_update(self, subvolume, uid, gid, mode, subvolume_group=None): + subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group) + + # mode + self.mount_a.run_shell(['chmod', mode, subvolpath]) + + # ownership + self.mount_a.run_shell(['chown', 
uid, subvolpath]) + self.mount_a.run_shell(['chgrp', gid, subvolpath]) + + def _do_subvolume_io(self, subvolume, subvolume_group=None, create_dir=None, + number_of_files=DEFAULT_NUMBER_OF_FILES, file_size=DEFAULT_FILE_SIZE): + # get subvolume path for IO + args = ["subvolume", "getpath", self.volname, subvolume] + if subvolume_group: + args.append(subvolume_group) + args = tuple(args) + subvolpath = self._fs_cmd(*args) + self.assertNotEqual(subvolpath, None) + subvolpath = subvolpath[1:].rstrip() # remove "/" prefix and any trailing newline + + io_path = subvolpath + if create_dir: + io_path = os.path.join(subvolpath, create_dir) + self.mount_a.run_shell(["mkdir", "-p", io_path]) + + log.debug("filling subvolume {0} with {1} files each {2}MB size under directory {3}".format(subvolume, number_of_files, file_size, io_path)) + for i in range(number_of_files): + filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, i) + self.mount_a.write_n_mb(os.path.join(io_path, filename), file_size) + + def _do_subvolume_io_mixed(self, subvolume, subvolume_group=None): + subvolpath = self._get_subvolume_path(self.volname, subvolume, group_name=subvolume_group) + + reg_file = "regfile.0" + reg_path = os.path.join(subvolpath, reg_file) + dir_path = os.path.join(subvolpath, "dir.0") + sym_path1 = os.path.join(subvolpath, "sym.0") + # this symlink's ownership would be changed + sym_path2 = os.path.join(dir_path, "sym.0") + + #self.mount_a.write_n_mb(reg_path, TestVolumes.DEFAULT_FILE_SIZE) + self.mount_a.run_shell(["sudo", "mkdir", dir_path], omit_sudo=False) + self.mount_a.run_shell(["sudo", "ln", "-s", "./{}".format(reg_file), sym_path1], omit_sudo=False) + self.mount_a.run_shell(["sudo", "ln", "-s", "./{}".format(reg_file), sym_path2], omit_sudo=False) + # flip ownership to nobody. assumption: nobody's id is 65534 + self.mount_a.run_shell(["sudo", "chown", "-h", "65534:65534", sym_path2], omit_sudo=False) + + def _wait_for_trash_empty(self, timeout=30): + # XXX: construct the trash dir path (note that there is no mgr + # [sub]volume interface for this). 
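+        # Removed subvolumes are moved into 'volumes/_deleting' and purged
+        # asynchronously, so an empty trash directory means the purge has finished.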
+ trashdir = os.path.join("./", "volumes", "_deleting") + self.mount_a.wait_for_dir_empty(trashdir, timeout=timeout) + + def _assert_meta_location_and_version(self, vol_name, subvol_name, subvol_group=None, version=2, legacy=False): + if legacy: + subvol_path = self._get_subvolume_path(vol_name, subvol_name, group_name=subvol_group) + m = md5() + m.update(("/"+subvol_path).encode('utf-8')) + meta_filename = "{0}.meta".format(m.digest().hex()) + metapath = os.path.join(".", "volumes", "_legacy", meta_filename) + else: + group = subvol_group if subvol_group is not None else '_nogroup' + metapath = os.path.join(".", "volumes", group, subvol_name, ".meta") + + out = self.mount_a.run_shell(['cat', metapath]) + lines = out.stdout.getvalue().strip().split('\n') + sv_version = -1 + for line in lines: + if line == "version = " + str(version): + sv_version = version + break + self.assertEqual(sv_version, version, "version expected was '{0}' but got '{1}' from meta file at '{2}'".format( + version, sv_version, metapath)) + + def _create_v1_subvolume(self, subvol_name, subvol_group=None, has_snapshot=True, subvol_type='subvolume', state='complete'): + group = subvol_group if subvol_group is not None else '_nogroup' + basepath = os.path.join("volumes", group, subvol_name) + uuid_str = str(uuid.uuid4()) + createpath = os.path.join(basepath, uuid_str) + self.mount_a.run_shell(['mkdir', '-p', createpath]) + + # create a v1 snapshot, to prevent auto upgrades + if has_snapshot: + snappath = os.path.join(createpath, ".snap", "fake") + self.mount_a.run_shell(['mkdir', '-p', snappath]) + + # add required xattrs to subvolume + default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool") + self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool) + + # create a v1 .meta file + meta_contents = "[GLOBAL]\nversion = 1\ntype = {0}\npath = {1}\nstate = {2}\n".format(subvol_type, "/" + createpath, state) + if state == 'pending': + # add a fake clone source + meta_contents = meta_contents + '[source]\nvolume = fake\nsubvolume = fake\nsnapshot = fake\n' + meta_filepath1 = os.path.join(self.mount_a.mountpoint, basepath, ".meta") + sudo_write_file(self.mount_a.client_remote, meta_filepath1, meta_contents) + return createpath + + def _update_fake_trash(self, subvol_name, subvol_group=None, trash_name='fake', create=True): + group = subvol_group if subvol_group is not None else '_nogroup' + trashpath = os.path.join("volumes", group, subvol_name, '.trash', trash_name) + if create: + self.mount_a.run_shell(['mkdir', '-p', trashpath]) + else: + self.mount_a.run_shell(['rmdir', trashpath]) + + def _configure_guest_auth(self, guest_mount, authid, key): + """ + Set up auth credentials for a guest client. + """ + # Create keyring file for the guest client. + keyring_txt = dedent(""" + [client.{authid}] + key = {key} + + """.format(authid=authid,key=key)) + + guest_mount.client_id = authid + guest_mount.client_remote.write_file(guest_mount.get_keyring_path(), + keyring_txt, sudo=True) + # Add a guest client section to the ceph config file. 
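+        # Raise client and objecter debug levels for the guest and point its
+        # keyring option at the file written above so the guest mount can
+        # authenticate.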
+ self.config_set("client.{0}".format(authid), "debug client", 20) + self.config_set("client.{0}".format(authid), "debug objecter", 20) + self.set_conf("client.{0}".format(authid), + "keyring", guest_mount.get_keyring_path()) + + def _auth_metadata_get(self, filedata): + """ + Return a deserialized JSON object, or None + """ + try: + data = json.loads(filedata) + except json.decoder.JSONDecodeError: + data = None + return data + + def setUp(self): + super(TestVolumes, self).setUp() + self.volname = None + self.vol_created = False + self._enable_multi_fs() + self._create_or_reuse_test_volume() + self.config_set('mon', 'mon_allow_pool_delete', True) + self.volume_start = random.randint(1, (1<<20)) + self.subvolume_start = random.randint(1, (1<<20)) + self.group_start = random.randint(1, (1<<20)) + self.snapshot_start = random.randint(1, (1<<20)) + self.clone_start = random.randint(1, (1<<20)) + + def tearDown(self): + if self.vol_created: + self._delete_test_volume() + super(TestVolumes, self).tearDown() + + def test_connection_expiration(self): + # unmount any cephfs mounts + for i in range(0, self.CLIENTS_REQUIRED): + self.mounts[i].umount_wait() + sessions = self._session_list() + self.assertLessEqual(len(sessions), 1) # maybe mgr is already mounted + + # Get the mgr to definitely mount cephfs + subvolume = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolume) + sessions = self._session_list() + self.assertEqual(len(sessions), 1) + + # Now wait for the mgr to expire the connection: + self.wait_until_evicted(sessions[0]['id'], timeout=90) + + def test_volume_create(self): + """ + That the volume can be created and then cleans up + """ + volname = self._generate_random_volume_name() + self._fs_cmd("volume", "create", volname) + volumels = json.loads(self._fs_cmd("volume", "ls")) + + if not (volname in ([volume['name'] for volume in volumels])): + raise RuntimeError("Error creating volume '{0}'".format(volname)) + else: + # clean up + self._fs_cmd("volume", "rm", volname, "--yes-i-really-mean-it") + + def test_volume_ls(self): + """ + That the existing and the newly created volumes can be listed and + finally cleans up. + """ + vls = json.loads(self._fs_cmd("volume", "ls")) + volumes = [volume['name'] for volume in vls] + + #create new volumes and add it to the existing list of volumes + volumenames = self._generate_random_volume_name(2) + for volumename in volumenames: + self._fs_cmd("volume", "create", volumename) + volumes.extend(volumenames) + + # list volumes + try: + volumels = json.loads(self._fs_cmd('volume', 'ls')) + if len(volumels) == 0: + raise RuntimeError("Expected the 'fs volume ls' command to list the created volumes.") + else: + volnames = [volume['name'] for volume in volumels] + if collections.Counter(volnames) != collections.Counter(volumes): + raise RuntimeError("Error creating or listing volumes") + finally: + # clean up + for volume in volumenames: + self._fs_cmd("volume", "rm", volume, "--yes-i-really-mean-it") + + def test_volume_rm(self): + """ + That the volume can only be removed when --yes-i-really-mean-it is used + and verify that the deleted volume is not listed anymore. 
+ """ + for m in self.mounts: + m.umount_wait() + try: + self._fs_cmd("volume", "rm", self.volname) + except CommandFailedError as ce: + if ce.exitstatus != errno.EPERM: + raise RuntimeError("expected the 'fs volume rm' command to fail with EPERM, " + "but it failed with {0}".format(ce.exitstatus)) + else: + self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it") + + #check if it's gone + volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty")) + if (self.volname in [volume['name'] for volume in volumes]): + raise RuntimeError("Expected the 'fs volume rm' command to succeed. " + "The volume {0} not removed.".format(self.volname)) + else: + raise RuntimeError("expected the 'fs volume rm' command to fail.") + + def test_subvolume_marked(self): + """ + ensure a subvolume is marked with the ceph.dir.subvolume xattr + """ + subvolume = self._generate_random_subvolume_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # getpath + subvolpath = self._get_subvolume_path(self.volname, subvolume) + + # subdirectory of a subvolume cannot be moved outside the subvolume once marked with + # the xattr ceph.dir.subvolume, hence test by attempting to rename subvol path (incarnation) + # outside the subvolume + dstpath = os.path.join(self.mount_a.mountpoint, 'volumes', '_nogroup', 'new_subvol_location') + srcpath = os.path.join(self.mount_a.mountpoint, subvolpath) + rename_script = dedent(""" + import os + import errno + try: + os.rename("{src}", "{dst}") + except OSError as e: + if e.errno != errno.EXDEV: + raise RuntimeError("invalid error code on renaming subvolume incarnation out of subvolume directory") + else: + raise RuntimeError("expected renaming subvolume incarnation out of subvolume directory to fail") + """) + self.mount_a.run_python(rename_script.format(src=srcpath, dst=dstpath)) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_volume_rm_arbitrary_pool_removal(self): + """ + That the arbitrary pool added to the volume out of band is removed + successfully on volume removal. + """ + for m in self.mounts: + m.umount_wait() + new_pool = "new_pool" + # add arbitrary data pool + self.fs.add_data_pool(new_pool) + self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it") + + #check if fs is gone + volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty")) + volnames = [volume['name'] for volume in volumes] + self.assertNotIn(self.volname, volnames) + + #check if osd pools are gone + pools = json.loads(self._raw_cmd("osd", "pool", "ls", "detail", "--format=json-pretty")) + for pool in pools: + self.assertNotIn(self.volname, pool["application_metadata"].keys()) + + def test_volume_rm_when_mon_delete_pool_false(self): + """ + That the volume can only be removed when mon_allowd_pool_delete is set + to true and verify that the pools are removed after volume deletion. 
+ """ + for m in self.mounts: + m.umount_wait() + self.config_set('mon', 'mon_allow_pool_delete', False) + try: + self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it") + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EPERM, + "expected the 'fs volume rm' command to fail with EPERM, " + "but it failed with {0}".format(ce.exitstatus)) + self.config_set('mon', 'mon_allow_pool_delete', True) + self._fs_cmd("volume", "rm", self.volname, "--yes-i-really-mean-it") + + #check if fs is gone + volumes = json.loads(self._fs_cmd("volume", "ls", "--format=json-pretty")) + volnames = [volume['name'] for volume in volumes] + self.assertNotIn(self.volname, volnames, + "volume {0} exists after removal".format(self.volname)) + #check if pools are gone + pools = json.loads(self._raw_cmd("osd", "pool", "ls", "detail", "--format=json-pretty")) + for pool in pools: + self.assertNotIn(self.volname, pool["application_metadata"].keys(), + "pool {0} exists after volume removal".format(pool["pool_name"])) + + ### basic subvolume operations + + def test_subvolume_create_and_rm(self): + # create subvolume + subvolume = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # make sure it exists + subvolpath = self._fs_cmd("subvolume", "getpath", self.volname, subvolume) + self.assertNotEqual(subvolpath, None) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + # make sure its gone + try: + self._fs_cmd("subvolume", "getpath", self.volname, subvolume) + except CommandFailedError as ce: + if ce.exitstatus != errno.ENOENT: + raise + else: + raise RuntimeError("expected the 'fs subvolume getpath' command to fail. Subvolume not removed.") + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_expand(self): + """ + That a subvolume can be expanded in size and its quota matches the expected size. + """ + + # create subvolume + subvolname = self._generate_random_subvolume_name() + osize = self.DEFAULT_FILE_SIZE*1024*1024 + self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize)) + + # make sure it exists + subvolpath = self._get_subvolume_path(self.volname, subvolname) + self.assertNotEqual(subvolpath, None) + + # expand the subvolume + nsize = osize*2 + self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize)) + + # verify the quota + size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")) + self.assertEqual(size, nsize) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolname) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_shrink(self): + """ + That a subvolume can be shrinked in size and its quota matches the expected size. 
+ """ + + # create subvolume + subvolname = self._generate_random_subvolume_name() + osize = self.DEFAULT_FILE_SIZE*1024*1024 + self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize)) + + # make sure it exists + subvolpath = self._get_subvolume_path(self.volname, subvolname) + self.assertNotEqual(subvolpath, None) + + # shrink the subvolume + nsize = osize // 2 + self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize)) + + # verify the quota + size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")) + self.assertEqual(size, nsize) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolname) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_resize_fail_invalid_size(self): + """ + That a subvolume cannot be resized to an invalid size and the quota did not change + """ + + osize = self.DEFAULT_FILE_SIZE*1024*1024 + # create subvolume + subvolname = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize)) + + # make sure it exists + subvolpath = self._get_subvolume_path(self.volname, subvolname) + self.assertNotEqual(subvolpath, None) + + # try to resize the subvolume with an invalid size -10 + nsize = -10 + try: + self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize)) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size") + else: + self.fail("expected the 'fs subvolume resize' command to fail") + + # verify the quota did not change + size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")) + self.assertEqual(size, osize) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolname) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_resize_fail_zero_size(self): + """ + That a subvolume cannot be resized to a zero size and the quota did not change + """ + + osize = self.DEFAULT_FILE_SIZE*1024*1024 + # create subvolume + subvolname = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize)) + + # make sure it exists + subvolpath = self._get_subvolume_path(self.volname, subvolname) + self.assertNotEqual(subvolpath, None) + + # try to resize the subvolume with size 0 + nsize = 0 + try: + self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize)) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size") + else: + self.fail("expected the 'fs subvolume resize' command to fail") + + # verify the quota did not change + size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")) + self.assertEqual(size, osize) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolname) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_resize_quota_lt_used_size(self): + """ + That a subvolume can be resized to a size smaller than the current used size + and the resulting quota matches the expected size. 
+ """ + + osize = self.DEFAULT_FILE_SIZE*1024*1024*20 + # create subvolume + subvolname = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize)) + + # make sure it exists + subvolpath = self._get_subvolume_path(self.volname, subvolname) + self.assertNotEqual(subvolpath, None) + + # create one file of 10MB + file_size=self.DEFAULT_FILE_SIZE*10 + number_of_files=1 + log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname, + number_of_files, + file_size)) + filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+1) + self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size) + + usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes")) + susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip()) + self.assertEqual(usedsize, susedsize) + + # shrink the subvolume + nsize = usedsize // 2 + try: + self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize)) + except CommandFailedError: + self.fail("expected the 'fs subvolume resize' command to succeed") + + # verify the quota + size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")) + self.assertEqual(size, nsize) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolname) + + # verify trash dir is clean + self._wait_for_trash_empty() + + + def test_subvolume_resize_fail_quota_lt_used_size_no_shrink(self): + """ + That a subvolume cannot be resized to a size smaller than the current used size + when --no_shrink is given and the quota did not change. + """ + + osize = self.DEFAULT_FILE_SIZE*1024*1024*20 + # create subvolume + subvolname = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize)) + + # make sure it exists + subvolpath = self._get_subvolume_path(self.volname, subvolname) + self.assertNotEqual(subvolpath, None) + + # create one file of 10MB + file_size=self.DEFAULT_FILE_SIZE*10 + number_of_files=1 + log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname, + number_of_files, + file_size)) + filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+2) + self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size) + + usedsize = int(self.mount_a.getfattr(subvolpath, "ceph.dir.rbytes")) + susedsize = int(self.mount_a.run_shell(['stat', '-c' '%s', subvolpath]).stdout.getvalue().strip()) + self.assertEqual(usedsize, susedsize) + + # shrink the subvolume + nsize = usedsize // 2 + try: + self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize), "--no_shrink") + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on resize of subvolume with invalid size") + else: + self.fail("expected the 'fs subvolume resize' command to fail") + + # verify the quota did not change + size = int(self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes")) + self.assertEqual(size, osize) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolname) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_resize_expand_on_full_subvolume(self): + """ + That the subvolume can be expanded from a full subvolume and future writes succeed. 
+ """ + + osize = self.DEFAULT_FILE_SIZE*1024*1024*10 + # create subvolume of quota 10MB and make sure it exists + subvolname = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", str(osize)) + subvolpath = self._get_subvolume_path(self.volname, subvolname) + self.assertNotEqual(subvolpath, None) + + # create one file of size 10MB and write + file_size=self.DEFAULT_FILE_SIZE*10 + number_of_files=1 + log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname, + number_of_files, + file_size)) + filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+3) + self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size) + + # create a file of size 5MB and try write more + file_size=file_size // 2 + number_of_files=1 + log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname, + number_of_files, + file_size)) + filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+4) + try: + self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size) + except CommandFailedError: + # Not able to write. So expand the subvolume more and try writing the 5MB file again + nsize = osize*2 + self._fs_cmd("subvolume", "resize", self.volname, subvolname, str(nsize)) + try: + self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size) + except CommandFailedError: + self.fail("expected filling subvolume {0} with {1} file of size {2}MB" + "to succeed".format(subvolname, number_of_files, file_size)) + else: + self.fail("expected filling subvolume {0} with {1} file of size {2}MB" + "to fail".format(subvolname, number_of_files, file_size)) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolname) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_create_idempotence(self): + # create subvolume + subvolume = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # try creating w/ same subvolume name -- should be idempotent + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_create_idempotence_resize(self): + # create subvolume + subvolume = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # try creating w/ same subvolume name with size -- should set quota + self._fs_cmd("subvolume", "create", self.volname, subvolume, "1000000000") + + # get subvolume metadata + subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume)) + self.assertEqual(subvol_info["bytes_quota"], 1000000000) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_create_isolated_namespace(self): + """ + Create subvolume in separate rados namespace + """ + + # create subvolume + subvolume = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated") + + # get subvolume metadata + subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume)) + self.assertNotEqual(len(subvol_info), 0) + self.assertEqual(subvol_info["pool_namespace"], "fsvolumens_" + subvolume) + + # remove subvolumes + self._fs_cmd("subvolume", 
"rm", self.volname, subvolume) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_create_with_invalid_data_pool_layout(self): + subvolume = self._generate_random_subvolume_name() + data_pool = "invalid_pool" + # create subvolume with invalid data pool layout + try: + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid pool layout") + else: + self.fail("expected the 'fs subvolume create' command to fail") + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_rm_force(self): + # test removing non-existing subvolume with --force + subvolume = self._generate_random_subvolume_name() + try: + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force") + except CommandFailedError: + self.fail("expected the 'fs subvolume rm --force' command to succeed") + + def test_subvolume_create_with_auto_cleanup_on_fail(self): + subvolume = self._generate_random_subvolume_name() + data_pool = "invalid_pool" + # create subvolume with invalid data pool layout fails + with self.assertRaises(CommandFailedError): + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool) + + # check whether subvol path is cleaned up + try: + self._fs_cmd("subvolume", "getpath", self.volname, subvolume) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of non-existent subvolume") + else: + self.fail("expected the 'fs subvolume getpath' command to fail") + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_create_with_invalid_size(self): + # create subvolume with an invalid size -1 + subvolume = self._generate_random_subvolume_name() + try: + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--size", "-1") + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on create of subvolume with invalid size") + else: + self.fail("expected the 'fs subvolume create' command to fail") + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_nonexistent_subvolume_rm(self): + # remove non-existing subvolume + subvolume = "non_existent_subvolume" + + # try, remove subvolume + try: + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + except CommandFailedError as ce: + if ce.exitstatus != errno.ENOENT: + raise + else: + raise RuntimeError("expected the 'fs subvolume rm' command to fail") + + def test_nonexistent_subvolume_group_create(self): + subvolume = self._generate_random_subvolume_name() + group = "non_existent_group" + + # try, creating subvolume in a nonexistent group + try: + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + except CommandFailedError as ce: + if ce.exitstatus != errno.ENOENT: + raise + else: + raise RuntimeError("expected the 'fs subvolume create' command to fail") + + def test_default_uid_gid_subvolume(self): + subvolume = self._generate_random_subvolume_name() + expected_uid = 0 + expected_gid = 0 + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + subvol_path = self._get_subvolume_path(self.volname, subvolume) + + # check subvolume's uid and gid + stat = self.mount_a.stat(subvol_path) + self.assertEqual(stat['st_uid'], expected_uid) + self.assertEqual(stat['st_gid'], expected_gid) + + # 
remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_ls(self): + # tests the 'fs subvolume ls' command + + subvolumes = [] + + # create subvolumes + subvolumes = self._generate_random_subvolume_name(3) + for subvolume in subvolumes: + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # list subvolumes + subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) + if len(subvolumels) == 0: + self.fail("Expected the 'fs subvolume ls' command to list the created subvolumes.") + else: + subvolnames = [subvolume['name'] for subvolume in subvolumels] + if collections.Counter(subvolnames) != collections.Counter(subvolumes): + self.fail("Error creating or listing subvolumes") + + # remove subvolume + for subvolume in subvolumes: + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_ls_for_notexistent_default_group(self): + # tests the 'fs subvolume ls' command when the default group '_nogroup' doesn't exist + # prerequisite: we expect that the volume is created and the default group _nogroup is + # NOT created (i.e. a subvolume without group is not created) + + # list subvolumes + subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) + if len(subvolumels) > 0: + raise RuntimeError("Expected the 'fs subvolume ls' command to output an empty list.") + + def test_subvolume_resize_infinite_size(self): + """ + That a subvolume can be resized to an infinite size by unsetting its quota. + """ + + # create subvolume + subvolname = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", + str(self.DEFAULT_FILE_SIZE*1024*1024)) + + # make sure it exists + subvolpath = self._get_subvolume_path(self.volname, subvolname) + self.assertNotEqual(subvolpath, None) + + # resize inf + self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf") + + # verify that the quota is None + size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes") + self.assertEqual(size, None) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolname) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_resize_infinite_size_future_writes(self): + """ + That a subvolume can be resized to an infinite size and the future writes succeed. 
+ """ + + # create subvolume + subvolname = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolname, "--size", + str(self.DEFAULT_FILE_SIZE*1024*1024*5)) + + # make sure it exists + subvolpath = self._get_subvolume_path(self.volname, subvolname) + self.assertNotEqual(subvolpath, None) + + # resize inf + self._fs_cmd("subvolume", "resize", self.volname, subvolname, "inf") + + # verify that the quota is None + size = self.mount_a.getfattr(subvolpath, "ceph.quota.max_bytes") + self.assertEqual(size, None) + + # create one file of 10MB and try to write + file_size=self.DEFAULT_FILE_SIZE*10 + number_of_files=1 + log.debug("filling subvolume {0} with {1} file of size {2}MB".format(subvolname, + number_of_files, + file_size)) + filename = "{0}.{1}".format(TestVolumes.TEST_FILE_NAME_PREFIX, self.DEFAULT_NUMBER_OF_FILES+5) + + try: + self.mount_a.write_n_mb(os.path.join(subvolpath, filename), file_size) + except CommandFailedError: + self.fail("expected filling subvolume {0} with {1} file of size {2}MB " + "to succeed".format(subvolname, number_of_files, file_size)) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolname) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_info(self): + # tests the 'fs subvolume info' command + + subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime", + "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace", + "type", "uid", "features", "state"] + + # create subvolume + subvolume = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # get subvolume metadata + subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume)) + for md in subvol_md: + self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md)) + + self.assertEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should be set to undefined if quota is not set") + self.assertEqual(subvol_info["bytes_quota"], "infinite", "bytes_quota should be set to infinite if quota is not set") + self.assertEqual(subvol_info["pool_namespace"], "", "expected pool namespace to be empty") + self.assertEqual(subvol_info["state"], "complete", "expected state to be complete") + + self.assertEqual(len(subvol_info["features"]), 3, + msg="expected 3 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"])) + for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']: + self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature)) + + nsize = self.DEFAULT_FILE_SIZE*1024*1024 + self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize)) + + # get subvolume metadata after quota set + subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume)) + for md in subvol_md: + self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md)) + + self.assertNotEqual(subvol_info["bytes_pcent"], "undefined", "bytes_pcent should not be set to undefined if quota is not set") + self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize)) + self.assertEqual(subvol_info["type"], "subvolume", "type should be set to subvolume") + self.assertEqual(subvol_info["state"], "complete", "expected state to be complete") + + self.assertEqual(len(subvol_info["features"]), 3, + msg="expected 3 features, found 
'{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"])) + for feature in ['snapshot-clone', 'snapshot-autoprotect', 'snapshot-retention']: + self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature)) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_clone_subvolume_info(self): + + # tests the 'fs subvolume info' command for a clone + subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime", + "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace", + "type", "uid"] + + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=1) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) + + # check clone status + self._wait_for_clone_to_complete(clone) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + subvol_info = json.loads(self._get_subvolume_info(self.volname, clone)) + if len(subvol_info) == 0: + raise RuntimeError("Expected the 'fs subvolume info' command to list metadata of subvolume") + for md in subvol_md: + if md not in subvol_info.keys(): + raise RuntimeError("%s not present in the metadata of subvolume" % md) + if subvol_info["type"] != "clone": + raise RuntimeError("type should be set to clone") + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + self._fs_cmd("subvolume", "rm", self.volname, clone) + + # verify trash dir is clean + self._wait_for_trash_empty() + + + ### subvolume group operations + + def test_subvolume_create_and_rm_in_group(self): + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_group_create_with_desired_data_pool_layout(self): + group1, group2 = self._generate_random_group_name(2) + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group1) + group1_path = self._get_subvolume_group_path(self.volname, group1) + + default_pool = self.mount_a.getfattr(group1_path, "ceph.dir.layout.pool") + new_pool = "new_pool" + self.assertNotEqual(default_pool, new_pool) + + # add data pool + self.fs.add_data_pool(new_pool) + + # create group specifying the new data pool as its pool layout + self._fs_cmd("subvolumegroup", "create", self.volname, group2, + "--pool_layout", new_pool) + group2_path = self._get_subvolume_group_path(self.volname, group2) + + desired_pool = self.mount_a.getfattr(group2_path, "ceph.dir.layout.pool") + self.assertEqual(desired_pool, new_pool) + + self._fs_cmd("subvolumegroup", "rm", self.volname, group1) + 
self._fs_cmd("subvolumegroup", "rm", self.volname, group2) + + ### authorize operations + + def test_authorize_deauthorize_legacy_subvolume(self): + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + authid = "alice" + + guest_mount = self.mount_b + guest_mount.umount_wait() + + # emulate a old-fashioned subvolume in a custom group + createpath = os.path.join(".", "volumes", group, subvolume) + self.mount_a.run_shell(['mkdir', '-p', createpath]) + + # add required xattrs to subvolume + default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool") + self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool) + + mount_path = os.path.join("/", "volumes", group, subvolume) + + # authorize guest authID read-write access to subvolume + key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid, + "--group_name", group, "--tenant_id", "tenant_id") + + # guest authID should exist + existing_ids = [a['entity'] for a in self.auth_list()] + self.assertIn("client.{0}".format(authid), existing_ids) + + # configure credentials for guest client + self._configure_guest_auth(guest_mount, authid, key) + + # mount the subvolume, and write to it + guest_mount.mount(mount_path=mount_path) + guest_mount.write_n_mb("data.bin", 1) + + # authorize guest authID read access to subvolume + key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid, + "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r") + + # guest client sees the change in access level to read only after a + # remount of the subvolume. + guest_mount.umount_wait() + guest_mount.mount(mount_path=mount_path) + + # read existing content of the subvolume + self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"]) + # cannot write into read-only subvolume + with self.assertRaises(CommandFailedError): + guest_mount.write_n_mb("rogue.bin", 1) + + # cleanup + guest_mount.umount_wait() + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid, + "--group_name", group) + # guest authID should no longer exist + existing_ids = [a['entity'] for a in self.auth_list()] + self.assertNotIn("client.{0}".format(authid), existing_ids) + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_authorize_deauthorize_subvolume(self): + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + authid = "alice" + + guest_mount = self.mount_b + guest_mount.umount_wait() + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume, + "--group_name", group).rstrip() + + # authorize guest authID read-write access to subvolume + key = self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid, + "--group_name", group, "--tenant_id", "tenant_id") + + # guest authID should exist + existing_ids = [a['entity'] for a in self.auth_list()] + self.assertIn("client.{0}".format(authid), existing_ids) + + # configure credentials for guest client + self._configure_guest_auth(guest_mount, authid, key) + + # mount the subvolume, and write to it + guest_mount.mount(mount_path=mount_path) + guest_mount.write_n_mb("data.bin", 1) + + # authorize guest authID read access to subvolume + key 
= self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid, + "--group_name", group, "--tenant_id", "tenant_id", "--access_level", "r") + + # guest client sees the change in access level to read only after a + # remount of the subvolume. + guest_mount.umount_wait() + guest_mount.mount(mount_path=mount_path) + + # read existing content of the subvolume + self.assertListEqual(guest_mount.ls(guest_mount.mountpoint), ["data.bin"]) + # cannot write into read-only subvolume + with self.assertRaises(CommandFailedError): + guest_mount.write_n_mb("rogue.bin", 1) + + # cleanup + guest_mount.umount_wait() + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid, + "--group_name", group) + # guest authID should no longer exist + existing_ids = [a['entity'] for a in self.auth_list()] + self.assertNotIn("client.{0}".format(authid), existing_ids) + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_multitenant_subvolumes(self): + """ + That subvolume access can be restricted to a tenant. + + That metadata used to enforce tenant isolation of + subvolumes is stored as a two-way mapping between auth + IDs and subvolumes that they're authorized to access. + """ + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + + guest_mount = self.mount_b + + # Guest clients belonging to different tenants, but using the same + # auth ID. + auth_id = "alice" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + guestclient_2 = { + "auth_id": auth_id, + "tenant_id": "tenant2", + } + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # Check that subvolume metadata file is created on subvolume creation. + subvol_metadata_filename = "_{0}:{1}.meta".format(group, subvolume) + self.assertIn(subvol_metadata_filename, guest_mount.ls("volumes")) + + # Authorize 'guestclient_1', using auth ID 'alice' and belonging to + # 'tenant1', with 'rw' access to the volume. + self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + # Check that auth metadata file for auth ID 'alice', is + # created on authorizing 'alice' access to the subvolume. + auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"]) + self.assertIn(auth_metadata_filename, guest_mount.ls("volumes")) + + # Verify that the auth metadata file stores the tenant ID that the + # auth ID belongs to, the auth ID's authorized access levels + # for different subvolumes, versioning details, etc. + expected_auth_metadata = { + "version": 5, + "compat_version": 6, + "dirty": False, + "tenant_id": "tenant1", + "subvolumes": { + "{0}/{1}".format(group,subvolume): { + "dirty": False, + "access_level": "rw" + } + } + } + + auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename))) + self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"]) + del expected_auth_metadata["version"] + del auth_metadata["version"] + self.assertEqual(expected_auth_metadata, auth_metadata) + + # Verify that the subvolume metadata file stores info about auth IDs + # and their access levels to the subvolume, versioning details, etc. 
+ expected_subvol_metadata = { + "version": 1, + "compat_version": 1, + "auths": { + "alice": { + "dirty": False, + "access_level": "rw" + } + } + } + subvol_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(subvol_metadata_filename))) + + self.assertGreaterEqual(subvol_metadata["version"], expected_subvol_metadata["version"]) + del expected_subvol_metadata["version"] + del subvol_metadata["version"] + self.assertEqual(expected_subvol_metadata, subvol_metadata) + + # Cannot authorize 'guestclient_2' to access the volume. + # It uses auth ID 'alice', which has already been used by + # 'guestclient_1' belonging to another tenant for accessing + # the volume. + + try: + self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_2["auth_id"], + "--group_name", group, "--tenant_id", guestclient_2["tenant_id"]) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EPERM, + "Invalid error code returned on authorize of subvolume with same auth_id but different tenant_id") + else: + self.fail("expected the 'fs subvolume authorize' command to fail") + + # Check that auth metadata file is cleaned up on removing + # auth ID's only access to a volume. + + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, + "--group_name", group) + self.assertNotIn(auth_metadata_filename, guest_mount.ls("volumes")) + + # Check that subvolume metadata file is cleaned up on subvolume deletion. + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) + self.assertNotIn(subvol_metadata_filename, guest_mount.ls("volumes")) + + # clean up + guest_mount.umount_wait() + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_authorized_list(self): + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + authid1 = "alice" + authid2 = "guest1" + authid3 = "guest2" + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # authorize alice authID read-write access to subvolume + self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid1, + "--group_name", group) + # authorize guest1 authID read-write access to subvolume + self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid2, + "--group_name", group) + # authorize guest2 authID read access to subvolume + self._fs_cmd("subvolume", "authorize", self.volname, subvolume, authid3, + "--group_name", group, "--access_level", "r") + + # list authorized-ids of the subvolume + expected_auth_list = [{'alice': 'rw'}, {'guest1': 'rw'}, {'guest2': 'r'}] + auth_list = json.loads(self._fs_cmd('subvolume', 'authorized_list', self.volname, subvolume, "--group_name", group)) + self.assertCountEqual(expected_auth_list, auth_list) + + # cleanup + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid1, + "--group_name", group) + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid2, + "--group_name", group) + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, authid3, + "--group_name", group) + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_authorize_auth_id_not_created_by_mgr_volumes(self): + """ + If the auth_id already exists and is not created by the mgr plugin, + it's not allowed to 
authorize the auth-id by default. + """ + + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + + # Create auth_id + self.fs.mon_manager.raw_cluster_cmd( + "auth", "get-or-create", "client.guest1", + "mds", "allow *", + "osd", "allow rw", + "mon", "allow *" + ) + + auth_id = "guest1" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + try: + self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EPERM, + "Invalid error code returned on authorize of subvolume for auth_id created out of band") + else: + self.fail("expected the 'fs subvolume authorize' command to fail") + + # clean up + self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1") + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_authorize_allow_existing_id_option(self): + """ + If the auth_id already exists and is not created by mgr volumes, + it's not allowed to authorize the auth-id by default but is + allowed with option allow_existing_id. + """ + + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + + # Create auth_id + self.fs.mon_manager.raw_cluster_cmd( + "auth", "get-or-create", "client.guest1", + "mds", "allow *", + "osd", "allow rw", + "mon", "allow *" + ) + + auth_id = "guest1" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # Cannot authorize 'guestclient_1' to access the volume by default, + # which already exists and not created by mgr volumes but is allowed + # with option 'allow_existing_id'. + self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"], "--allow-existing-id") + + # clean up + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, + "--group_name", group) + self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1") + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_deauthorize_auth_id_after_out_of_band_update(self): + """ + If the auth_id authorized by mgr/volumes plugin is updated + out of band, the auth_id should not be deleted after a + deauthorize. It should only remove caps associated with it. + """ + + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + + auth_id = "guest1" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # Authorize 'guestclient_1' to access the subvolume. 
+ self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + subvol_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolume, + "--group_name", group).rstrip() + + # Update caps for guestclient_1 out of band + out = self.fs.mon_manager.raw_cluster_cmd( + "auth", "caps", "client.guest1", + "mds", "allow rw path=/volumes/{0}, allow rw path={1}".format(group, subvol_path), + "osd", "allow rw pool=cephfs_data", + "mon", "allow r", + "mgr", "allow *" + ) + + # Deauthorize guestclient_1 + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group) + + # Validate the caps of guestclient_1 after deauthorize. It should not have deleted + # guestclient_1. The mgr and mds caps should be present which was updated out of band. + out = json.loads(self.fs.mon_manager.raw_cluster_cmd("auth", "get", "client.guest1", "--format=json-pretty")) + + self.assertEqual("client.guest1", out[0]["entity"]) + self.assertEqual("allow rw path=/volumes/{0}".format(group), out[0]["caps"]["mds"]) + self.assertEqual("allow *", out[0]["caps"]["mgr"]) + self.assertNotIn("osd", out[0]["caps"]) + + # clean up + out = self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1") + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_recover_auth_metadata_during_authorize(self): + """ + That auth metadata manager can recover from partial auth updates using + metadata files, which store auth info and its update status info. This + test validates the recovery during authorize. + """ + + guest_mount = self.mount_b + + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + + auth_id = "guest1" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # Authorize 'guestclient_1' to access the subvolume. + self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + # Check that auth metadata file for auth ID 'guest1', is + # created on authorizing 'guest1' access to the subvolume. + auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"]) + self.assertIn(auth_metadata_filename, guest_mount.ls("volumes")) + expected_auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename))) + + # Induce partial auth update state by modifying the auth metadata file, + # and then run authorize again. + guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)]) + + # Authorize 'guestclient_1' to access the subvolume. 
+ self._fs_cmd("subvolume", "authorize", self.volname, subvolume, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename))) + self.assertEqual(auth_metadata_content, expected_auth_metadata_content) + + # clean up + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume, auth_id, "--group_name", group) + guest_mount.umount_wait() + self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1") + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_recover_auth_metadata_during_deauthorize(self): + """ + That auth metadata manager can recover from partial auth updates using + metadata files, which store auth info and its update status info. This + test validates the recovery during deauthorize. + """ + + guest_mount = self.mount_b + + subvolume1, subvolume2 = self._generate_random_subvolume_name(2) + group = self._generate_random_group_name() + + guestclient_1 = { + "auth_id": "guest1", + "tenant_id": "tenant1", + } + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolumes in group + self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group) + self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group) + + # Authorize 'guestclient_1' to access the subvolume1. + self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + # Check that auth metadata file for auth ID 'guest1', is + # created on authorizing 'guest1' access to the subvolume1. + auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"]) + self.assertIn(auth_metadata_filename, guest_mount.ls("volumes")) + expected_auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename))) + + # Authorize 'guestclient_1' to access the subvolume2. + self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + # Induce partial auth update state by modifying the auth metadata file, + # and then run de-authorize. + guest_mount.run_shell(['sed', '-i', 's/false/true/g', 'volumes/{0}'.format(auth_metadata_filename)]) + + # Deauthorize 'guestclient_1' to access the subvolume2. + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, guestclient_1["auth_id"], + "--group_name", group) + + auth_metadata_content = self._auth_metadata_get(self.mount_a.read_file("volumes/{0}".format(auth_metadata_filename))) + self.assertEqual(auth_metadata_content, expected_auth_metadata_content) + + # clean up + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, "guest1", "--group_name", group) + guest_mount.umount_wait() + self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1") + self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group) + self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_update_old_style_auth_metadata_to_new_during_authorize(self): + """ + CephVolumeClient stores the subvolume data in auth metadata file with + 'volumes' key as there was no subvolume namespace. 
It doesn't make sense + with mgr/volumes. This test validates the transparent update of 'volumes' + key to 'subvolumes' key in auth metadata file during authorize. + """ + + guest_mount = self.mount_b + + subvolume1, subvolume2 = self._generate_random_subvolume_name(2) + group = self._generate_random_group_name() + + auth_id = "guest1" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolumes in group + self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group) + self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group) + + # Authorize 'guestclient_1' to access the subvolume1. + self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + # Check that auth metadata file for auth ID 'guest1', is + # created on authorizing 'guest1' access to the subvolume1. + auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"]) + self.assertIn(auth_metadata_filename, guest_mount.ls("volumes")) + + # Replace 'subvolumes' with 'volumes' to emulate an old-style auth-metadata file + guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)]) + + # Authorize 'guestclient_1' to access the subvolume2. This should transparently update 'volumes' to 'subvolumes'. + self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + expected_auth_metadata = { + "version": 5, + "compat_version": 6, + "dirty": False, + "tenant_id": "tenant1", + "subvolumes": { + "{0}/{1}".format(group,subvolume1): { + "dirty": False, + "access_level": "rw" + }, + "{0}/{1}".format(group,subvolume2): { + "dirty": False, + "access_level": "rw" + } + } + } + + auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename))) + + self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"]) + del expected_auth_metadata["version"] + del auth_metadata["version"] + self.assertEqual(expected_auth_metadata, auth_metadata) + + # clean up + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group) + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group) + guest_mount.umount_wait() + self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1") + self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group) + self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_update_old_style_auth_metadata_to_new_during_deauthorize(self): + """ + CephVolumeClient stores the subvolume data in auth metadata file with + 'volumes' key as there was no subvolume namespace. It doesn't make sense + with mgr/volumes. This test validates the transparent update of 'volumes' + key to 'subvolumes' key in auth metadata file during deauthorize. 
+ """ + + guest_mount = self.mount_b + + subvolume1, subvolume2 = self._generate_random_subvolume_name(2) + group = self._generate_random_group_name() + + auth_id = "guest1" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolumes in group + self._fs_cmd("subvolume", "create", self.volname, subvolume1, "--group_name", group) + self._fs_cmd("subvolume", "create", self.volname, subvolume2, "--group_name", group) + + # Authorize 'guestclient_1' to access the subvolume1. + self._fs_cmd("subvolume", "authorize", self.volname, subvolume1, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + # Authorize 'guestclient_1' to access the subvolume2. + self._fs_cmd("subvolume", "authorize", self.volname, subvolume2, guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + # Check that auth metadata file for auth ID 'guest1', is created. + auth_metadata_filename = "${0}.meta".format(guestclient_1["auth_id"]) + self.assertIn(auth_metadata_filename, guest_mount.ls("volumes")) + + # Replace 'subvolumes' to 'volumes', old style auth-metadata file + guest_mount.run_shell(['sed', '-i', 's/subvolumes/volumes/g', 'volumes/{0}'.format(auth_metadata_filename)]) + + # Deauthorize 'guestclient_1' to access the subvolume2. This should update 'volumes' to subvolumes' + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume2, auth_id, "--group_name", group) + + expected_auth_metadata = { + "version": 5, + "compat_version": 6, + "dirty": False, + "tenant_id": "tenant1", + "subvolumes": { + "{0}/{1}".format(group,subvolume1): { + "dirty": False, + "access_level": "rw" + } + } + } + + auth_metadata = self._auth_metadata_get(guest_mount.read_file("volumes/{0}".format(auth_metadata_filename))) + + self.assertGreaterEqual(auth_metadata["version"], expected_auth_metadata["version"]) + del expected_auth_metadata["version"] + del auth_metadata["version"] + self.assertEqual(expected_auth_metadata, auth_metadata) + + # clean up + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolume1, auth_id, "--group_name", group) + guest_mount.umount_wait() + self.fs.mon_manager.raw_cluster_cmd("auth", "rm", "client.guest1") + self._fs_cmd("subvolume", "rm", self.volname, subvolume1, "--group_name", group) + self._fs_cmd("subvolume", "rm", self.volname, subvolume2, "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_evict_client(self): + """ + That a subvolume client can be evicted based on the auth ID + """ + + subvolumes = self._generate_random_subvolume_name(2) + group = self._generate_random_group_name() + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # mounts[0] and mounts[1] would be used as guests to mount the volumes/shares. + for i in range(0, 2): + self.mounts[i].umount_wait() + guest_mounts = (self.mounts[0], self.mounts[1]) + auth_id = "guest" + guestclient_1 = { + "auth_id": auth_id, + "tenant_id": "tenant1", + } + + # Create two subvolumes. Authorize 'guest' auth ID to mount the two + # subvolumes. Mount the two subvolumes. Write data to the volumes. + for i in range(2): + # Create subvolume. 
+ self._fs_cmd("subvolume", "create", self.volname, subvolumes[i], "--group_name", group) + + # authorize guest authID read-write access to subvolume + key = self._fs_cmd("subvolume", "authorize", self.volname, subvolumes[i], guestclient_1["auth_id"], + "--group_name", group, "--tenant_id", guestclient_1["tenant_id"]) + + mount_path = self._fs_cmd("subvolume", "getpath", self.volname, subvolumes[i], + "--group_name", group).rstrip() + # configure credentials for guest client + self._configure_guest_auth(guest_mounts[i], auth_id, key) + + # mount the subvolume, and write to it + guest_mounts[i].mount(mount_path=mount_path) + guest_mounts[i].write_n_mb("data.bin", 1) + + # Evict client, guest_mounts[0], using auth ID 'guest' and has mounted + # one volume. + self._fs_cmd("subvolume", "evict", self.volname, subvolumes[0], auth_id, "--group_name", group) + + # Evicted guest client, guest_mounts[0], should not be able to do + # anymore metadata ops. It should start failing all operations + # when it sees that its own address is in the blocklist. + try: + guest_mounts[0].write_n_mb("rogue.bin", 1) + except CommandFailedError: + pass + else: + raise RuntimeError("post-eviction write should have failed!") + + # The blocklisted guest client should now be unmountable + guest_mounts[0].umount_wait() + + # Guest client, guest_mounts[1], using the same auth ID 'guest', but + # has mounted the other volume, should be able to use its volume + # unaffected. + guest_mounts[1].write_n_mb("data.bin.1", 1) + + # Cleanup. + guest_mounts[1].umount_wait() + for i in range(2): + self._fs_cmd("subvolume", "deauthorize", self.volname, subvolumes[i], auth_id, "--group_name", group) + self._fs_cmd("subvolume", "rm", self.volname, subvolumes[i], "--group_name", group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_group_create_with_invalid_data_pool_layout(self): + group = self._generate_random_group_name() + data_pool = "invalid_pool" + # create group with invalid data pool layout + try: + self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool) + except CommandFailedError as ce: + if ce.exitstatus != errno.EINVAL: + raise + else: + raise RuntimeError("expected the 'fs subvolumegroup create' command to fail") + + def test_subvolume_group_rm_force(self): + # test removing non-existing subvolume group with --force + group = self._generate_random_group_name() + try: + self._fs_cmd("subvolumegroup", "rm", self.volname, group, "--force") + except CommandFailedError as ce: + raise RuntimeError("expected the 'fs subvolumegroup rm --force' command to succeed") + + def test_subvolume_group_create_with_auto_cleanup_on_fail(self): + group = self._generate_random_group_name() + data_pool = "invalid_pool" + # create group with invalid data pool layout + with self.assertRaises(CommandFailedError): + self._fs_cmd("subvolumegroup", "create", self.volname, group, "--pool_layout", data_pool) + + # check whether group path is cleaned up + try: + self._fs_cmd("subvolumegroup", "getpath", self.volname, group) + except CommandFailedError as ce: + if ce.exitstatus != errno.ENOENT: + raise + else: + raise RuntimeError("expected the 'fs subvolumegroup getpath' command to fail") + + def test_subvolume_create_with_desired_data_pool_layout_in_group(self): + subvol1, subvol2 = self._generate_random_subvolume_name(2) + group = self._generate_random_group_name() + + # create group. this also helps set default pool layout for subvolumes + # created within the group. 
+ self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group. + self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group) + subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group) + + default_pool = self.mount_a.getfattr(subvol1_path, "ceph.dir.layout.pool") + new_pool = "new_pool" + self.assertNotEqual(default_pool, new_pool) + + # add data pool + self.fs.add_data_pool(new_pool) + + # create subvolume specifying the new data pool as its pool layout + self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group, + "--pool_layout", new_pool) + subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group) + + desired_pool = self.mount_a.getfattr(subvol2_path, "ceph.dir.layout.pool") + self.assertEqual(desired_pool, new_pool) + + self._fs_cmd("subvolume", "rm", self.volname, subvol2, group) + self._fs_cmd("subvolume", "rm", self.volname, subvol1, group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_group_create_with_desired_mode(self): + group1, group2 = self._generate_random_group_name(2) + # default mode + expected_mode1 = "755" + # desired mode + expected_mode2 = "777" + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group1) + self._fs_cmd("subvolumegroup", "create", self.volname, group2, "--mode", "777") + + group1_path = self._get_subvolume_group_path(self.volname, group1) + group2_path = self._get_subvolume_group_path(self.volname, group2) + + # check group's mode + actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', group1_path]).stdout.getvalue().strip() + actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', group2_path]).stdout.getvalue().strip() + self.assertEqual(actual_mode1, expected_mode1) + self.assertEqual(actual_mode2, expected_mode2) + + self._fs_cmd("subvolumegroup", "rm", self.volname, group1) + self._fs_cmd("subvolumegroup", "rm", self.volname, group2) + + def test_subvolume_group_create_with_desired_uid_gid(self): + """ + That the subvolume group can be created with the desired uid and gid and its uid and gid matches the + expected values. 
+ """ + uid = 1000 + gid = 1000 + + # create subvolume group + subvolgroupname = self._generate_random_group_name() + self._fs_cmd("subvolumegroup", "create", self.volname, subvolgroupname, "--uid", str(uid), "--gid", str(gid)) + + # make sure it exists + subvolgrouppath = self._get_subvolume_group_path(self.volname, subvolgroupname) + self.assertNotEqual(subvolgrouppath, None) + + # verify the uid and gid + suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolgrouppath]).stdout.getvalue().strip()) + sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolgrouppath]).stdout.getvalue().strip()) + self.assertEqual(uid, suid) + self.assertEqual(gid, sgid) + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, subvolgroupname) + + def test_subvolume_create_with_desired_mode_in_group(self): + subvol1, subvol2, subvol3 = self._generate_random_subvolume_name(3) + + group = self._generate_random_group_name() + # default mode + expected_mode1 = "755" + # desired mode + expected_mode2 = "777" + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvol1, "--group_name", group) + self._fs_cmd("subvolume", "create", self.volname, subvol2, "--group_name", group, "--mode", "777") + # check whether mode 0777 also works + self._fs_cmd("subvolume", "create", self.volname, subvol3, "--group_name", group, "--mode", "0777") + + subvol1_path = self._get_subvolume_path(self.volname, subvol1, group_name=group) + subvol2_path = self._get_subvolume_path(self.volname, subvol2, group_name=group) + subvol3_path = self._get_subvolume_path(self.volname, subvol3, group_name=group) + + # check subvolume's mode + actual_mode1 = self.mount_a.run_shell(['stat', '-c' '%a', subvol1_path]).stdout.getvalue().strip() + actual_mode2 = self.mount_a.run_shell(['stat', '-c' '%a', subvol2_path]).stdout.getvalue().strip() + actual_mode3 = self.mount_a.run_shell(['stat', '-c' '%a', subvol3_path]).stdout.getvalue().strip() + self.assertEqual(actual_mode1, expected_mode1) + self.assertEqual(actual_mode2, expected_mode2) + self.assertEqual(actual_mode3, expected_mode2) + + self._fs_cmd("subvolume", "rm", self.volname, subvol1, group) + self._fs_cmd("subvolume", "rm", self.volname, subvol2, group) + self._fs_cmd("subvolume", "rm", self.volname, subvol3, group) + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_create_with_desired_uid_gid(self): + """ + That the subvolume can be created with the desired uid and gid and its uid and gid matches the + expected values. 
+ """ + uid = 1000 + gid = 1000 + + # create subvolume + subvolname = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolname, "--uid", str(uid), "--gid", str(gid)) + + # make sure it exists + subvolpath = self._get_subvolume_path(self.volname, subvolname) + self.assertNotEqual(subvolpath, None) + + # verify the uid and gid + suid = int(self.mount_a.run_shell(['stat', '-c' '%u', subvolpath]).stdout.getvalue().strip()) + sgid = int(self.mount_a.run_shell(['stat', '-c' '%g', subvolpath]).stdout.getvalue().strip()) + self.assertEqual(uid, suid) + self.assertEqual(gid, sgid) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolname) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_nonexistent_subvolume_group_rm(self): + group = "non_existent_group" + + # try, remove subvolume group + try: + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + except CommandFailedError as ce: + if ce.exitstatus != errno.ENOENT: + raise + else: + raise RuntimeError("expected the 'fs subvolumegroup rm' command to fail") + + def test_default_uid_gid_subvolume_group(self): + group = self._generate_random_group_name() + expected_uid = 0 + expected_gid = 0 + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + group_path = self._get_subvolume_group_path(self.volname, group) + + # check group's uid and gid + stat = self.mount_a.stat(group_path) + self.assertEqual(stat['st_uid'], expected_uid) + self.assertEqual(stat['st_gid'], expected_gid) + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_group_ls(self): + # tests the 'fs subvolumegroup ls' command + + subvolumegroups = [] + + #create subvolumegroups + subvolumegroups = self._generate_random_group_name(3) + for groupname in subvolumegroups: + self._fs_cmd("subvolumegroup", "create", self.volname, groupname) + + subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname)) + if len(subvolumegroupls) == 0: + raise RuntimeError("Expected the 'fs subvolumegroup ls' command to list the created subvolume groups") + else: + subvolgroupnames = [subvolumegroup['name'] for subvolumegroup in subvolumegroupls] + if collections.Counter(subvolgroupnames) != collections.Counter(subvolumegroups): + raise RuntimeError("Error creating or listing subvolume groups") + + def test_subvolume_group_ls_for_nonexistent_volume(self): + # tests the 'fs subvolumegroup ls' command when /volume doesn't exist + # prerequisite: we expect that the test volume is created and a subvolumegroup is NOT created + + # list subvolume groups + subvolumegroupls = json.loads(self._fs_cmd('subvolumegroup', 'ls', self.volname)) + if len(subvolumegroupls) > 0: + raise RuntimeError("Expected the 'fs subvolumegroup ls' command to output an empty list") + + ### snapshot operations + + def test_subvolume_snapshot_create_and_rm(self): + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_snapshot_info(self): + 
+ """ + tests the 'fs subvolume snapshot info' command + """ + + snap_md = ["created_at", "data_pool", "has_pending_clones", "size"] + + subvolume = self._generate_random_subvolume_name() + snapshot, snap_missing = self._generate_random_snapshot_name(2) + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=1) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot)) + for md in snap_md: + self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md)) + self.assertEqual(snap_info["has_pending_clones"], "no") + + # snapshot info for non-existent snapshot + try: + self._get_subvolume_snapshot_info(self.volname, subvolume, snap_missing) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot info of non-existent snapshot") + else: + self.fail("expected snapshot info of non-existent snapshot to fail") + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_snapshot_create_idempotence(self): + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # try creating w/ same subvolume snapshot name -- should be idempotent + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_nonexistent_subvolume_snapshot_rm(self): + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove snapshot again + try: + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + except CommandFailedError as ce: + if ce.exitstatus != errno.ENOENT: + raise + else: + raise RuntimeError("expected the 'fs subvolume snapshot rm' command to fail") + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_snapshot_rm_force(self): + # test removing non existing subvolume snapshot with --force + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + + # remove snapshot + try: + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, "--force") + except CommandFailedError as ce: + raise RuntimeError("expected the 'fs subvolume snapshot rm --force' command to succeed") + + def test_subvolume_snapshot_in_group(self): + 
subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + snapshot = self._generate_random_snapshot_name() + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # snapshot subvolume in group + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_snapshot_ls(self): + # tests the 'fs subvolume snapshot ls' command + + snapshots = [] + + # create subvolume + subvolume = self._generate_random_subvolume_name() + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # create subvolume snapshots + snapshots = self._generate_random_snapshot_name(3) + for snapshot in snapshots: + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume)) + if len(subvolsnapshotls) == 0: + self.fail("Expected the 'fs subvolume snapshot ls' command to list the created subvolume snapshots") + else: + snapshotnames = [snapshot['name'] for snapshot in subvolsnapshotls] + if collections.Counter(snapshotnames) != collections.Counter(snapshots): + self.fail("Error creating or listing subvolume snapshots") + + # remove snapshot + for snapshot in snapshots: + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_group_snapshot_unsupported_status(self): + group = self._generate_random_group_name() + snapshot = self._generate_random_snapshot_name() + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # snapshot group + try: + self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.ENOSYS, "invalid error code on subvolumegroup snapshot create") + else: + self.fail("expected subvolumegroup snapshot create command to fail") + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + @unittest.skip("skipping subvolumegroup snapshot tests") + def test_subvolume_group_snapshot_create_and_rm(self): + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + snapshot = self._generate_random_snapshot_name() + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # snapshot group + self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot) + + # remove snapshot + self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, 
group) + + @unittest.skip("skipping subvolumegroup snapshot tests") + def test_subvolume_group_snapshot_idempotence(self): + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + snapshot = self._generate_random_snapshot_name() + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # snapshot group + self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot) + + # try creating snapshot w/ same snapshot name -- shoule be idempotent + self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot) + + # remove snapshot + self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + @unittest.skip("skipping subvolumegroup snapshot tests") + def test_nonexistent_subvolume_group_snapshot_rm(self): + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + snapshot = self._generate_random_snapshot_name() + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # snapshot group + self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot) + + # remove snapshot + self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot) + + # remove snapshot + try: + self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot) + except CommandFailedError as ce: + if ce.exitstatus != errno.ENOENT: + raise + else: + raise RuntimeError("expected the 'fs subvolumegroup snapshot rm' command to fail") + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + @unittest.skip("skipping subvolumegroup snapshot tests") + def test_subvolume_group_snapshot_rm_force(self): + # test removing non-existing subvolume group snapshot with --force + group = self._generate_random_group_name() + snapshot = self._generate_random_snapshot_name() + # remove snapshot + try: + self._fs_cmd("subvolumegroup", "snapshot", "rm", self.volname, group, snapshot, "--force") + except CommandFailedError as ce: + raise RuntimeError("expected the 'fs subvolumegroup snapshot rm --force' command to succeed") + + @unittest.skip("skipping subvolumegroup snapshot tests") + def test_subvolume_group_snapshot_ls(self): + # tests the 'fs subvolumegroup snapshot ls' command + + snapshots = [] + + # create group + group = self._generate_random_group_name() + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolumegroup snapshots + snapshots = self._generate_random_snapshot_name(3) + for snapshot in snapshots: + self._fs_cmd("subvolumegroup", "snapshot", "create", self.volname, group, snapshot) + + subvolgrpsnapshotls = json.loads(self._fs_cmd('subvolumegroup', 'snapshot', 'ls', self.volname, group)) + if len(subvolgrpsnapshotls) == 0: + raise RuntimeError("Expected the 'fs subvolumegroup snapshot ls' command to list the created 
subvolume group snapshots") + else: + snapshotnames = [snapshot['name'] for snapshot in subvolgrpsnapshotls] + if collections.Counter(snapshotnames) != collections.Counter(snapshots): + raise RuntimeError("Error creating or listing subvolume group snapshots") + + def test_async_subvolume_rm(self): + subvolumes = self._generate_random_subvolume_name(100) + + # create subvolumes + for subvolume in subvolumes: + self._fs_cmd("subvolume", "create", self.volname, subvolume) + self._do_subvolume_io(subvolume, number_of_files=10) + + self.mount_a.umount_wait() + + # remove subvolumes + for subvolume in subvolumes: + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + + self.mount_a.mount() + + # verify trash dir is clean + self._wait_for_trash_empty(timeout=300) + + def test_subvolume_inherited_snapshot_ls(self): + # tests the scenario where 'fs subvolume snapshot ls' command + # should not list inherited snapshots created as part of snapshot + # at ancestral level + + snapshots = [] + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + snap_count = 3 + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # create subvolume snapshots + snapshots = self._generate_random_snapshot_name(snap_count) + for snapshot in snapshots: + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group) + + # Create snapshot at ancestral level + ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_1") + ancestral_snappath2 = os.path.join(".", "volumes", group, ".snap", "ancestral_snap_2") + self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1, ancestral_snappath2]) + + subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume, group)) + self.assertEqual(len(subvolsnapshotls), snap_count) + + # remove ancestral snapshots + self.mount_a.run_shell(['rmdir', ancestral_snappath1, ancestral_snappath2]) + + # remove snapshot + for snapshot in snapshots: + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + def test_subvolume_inherited_snapshot_info(self): + """ + tests the scenario where 'fs subvolume snapshot info' command + should fail for inherited snapshots created as part of snapshot + at ancestral level + """ + + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume in group + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group) + + # Create snapshot at ancestral level + ancestral_snap_name = "ancestral_snap_1" + ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name) + self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1]) + + # Validate existence of inherited snapshot + group_path = os.path.join(".", "volumes", group) + inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c' '%i', group_path]).stdout.getvalue().strip()) + inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir) + inherited_snappath = 
os.path.join(".", "volumes", group, subvolume,".snap", inherited_snap)
+ self.mount_a.run_shell(['ls', inherited_snappath])
+
+ # snapshot info on inherited snapshot
+ try:
+ self._get_subvolume_snapshot_info(self.volname, subvolume, inherited_snap, group)
+ except CommandFailedError as ce:
+ self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on snapshot info of inherited snapshot")
+ else:
+ self.fail("expected snapshot info of inherited snapshot to fail")
+
+ # remove ancestral snapshots
+ self.mount_a.run_shell(['rmdir', ancestral_snappath1])
+
+ # remove subvolume
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--group_name", group)
+
+ # verify trash dir is clean
+ self._wait_for_trash_empty()
+
+ # remove group
+ self._fs_cmd("subvolumegroup", "rm", self.volname, group)
+
+ def test_subvolume_inherited_snapshot_rm(self):
+ """
+ tests the scenario where 'fs subvolume snapshot rm' command
+ should fail for inherited snapshots created as part of snapshot
+ at ancestral level
+ """
+
+ subvolume = self._generate_random_subvolume_name()
+ group = self._generate_random_group_name()
+
+ # create group
+ self._fs_cmd("subvolumegroup", "create", self.volname, group)
+
+ # create subvolume in group
+ self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
+
+ # Create snapshot at ancestral level
+ ancestral_snap_name = "ancestral_snap_1"
+ ancestral_snappath1 = os.path.join(".", "volumes", group, ".snap", ancestral_snap_name)
+ self.mount_a.run_shell(['mkdir', '-p', ancestral_snappath1])
+
+ # Validate existence of inherited snap
+ group_path = os.path.join(".", "volumes", group)
+ inode_number_group_dir = int(self.mount_a.run_shell(['stat', '-c' '%i', group_path]).stdout.getvalue().strip())
+ inherited_snap = "_{0}_{1}".format(ancestral_snap_name, inode_number_group_dir)
+ inherited_snappath = os.path.join(".", "volumes", group, subvolume,".snap", inherited_snap)
+ self.mount_a.run_shell(['ls', inherited_snappath])
+
+ # inherited snapshot should not be deletable
+ try:
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, inherited_snap, "--group_name", group)
+ except CommandFailedError as ce:
+ self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when removing inherited snapshot")
+ else:
+ self.fail("expected removing inherited snapshot to fail")
+
+ # remove ancestral snapshots
+ self.mount_a.run_shell(['rmdir', ancestral_snappath1])
+
+ # remove subvolume
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
+
+ # verify trash dir is clean
+ self._wait_for_trash_empty()
+
+ # remove group
+ self._fs_cmd("subvolumegroup", "rm", self.volname, group)
+
+ def test_subvolume_subvolumegroup_snapshot_name_conflict(self):
+ """
+ tests the scenario where creating a subvolume snapshot with the same
+ name as its subvolumegroup snapshot should fail
+ """
+
+ subvolume = self._generate_random_subvolume_name()
+ group = self._generate_random_group_name()
+ group_snapshot = self._generate_random_snapshot_name()
+
+ # create group
+ self._fs_cmd("subvolumegroup", "create", self.volname, group)
+
+ # create subvolume in group
+ self._fs_cmd("subvolume", "create", self.volname, subvolume, "--group_name", group)
+
+ # Create subvolumegroup snapshot
+ group_snapshot_path = os.path.join(".", "volumes", group, ".snap", group_snapshot)
+ self.mount_a.run_shell(['mkdir', '-p', group_snapshot_path])
+
+ # Validate existence of subvolumegroup snapshot
+ self.mount_a.run_shell(['ls', group_snapshot_path])
+
+ # Creation of subvolume snapshot with its subvolumegroup snapshot name should fail
+ try:
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, group_snapshot, "--group_name", group)
+ except CommandFailedError as ce:
+ self.assertEqual(ce.exitstatus, errno.EINVAL, msg="invalid error code when creating subvolume snapshot with same name as subvolume group snapshot")
+ else:
+ self.fail("expected subvolume snapshot creation with same name as subvolumegroup snapshot to fail")
+
+ # remove subvolumegroup snapshot
+ self.mount_a.run_shell(['rmdir', group_snapshot_path])
+
+ # remove subvolume
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume, group)
+
+ # verify trash dir is clean
+ self._wait_for_trash_empty()
+
+ # remove group
+ self._fs_cmd("subvolumegroup", "rm", self.volname, group)
+
+ def test_subvolume_upgrade_legacy_to_v1(self):
+ """
+ poor man's upgrade test -- rather than going through a full upgrade cycle,
+ emulate subvolumes by going through the wormhole and verify if they are
+ accessible.
+ further ensure that a legacy volume is not updated to v2.
+ """
+ subvolume1, subvolume2 = self._generate_random_subvolume_name(2)
+ group = self._generate_random_group_name()
+
+ # emulate an old-fashioned subvolume -- one in the default group and
+ # the other in a custom group
+ createpath1 = os.path.join(".", "volumes", "_nogroup", subvolume1)
+ self.mount_a.run_shell(['mkdir', '-p', createpath1])
+
+ # create group
+ createpath2 = os.path.join(".", "volumes", group, subvolume2)
+ self.mount_a.run_shell(['mkdir', '-p', createpath2])
+
+ # this would auto-upgrade on access without anyone noticing
+ subvolpath1 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume1)
+ self.assertNotEqual(subvolpath1, None)
+ subvolpath1 = subvolpath1.rstrip() # remove "/" prefix and any trailing newline
+
+ subvolpath2 = self._fs_cmd("subvolume", "getpath", self.volname, subvolume2, group)
+ self.assertNotEqual(subvolpath2, None)
+ subvolpath2 = subvolpath2.rstrip() # remove "/" prefix and any trailing newline
+
+ # and... the subvolume path returned should be what we created behind the scenes
+ self.assertEqual(createpath1[1:], subvolpath1)
+ self.assertEqual(createpath2[1:], subvolpath2)
+
+ # ensure metadata file is in legacy location, with required version v1
+ self._assert_meta_location_and_version(self.volname, subvolume1, version=1, legacy=True)
+ self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1, legacy=True)
+
+ # remove subvolume
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume1)
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group)
+
+ # verify trash dir is clean
+ self._wait_for_trash_empty()
+
+ # remove group
+ self._fs_cmd("subvolumegroup", "rm", self.volname, group)
+
+ def test_subvolume_no_upgrade_v1_sanity(self):
+ """
+ poor man's upgrade test -- theme continues...
+
+ This test is to ensure v1 subvolumes are retained as is, due to a snapshot being present, and runs through
+ a series of operations on the v1 subvolume to ensure they work as expected.
+ """
+ subvol_md = ["atime", "bytes_pcent", "bytes_quota", "bytes_used", "created_at", "ctime",
+ "data_pool", "gid", "mode", "mon_addrs", "mtime", "path", "pool_namespace",
+ "type", "uid", "features", "state"]
+ snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]
+
+ subvolume = self._generate_random_subvolume_name()
+ snapshot = self._generate_random_snapshot_name()
+ clone1, clone2 = self._generate_random_clone_name(2)
+ mode = "777"
+ uid = "1000"
+ gid = "1000"
+
+ # emulate a v1 subvolume -- in the default group
+ subvolume_path = self._create_v1_subvolume(subvolume)
+
+ # getpath
+ subvolpath = self._get_subvolume_path(self.volname, subvolume)
+ self.assertEqual(subvolpath, subvolume_path)
+
+ # ls
+ subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname))
+ self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes)))
+ self.assertEqual(subvolumes[0]['name'], subvolume,
+ "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name']))
+
+ # info
+ subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
+ for md in subvol_md:
+ self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
+
+ self.assertEqual(subvol_info["state"], "complete",
+ msg="expected state to be 'complete', found '{0}'".format(subvol_info["state"]))
+ self.assertEqual(len(subvol_info["features"]), 2,
+ msg="expected 2 features, found '{0}' ({1})".format(len(subvol_info["features"]), subvol_info["features"]))
+ for feature in ['snapshot-clone', 'snapshot-autoprotect']:
+ self.assertIn(feature, subvol_info["features"], msg="expected feature '{0}' in subvolume".format(feature))
+
+ # resize
+ nsize = self.DEFAULT_FILE_SIZE*1024*1024*10
+ self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize))
+ subvol_info = json.loads(self._get_subvolume_info(self.volname, subvolume))
+ for md in subvol_md:
+ self.assertIn(md, subvol_info, "'{0}' key not present in metadata of subvolume".format(md))
+ self.assertEqual(subvol_info["bytes_quota"], nsize, "bytes_quota should be set to '{0}'".format(nsize))
+
+ # create (idempotent) (change some attrs, to ensure attrs are preserved from the snapshot on clone)
+ self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid)
+
+ # do some IO
+ self._do_subvolume_io(subvolume, number_of_files=8)
+
+ # snap-create
+
self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1) + + # check clone status + self._wait_for_clone_to_complete(clone1) + + # ensure clone is v2 + self._assert_meta_location_and_version(self.volname, clone1, version=2) + + # verify clone + self._verify_clone(subvolume, snapshot, clone1, source_version=1) + + # clone (older snapshot) + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, 'fake', clone2) + + # check clone status + self._wait_for_clone_to_complete(clone2) + + # ensure clone is v2 + self._assert_meta_location_and_version(self.volname, clone2, version=2) + + # verify clone + # TODO: rentries will mismatch till this is fixed https://tracker.ceph.com/issues/46747 + #self._verify_clone(subvolume, 'fake', clone2, source_version=1) + + # snap-info + snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot)) + for md in snap_md: + self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md)) + self.assertEqual(snap_info["has_pending_clones"], "no") + + # snap-ls + subvol_snapshots = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume)) + self.assertEqual(len(subvol_snapshots), 2, "subvolume ls count mismatch, expected 2', found {0}".format(len(subvol_snapshots))) + snapshotnames = [snapshot['name'] for snapshot in subvol_snapshots] + for name in [snapshot, 'fake']: + self.assertIn(name, snapshotnames, msg="expected snapshot '{0}' in subvolume snapshot ls".format(name)) + + # snap-rm + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, "fake") + + # ensure volume is still at version 1 + self._assert_meta_location_and_version(self.volname, subvolume, version=1) + + # rm + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + self._fs_cmd("subvolume", "rm", self.volname, clone1) + self._fs_cmd("subvolume", "rm", self.volname, clone2) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_no_upgrade_v1_to_v2(self): + """ + poor man's upgrade test -- theme continues... 
+ ensure v1 to v2 upgrades are not done automatically due to various states of v1 + """ + subvolume1, subvolume2, subvolume3 = self._generate_random_subvolume_name(3) + group = self._generate_random_group_name() + + # emulate a v1 subvolume -- in the default group + subvol1_path = self._create_v1_subvolume(subvolume1) + + # emulate a v1 subvolume -- in a custom group + subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group) + + # emulate a v1 subvolume -- in a clone pending state + self._create_v1_subvolume(subvolume3, subvol_type='clone', has_snapshot=False, state='pending') + + # this would attempt auto-upgrade on access, but fail to do so as snapshots exist + subvolpath1 = self._get_subvolume_path(self.volname, subvolume1) + self.assertEqual(subvolpath1, subvol1_path) + + subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group) + self.assertEqual(subvolpath2, subvol2_path) + + # this would attempt auto-upgrade on access, but fail to do so as volume is not complete + # use clone status, as only certain operations are allowed in pending state + status = json.loads(self._fs_cmd("clone", "status", self.volname, subvolume3)) + self.assertEqual(status["status"]["state"], "pending") + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, "fake") + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume2, "fake", group) + + # ensure metadata file is in v1 location, with version retained as v1 + self._assert_meta_location_and_version(self.volname, subvolume1, version=1) + self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=1) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume1) + self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group) + try: + self._fs_cmd("subvolume", "rm", self.volname, subvolume3) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on rm of subvolume undergoing clone") + else: + self.fail("expected rm of subvolume undergoing clone to fail") + + # ensure metadata file is in v1 location, with version retained as v1 + self._assert_meta_location_and_version(self.volname, subvolume3, version=1) + self._fs_cmd("subvolume", "rm", self.volname, subvolume3, "--force") + + # verify list subvolumes returns an empty list + subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) + self.assertEqual(len(subvolumels), 0) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_upgrade_v1_to_v2(self): + """ + poor man's upgrade test -- theme continues... 
+ ensure v1 to v2 upgrades work + """ + subvolume1, subvolume2 = self._generate_random_subvolume_name(2) + group = self._generate_random_group_name() + + # emulate a v1 subvolume -- in the default group + subvol1_path = self._create_v1_subvolume(subvolume1, has_snapshot=False) + + # emulate a v1 subvolume -- in a custom group + subvol2_path = self._create_v1_subvolume(subvolume2, subvol_group=group, has_snapshot=False) + + # this would attempt auto-upgrade on access + subvolpath1 = self._get_subvolume_path(self.volname, subvolume1) + self.assertEqual(subvolpath1, subvol1_path) + + subvolpath2 = self._get_subvolume_path(self.volname, subvolume2, group_name=group) + self.assertEqual(subvolpath2, subvol2_path) + + # ensure metadata file is in v2 location, with version retained as v2 + self._assert_meta_location_and_version(self.volname, subvolume1, version=2) + self._assert_meta_location_and_version(self.volname, subvolume2, subvol_group=group, version=2) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume1) + self._fs_cmd("subvolume", "rm", self.volname, subvolume2, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_rm_with_snapshots(self): + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # remove subvolume -- should fail with ENOTEMPTY since it has snapshots + try: + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + except CommandFailedError as ce: + if ce.exitstatus != errno.ENOTEMPTY: + raise RuntimeError("invalid error code returned when deleting subvolume with snapshots") + else: + raise RuntimeError("expected subvolume deletion to fail") + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_retain_snapshot_without_snapshots(self): + """ + ensure retain snapshots based delete of a subvolume with no snapshots, deletes the subbvolume + """ + subvolume = self._generate_random_subvolume_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # remove with snapshot retention (should remove volume, no snapshots to retain) + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") + + # verify list subvolumes returns an empty list + subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) + self.assertEqual(len(subvolumels), 0) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_retain_snapshot_with_snapshots(self): + """ + ensure retain snapshots based delete of a subvolume with snapshots retains the subvolume + also test allowed and dis-allowed operations on a retained subvolume + """ + snap_md = ["created_at", "data_pool", "has_pending_clones", "size"] + + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # remove subvolume -- should fail with ENOTEMPTY since it has snapshots + try: + 
self._fs_cmd("subvolume", "rm", self.volname, subvolume) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of retained subvolume with snapshots") + else: + self.fail("expected rm of subvolume with retained snapshots to fail") + + # remove with snapshot retention + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") + + # fetch info + subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume)) + self.assertEqual(subvol_info["state"], "snapshot-retained", + msg="expected state to be 'snapshot-retained', found '{0}".format(subvol_info["state"])) + + ## test allowed ops in retained state + # ls + subvolumes = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) + self.assertEqual(len(subvolumes), 1, "subvolume ls count mismatch, expected '1', found {0}".format(len(subvolumes))) + self.assertEqual(subvolumes[0]['name'], subvolume, + "subvolume name mismatch in ls output, expected '{0}', found '{1}'".format(subvolume, subvolumes[0]['name'])) + + # snapshot info + snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot)) + for md in snap_md: + self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md)) + self.assertEqual(snap_info["has_pending_clones"], "no") + + # rm --force (allowed but should fail) + try: + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--force") + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots") + else: + self.fail("expected rm of subvolume with retained snapshots to fail") + + # rm (allowed but should fail) + try: + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.ENOTEMPTY, "invalid error code on rm of subvolume with retained snapshots") + else: + self.fail("expected rm of subvolume with retained snapshots to fail") + + ## test disallowed ops + # getpath + try: + self._fs_cmd("subvolume", "getpath", self.volname, subvolume) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots") + else: + self.fail("expected getpath of subvolume with retained snapshots to fail") + + # resize + nsize = self.DEFAULT_FILE_SIZE*1024*1024 + try: + self._fs_cmd("subvolume", "resize", self.volname, subvolume, str(nsize)) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on resize of subvolume with retained snapshots") + else: + self.fail("expected resize of subvolume with retained snapshots to fail") + + # snap-create + try: + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, "fail") + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on snapshot create of subvolume with retained snapshots") + else: + self.fail("expected snapshot create of subvolume with retained snapshots to fail") + + # remove snapshot (should remove volume) + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # verify list subvolumes returns an empty list + subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) + self.assertEqual(len(subvolumels), 0) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_retain_snapshot_invalid_recreate(self): + """ + ensure 
retained subvolume recreate does not leave any incarnations in the subvolume and trash + """ + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # remove with snapshot retention + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") + + # recreate subvolume with an invalid pool + data_pool = "invalid_pool" + try: + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--pool_layout", data_pool) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EINVAL, "invalid error code on recreate of subvolume with invalid poolname") + else: + self.fail("expected recreate of subvolume with invalid poolname to fail") + + # fetch info + subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume)) + self.assertEqual(subvol_info["state"], "snapshot-retained", + msg="expected state to be 'snapshot-retained', found '{0}".format(subvol_info["state"])) + + # getpath + try: + self._fs_cmd("subvolume", "getpath", self.volname, subvolume) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on getpath of subvolume with retained snapshots") + else: + self.fail("expected getpath of subvolume with retained snapshots to fail") + + # remove snapshot (should remove volume) + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_retain_snapshot_trash_busy_recreate(self): + """ + ensure retained subvolume recreate fails if its trash is not yet purged + """ + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # remove with snapshot retention + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") + + # fake a trash entry + self._update_fake_trash(subvolume) + + # recreate subvolume + try: + self._fs_cmd("subvolume", "create", self.volname, subvolume) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of subvolume with purge pending") + else: + self.fail("expected recreate of subvolume with purge pending to fail") + + # clear fake trash entry + self._update_fake_trash(subvolume, create=False) + + # recreate subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_retain_snapshot_trash_busy_recreate_clone(self): + """ + ensure retained clone recreate fails if its trash is not yet purged + """ + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, 
snapshot)
+
+ # clone subvolume snapshot
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
+
+ # check clone status
+ self._wait_for_clone_to_complete(clone)
+
+ # snapshot clone
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot)
+
+ # remove clone with snapshot retention
+ self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots")
+
+ # fake a trash entry
+ self._update_fake_trash(clone)
+
+ # clone subvolume snapshot (recreate)
+ try:
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
+ except CommandFailedError as ce:
+ self.assertEqual(ce.exitstatus, errno.EAGAIN, "invalid error code on recreate of clone with purge pending")
+ else:
+ self.fail("expected recreate of clone with purge pending to fail")
+
+ # clear fake trash entry
+ self._update_fake_trash(clone, create=False)
+
+ # recreate the clone
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone)
+
+ # check clone status
+ self._wait_for_clone_to_complete(clone)
+
+ # remove snapshots
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot)
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot)
+
+ # remove subvolumes
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume)
+ self._fs_cmd("subvolume", "rm", self.volname, clone)
+
+ # verify trash dir is clean
+ self._wait_for_trash_empty()
+
+ def test_subvolume_retain_snapshot_recreate_subvolume(self):
+ """
+ ensure a retained subvolume can be recreated and further snapshotted
+ """
+ snap_md = ["created_at", "data_pool", "has_pending_clones", "size"]
+
+ subvolume = self._generate_random_subvolume_name()
+ snapshot1, snapshot2 = self._generate_random_snapshot_name(2)
+
+ # create subvolume
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
+
+ # snapshot subvolume
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1)
+
+ # remove with snapshot retention
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
+
+ # fetch info
+ subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
+ self.assertEqual(subvol_info["state"], "snapshot-retained",
+ msg="expected state to be 'snapshot-retained', found '{0}'".format(subvol_info["state"]))
+
+ # recreate retained subvolume
+ self._fs_cmd("subvolume", "create", self.volname, subvolume)
+
+ # fetch info
+ subvol_info = json.loads(self._fs_cmd("subvolume", "info", self.volname, subvolume))
+ self.assertEqual(subvol_info["state"], "complete",
+ msg="expected state to be 'complete', found '{0}'".format(subvol_info["state"]))
+
+ # snapshot info (older snapshot)
+ snap_info = json.loads(self._get_subvolume_snapshot_info(self.volname, subvolume, snapshot1))
+ for md in snap_md:
+ self.assertIn(md, snap_info, "'{0}' key not present in metadata of snapshot".format(md))
+ self.assertEqual(snap_info["has_pending_clones"], "no")
+
+ # snap-create (new snapshot)
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2)
+
+ # remove with retain snapshots
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots")
+
+ # list snapshots
+ subvolsnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, subvolume))
+ self.assertEqual(len(subvolsnapshotls), 2, "Expected the 'fs subvolume snapshot ls' command to list the"
+ " created subvolume snapshots")
+ snapshotnames =
[snapshot['name'] for snapshot in subvolsnapshotls] + for snap in [snapshot1, snapshot2]: + self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap)) + + # remove snapshots (should remove volume) + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1) + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2) + + # verify list subvolumes returns an empty list + subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) + self.assertEqual(len(subvolumels), 0) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_retain_snapshot_clone(self): + """ + clone a snapshot from a snapshot retained subvolume + """ + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # store path for clone verification + subvol_path = self._get_subvolume_path(self.volname, subvolume) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=16) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # remove with snapshot retention + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") + + # clone retained subvolume snapshot + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) + + # check clone status + self._wait_for_clone_to_complete(clone) + + # verify clone + self._verify_clone(subvolume, snapshot, clone, subvol_path=subvol_path) + + # remove snapshots (removes retained volume) + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, clone) + + # verify list subvolumes returns an empty list + subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) + self.assertEqual(len(subvolumels), 0) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_retain_snapshot_recreate(self): + """ + recreate a subvolume from one of its retained snapshots + """ + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # store path for clone verification + subvol_path = self._get_subvolume_path(self.volname, subvolume) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=16) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # remove with snapshot retention + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") + + # recreate retained subvolume using its own snapshot to clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, subvolume) + + # check clone status + self._wait_for_clone_to_complete(subvolume) + + # verify clone + self._verify_clone(subvolume, snapshot, subvolume, subvol_path=subvol_path) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + + # verify list subvolumes returns an empty list + subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) + self.assertEqual(len(subvolumels), 0) + + # verify trash dir is clean + 
self._wait_for_trash_empty() + + def test_subvolume_clone_retain_snapshot_with_snapshots(self): + """ + retain snapshots of a cloned subvolume and check disallowed operations + """ + subvolume = self._generate_random_subvolume_name() + snapshot1, snapshot2 = self._generate_random_snapshot_name(2) + clone = self._generate_random_clone_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # store path for clone verification + subvol1_path = self._get_subvolume_path(self.volname, subvolume) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=16) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1) + + # remove with snapshot retention + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") + + # clone retained subvolume snapshot + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot1, clone) + + # check clone status + self._wait_for_clone_to_complete(clone) + + # verify clone + self._verify_clone(subvolume, snapshot1, clone, subvol_path=subvol1_path) + + # create a snapshot on the clone + self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone, snapshot2) + + # retain a clone + self._fs_cmd("subvolume", "rm", self.volname, clone, "--retain-snapshots") + + # list snapshots + clonesnapshotls = json.loads(self._fs_cmd('subvolume', 'snapshot', 'ls', self.volname, clone)) + self.assertEqual(len(clonesnapshotls), 1, "Expected the 'fs subvolume snapshot ls' command to list the" + " created subvolume snapshots") + snapshotnames = [snapshot['name'] for snapshot in clonesnapshotls] + for snap in [snapshot2]: + self.assertIn(snap, snapshotnames, "Missing snapshot '{0}' in snapshot list".format(snap)) + + ## check disallowed operations on retained clone + # clone-status + try: + self._fs_cmd("clone", "status", self.volname, clone) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone status of clone with retained snapshots") + else: + self.fail("expected clone status of clone with retained snapshots to fail") + + # clone-cancel + try: + self._fs_cmd("clone", "cancel", self.volname, clone) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.ENOENT, "invalid error code on clone cancel of clone with retained snapshots") + else: + self.fail("expected clone cancel of clone with retained snapshots to fail") + + # remove snapshots (removes subvolumes as all are in retained state) + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1) + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone, snapshot2) + + # verify list subvolumes returns an empty list + subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) + self.assertEqual(len(subvolumels), 0) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_retain_snapshot_clone_from_newer_snapshot(self): + """ + clone a subvolume from recreated subvolume's latest snapshot + """ + subvolume = self._generate_random_subvolume_name() + snapshot1, snapshot2 = self._generate_random_snapshot_name(2) + clone = self._generate_random_clone_name(1) + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=16) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot1) + + # remove with snapshot 
retention + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") + + # recreate subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # get and store path for clone verification + subvol2_path = self._get_subvolume_path(self.volname, subvolume) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=16) + + # snapshot newer subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot2) + + # remove with snapshot retention + self._fs_cmd("subvolume", "rm", self.volname, subvolume, "--retain-snapshots") + + # clone retained subvolume's newer snapshot + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot2, clone) + + # check clone status + self._wait_for_clone_to_complete(clone) + + # verify clone + self._verify_clone(subvolume, snapshot2, clone, subvol_path=subvol2_path) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot1) + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot2) + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, clone) + + # verify list subvolumes returns an empty list + subvolumels = json.loads(self._fs_cmd('subvolume', 'ls', self.volname)) + self.assertEqual(len(subvolumels), 0) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_snapshot_protect_unprotect_sanity(self): + """ + Snapshot protect/unprotect commands are deprecated. This test exists to ensure that + invoking the command does not cause errors, till they are removed from a subsequent release. + """ + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=64) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # now, protect snapshot + self._fs_cmd("subvolume", "snapshot", "protect", self.volname, subvolume, snapshot) + + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) + + # check clone status + self._wait_for_clone_to_complete(clone) + + # now, unprotect snapshot + self._fs_cmd("subvolume", "snapshot", "unprotect", self.volname, subvolume, snapshot) + + # verify clone + self._verify_clone(subvolume, snapshot, clone) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + self._fs_cmd("subvolume", "rm", self.volname, clone) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_snapshot_clone(self): + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=64) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) + + # check clone status + self._wait_for_clone_to_complete(clone) + + # verify clone + self._verify_clone(subvolume, 
snapshot, clone) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + self._fs_cmd("subvolume", "rm", self.volname, clone) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_snapshot_reconf_max_concurrent_clones(self): + """ + Validate 'max_concurrent_clones' config option + """ + + # get the default number of cloner threads + default_max_concurrent_clones = int(self.config_get('mgr.x', 'mgr/volumes/max_concurrent_clones')) + self.assertEqual(default_max_concurrent_clones, 4) + + # Increase number of cloner threads + self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 6) + max_concurrent_clones = int(self.config_get('mgr.x', 'mgr/volumes/max_concurrent_clones')) + self.assertEqual(max_concurrent_clones, 6) + + # Decrease number of cloner threads + self.config_set('mgr', 'mgr/volumes/max_concurrent_clones', 2) + max_concurrent_clones = int(self.config_get('mgr.x', 'mgr/volumes/max_concurrent_clones')) + self.assertEqual(max_concurrent_clones, 2) + + def test_subvolume_snapshot_clone_pool_layout(self): + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + + # add data pool + new_pool = "new_pool" + self.fs.add_data_pool(new_pool) + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=32) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, "--pool_layout", new_pool) + + # check clone status + self._wait_for_clone_to_complete(clone) + + # verify clone + self._verify_clone(subvolume, snapshot, clone, clone_pool=new_pool) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + subvol_path = self._get_subvolume_path(self.volname, clone) + desired_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool") + self.assertEqual(desired_pool, new_pool) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + self._fs_cmd("subvolume", "rm", self.volname, clone) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_snapshot_clone_with_attrs(self): + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + + mode = "777" + uid = "1000" + gid = "1000" + new_uid = "1001" + new_gid = "1001" + new_mode = "700" + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--mode", mode, "--uid", uid, "--gid", gid) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=32) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # change subvolume attrs (to ensure clone picks up snapshot attrs) + self._do_subvolume_attr_update(subvolume, new_uid, new_gid, new_mode) + + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) + + # check clone status + self._wait_for_clone_to_complete(clone) + + # verify clone + self._verify_clone(subvolume, snapshot, clone) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", 
self.volname, subvolume, snapshot) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + self._fs_cmd("subvolume", "rm", self.volname, clone) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_clone_inherit_snapshot_namespace_and_size(self): + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + osize = self.DEFAULT_FILE_SIZE*1024*1024*12 + + # create subvolume, in an isolated namespace with a specified size + self._fs_cmd("subvolume", "create", self.volname, subvolume, "--namespace-isolated", "--size", str(osize)) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=8) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # create a pool different from current subvolume pool + subvol_path = self._get_subvolume_path(self.volname, subvolume) + default_pool = self.mount_a.getfattr(subvol_path, "ceph.dir.layout.pool") + new_pool = "new_pool" + self.assertNotEqual(default_pool, new_pool) + self.fs.add_data_pool(new_pool) + + # update source subvolume pool + self._do_subvolume_pool_and_namespace_update(subvolume, pool=new_pool, pool_namespace="") + + # schedule a clone, with NO --pool specification + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) + + # check clone status + self._wait_for_clone_to_complete(clone) + + # verify clone + self._verify_clone(subvolume, snapshot, clone) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + self._fs_cmd("subvolume", "rm", self.volname, clone) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_snapshot_clone_and_reclone(self): + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone1, clone2 = self._generate_random_clone_name(2) + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=32) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1) + + # check clone status + self._wait_for_clone_to_complete(clone1) + + # verify clone + self._verify_clone(subvolume, snapshot, clone1) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # now the clone is just like a normal subvolume -- snapshot the clone and fork + # another clone. before that do some IO so it's can be differentiated. 
+ self._do_subvolume_io(clone1, create_dir="data", number_of_files=32) + + # snapshot clone -- use same snap name + self._fs_cmd("subvolume", "snapshot", "create", self.volname, clone1, snapshot) + + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, clone1, snapshot, clone2) + + # check clone status + self._wait_for_clone_to_complete(clone2) + + # verify clone + self._verify_clone(clone1, snapshot, clone2) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, clone1, snapshot) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + self._fs_cmd("subvolume", "rm", self.volname, clone1) + self._fs_cmd("subvolume", "rm", self.volname, clone2) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_snapshot_clone_under_group(self): + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + group = self._generate_random_group_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=32) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--target_group_name', group) + + # check clone status + self._wait_for_clone_to_complete(clone, clone_group=group) + + # verify clone + self._verify_clone(subvolume, snapshot, clone, clone_group=group) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + self._fs_cmd("subvolume", "rm", self.volname, clone, group) + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_under_group_snapshot_clone(self): + subvolume = self._generate_random_subvolume_name() + group = self._generate_random_group_name() + snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + + # create group + self._fs_cmd("subvolumegroup", "create", self.volname, group) + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume, group) + + # do some IO + self._do_subvolume_io(subvolume, subvolume_group=group, number_of_files=32) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, group) + + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone, '--group_name', group) + + # check clone status + self._wait_for_clone_to_complete(clone) + + # verify clone + self._verify_clone(subvolume, snapshot, clone, source_group=group) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, group) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume, group) + self._fs_cmd("subvolume", "rm", self.volname, clone) + + # remove group + self._fs_cmd("subvolumegroup", "rm", self.volname, group) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_snapshot_clone_different_groups(self): + subvolume = self._generate_random_subvolume_name() + 
snapshot = self._generate_random_snapshot_name()
+ clone = self._generate_random_clone_name()
+ s_group, c_group = self._generate_random_group_name(2)
+
+ # create groups
+ self._fs_cmd("subvolumegroup", "create", self.volname, s_group)
+ self._fs_cmd("subvolumegroup", "create", self.volname, c_group)
+
+ # create subvolume
+ self._fs_cmd("subvolume", "create", self.volname, subvolume, s_group)
+
+ # do some IO
+ self._do_subvolume_io(subvolume, subvolume_group=s_group, number_of_files=32)
+
+ # snapshot subvolume
+ self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot, s_group)
+
+ # schedule a clone
+ self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone,
+ '--group_name', s_group, '--target_group_name', c_group)
+
+ # check clone status
+ self._wait_for_clone_to_complete(clone, clone_group=c_group)
+
+ # verify clone
+ self._verify_clone(subvolume, snapshot, clone, source_group=s_group, clone_group=c_group)
+
+ # remove snapshot
+ self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot, s_group)
+
+ # remove subvolumes
+ self._fs_cmd("subvolume", "rm", self.volname, subvolume, s_group)
+ self._fs_cmd("subvolume", "rm", self.volname, clone, c_group)
+
+ # remove groups
+ self._fs_cmd("subvolumegroup", "rm", self.volname, s_group)
+ self._fs_cmd("subvolumegroup", "rm", self.volname, c_group)
+
+ # verify trash dir is clean
+ self._wait_for_trash_empty()
+
+ def test_subvolume_snapshot_clone_with_upgrade(self):
+ """
+ yet another poor man's upgrade test -- rather than going through a full
+ upgrade cycle, emulate old-style subvolumes by going through the wormhole
+ and verify clone operation.
+ further ensure that a legacy volume is not updated to v2, but clone is.
+ """ + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + + # emulate a old-fashioned subvolume + createpath = os.path.join(".", "volumes", "_nogroup", subvolume) + self.mount_a.run_shell(['mkdir', '-p', createpath]) + + # add required xattrs to subvolume + default_pool = self.mount_a.getfattr(".", "ceph.dir.layout.pool") + self.mount_a.setfattr(createpath, 'ceph.dir.layout.pool', default_pool) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=64) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # ensure metadata file is in legacy location, with required version v1 + self._assert_meta_location_and_version(self.volname, subvolume, version=1, legacy=True) + + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) + + # snapshot should not be deletable now + try: + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone") + else: + self.fail("expected removing source snapshot of a clone to fail") + + # check clone status + self._wait_for_clone_to_complete(clone) + + # verify clone + self._verify_clone(subvolume, snapshot, clone, source_version=1) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # ensure metadata file is in v2 location, with required version v2 + self._assert_meta_location_and_version(self.volname, clone) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + self._fs_cmd("subvolume", "rm", self.volname, clone) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_clone_in_progress_getpath(self): + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=64) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) + + # clone should not be accessible right now + try: + self._get_subvolume_path(self.volname, clone) + except CommandFailedError as ce: + if ce.exitstatus != errno.EAGAIN: + raise RuntimeError("invalid error code when fetching path of an pending clone") + else: + raise RuntimeError("expected fetching path of an pending clone to fail") + + # check clone status + self._wait_for_clone_to_complete(clone) + + # clone should be accessible now + subvolpath = self._get_subvolume_path(self.volname, clone) + self.assertNotEqual(subvolpath, None) + + # verify clone + self._verify_clone(subvolume, snapshot, clone) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + self._fs_cmd("subvolume", "rm", self.volname, clone) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_clone_in_progress_snapshot_rm(self): + subvolume = self._generate_random_subvolume_name() + snapshot = 
self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=64) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) + + # snapshot should not be deletable now + try: + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + except CommandFailedError as ce: + self.assertEqual(ce.exitstatus, errno.EAGAIN, msg="invalid error code when removing source snapshot of a clone") + else: + self.fail("expected removing source snapshot of a clone to fail") + + # check clone status + self._wait_for_clone_to_complete(clone) + + # clone should be accessible now + subvolpath = self._get_subvolume_path(self.volname, clone) + self.assertNotEqual(subvolpath, None) + + # verify clone + self._verify_clone(subvolume, snapshot, clone) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + self._fs_cmd("subvolume", "rm", self.volname, clone) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_clone_in_progress_source(self): + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=64) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) + + # verify clone source + result = json.loads(self._fs_cmd("clone", "status", self.volname, clone)) + source = result['status']['source'] + self.assertEqual(source['volume'], self.volname) + self.assertEqual(source['subvolume'], subvolume) + self.assertEqual(source.get('group', None), None) + self.assertEqual(source['snapshot'], snapshot) + + # check clone status + self._wait_for_clone_to_complete(clone) + + # clone should be accessible now + subvolpath = self._get_subvolume_path(self.volname, clone) + self.assertNotEqual(subvolpath, None) + + # verify clone + self._verify_clone(subvolume, snapshot, clone) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + self._fs_cmd("subvolume", "rm", self.volname, clone) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_non_clone_status(self): + subvolume = self._generate_random_subvolume_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + try: + self._fs_cmd("clone", "status", self.volname, subvolume) + except CommandFailedError as ce: + if ce.exitstatus != errno.ENOTSUP: + raise RuntimeError("invalid error code when fetching status of a non cloned subvolume") + else: + raise RuntimeError("expected fetching of clone status of a subvolume to fail") + + # remove subvolume + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def 
test_subvolume_snapshot_clone_on_existing_subvolumes(self): + subvolume1, subvolume2 = self._generate_random_subvolume_name(2) + snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + + # create subvolumes + self._fs_cmd("subvolume", "create", self.volname, subvolume1) + self._fs_cmd("subvolume", "create", self.volname, subvolume2) + + # do some IO + self._do_subvolume_io(subvolume1, number_of_files=32) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume1, snapshot) + + # schedule a clone with target as subvolume2 + try: + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, subvolume2) + except CommandFailedError as ce: + if ce.exitstatus != errno.EEXIST: + raise RuntimeError("invalid error code when cloning to existing subvolume") + else: + raise RuntimeError("expected cloning to fail if the target is an existing subvolume") + + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone) + + # schedule a clone with target as clone + try: + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume1, snapshot, clone) + except CommandFailedError as ce: + if ce.exitstatus != errno.EEXIST: + raise RuntimeError("invalid error code when cloning to existing clone") + else: + raise RuntimeError("expected cloning to fail if the target is an existing clone") + + # check clone status + self._wait_for_clone_to_complete(clone) + + # verify clone + self._verify_clone(subvolume1, snapshot, clone) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume1, snapshot) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume1) + self._fs_cmd("subvolume", "rm", self.volname, subvolume2) + self._fs_cmd("subvolume", "rm", self.volname, clone) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_snapshot_clone_fail_with_remove(self): + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone1, clone2 = self._generate_random_clone_name(2) + + pool_capacity = 32 * 1024 * 1024 + # number of files required to fill up 99% of the pool + nr_files = int((pool_capacity * 0.99) // (TestVolumes.DEFAULT_FILE_SIZE * 1024 * 1024)) + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=nr_files) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # add data pool + new_pool = "new_pool" + self.fs.add_data_pool(new_pool) + + self.fs.mon_manager.raw_cluster_cmd("osd", "pool", "set-quota", new_pool, + "max_bytes", "{0}".format(pool_capacity // 4)) + + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone1, "--pool_layout", new_pool) + + # check clone status -- this should dramatically overshoot the pool quota + self._wait_for_clone_to_complete(clone1) + + # verify clone + self._verify_clone(subvolume, snapshot, clone1, clone_pool=new_pool) + + # wait a bit so that subsequent I/O will give pool full error + time.sleep(120) + + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone2, "--pool_layout", new_pool) + + # check clone status + self._wait_for_clone_to_fail(clone2) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, 
snapshot) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + self._fs_cmd("subvolume", "rm", self.volname, clone1) + try: + self._fs_cmd("subvolume", "rm", self.volname, clone2) + except CommandFailedError as ce: + if ce.exitstatus != errno.EAGAIN: + raise RuntimeError("invalid error code when trying to remove failed clone") + else: + raise RuntimeError("expected error when removing a failed clone") + + # ... and with force, failed clone can be removed + self._fs_cmd("subvolume", "rm", self.volname, clone2, "--force") + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_snapshot_attr_clone(self): + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # do some IO + self._do_subvolume_io_mixed(subvolume) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) + + # check clone status + self._wait_for_clone_to_complete(clone) + + # verify clone + self._verify_clone(subvolume, snapshot, clone) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + self._fs_cmd("subvolume", "rm", self.volname, clone) + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_snapshot_clone_cancel_in_progress(self): + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clone = self._generate_random_clone_name() + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=128) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # schedule a clone + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) + + # cancel on-going clone + self._fs_cmd("clone", "cancel", self.volname, clone) + + # verify canceled state + self._check_clone_canceled(clone) + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + self._fs_cmd("subvolume", "rm", self.volname, clone, "--force") + + # verify trash dir is clean + self._wait_for_trash_empty() + + def test_subvolume_snapshot_clone_cancel_pending(self): + """ + this test is a bit more involved compared to canceling an in-progress clone. + we'd need to ensure that a to-be canceled clone has still not been picked up + by cloner threads. exploit the fact that clones are picked up in an FCFS + fashion and there are four (4) cloner threads by default. When the number of + cloner threads increase, this test _may_ start tripping -- so, the number of + clone operations would need to be jacked up. 
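+        with the defaults used below (4 threads, 5 clones) the first four clones
+        are picked up immediately and the fifth stays pending; the pending ones are
+        canceled and verified first, then the in-flight ones are canceled
+        best-effort, tolerating any that have already completed.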
+ """ + # default number of clone threads + NR_THREADS = 4 + # good enough for 4 threads + NR_CLONES = 5 + # yeh, 1gig -- we need the clone to run for sometime + FILE_SIZE_MB = 1024 + + subvolume = self._generate_random_subvolume_name() + snapshot = self._generate_random_snapshot_name() + clones = self._generate_random_clone_name(NR_CLONES) + + # create subvolume + self._fs_cmd("subvolume", "create", self.volname, subvolume) + + # do some IO + self._do_subvolume_io(subvolume, number_of_files=4, file_size=FILE_SIZE_MB) + + # snapshot subvolume + self._fs_cmd("subvolume", "snapshot", "create", self.volname, subvolume, snapshot) + + # schedule clones + for clone in clones: + self._fs_cmd("subvolume", "snapshot", "clone", self.volname, subvolume, snapshot, clone) + + to_wait = clones[0:NR_THREADS] + to_cancel = clones[NR_THREADS:] + + # cancel pending clones and verify + for clone in to_cancel: + status = json.loads(self._fs_cmd("clone", "status", self.volname, clone)) + self.assertEqual(status["status"]["state"], "pending") + self._fs_cmd("clone", "cancel", self.volname, clone) + self._check_clone_canceled(clone) + + # let's cancel on-going clones. handle the case where some of the clones + # _just_ complete + for clone in list(to_wait): + try: + self._fs_cmd("clone", "cancel", self.volname, clone) + to_cancel.append(clone) + to_wait.remove(clone) + except CommandFailedError as ce: + if ce.exitstatus != errno.EINVAL: + raise RuntimeError("invalid error code when cancelling on-going clone") + + # remove snapshot + self._fs_cmd("subvolume", "snapshot", "rm", self.volname, subvolume, snapshot) + + # remove subvolumes + self._fs_cmd("subvolume", "rm", self.volname, subvolume) + for clone in to_wait: + self._fs_cmd("subvolume", "rm", self.volname, clone) + for clone in to_cancel: + self._fs_cmd("subvolume", "rm", self.volname, clone, "--force") + + # verify trash dir is clean + self._wait_for_trash_empty() diff --git a/qa/tasks/cephfs_test_runner.py b/qa/tasks/cephfs_test_runner.py new file mode 100644 index 00000000..4455c086 --- /dev/null +++ b/qa/tasks/cephfs_test_runner.py @@ -0,0 +1,209 @@ +import contextlib +import logging +import os +import unittest +from unittest import suite, loader, case +from teuthology.task import interactive +from teuthology import misc +from tasks.cephfs.filesystem import Filesystem, MDSCluster, CephCluster +from tasks.mgr.mgr_test_case import MgrCluster + +log = logging.getLogger(__name__) + + +class DecoratingLoader(loader.TestLoader): + """ + A specialization of TestLoader that tags some extra attributes + onto test classes as they are loaded. 
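+    The attributes come from the `params` dict passed to the constructor and are
+    set on each loaded test case class (or on the single test instance, when a
+    test is loaded by method name) before the suite runs.  A minimal illustrative
+    sketch, mirroring how the task below constructs it (the attribute names are
+    just the ones that task happens to pass)::
+
+        loader = DecoratingLoader({"ctx": ctx, "fs": fs})
+        suite = loader.loadTestsFromName("tasks.cephfs.test_sessionmap")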
+ """ + def __init__(self, params): + self._params = params + super(DecoratingLoader, self).__init__() + + def _apply_params(self, obj): + for k, v in self._params.items(): + setattr(obj, k, v) + + def loadTestsFromTestCase(self, testCaseClass): + self._apply_params(testCaseClass) + return super(DecoratingLoader, self).loadTestsFromTestCase(testCaseClass) + + def loadTestsFromName(self, name, module=None): + result = super(DecoratingLoader, self).loadTestsFromName(name, module) + + # Special case for when we were called with the name of a method, we get + # a suite with one TestCase + tests_in_result = list(result) + if len(tests_in_result) == 1 and isinstance(tests_in_result[0], case.TestCase): + self._apply_params(tests_in_result[0]) + + return result + + +class LogStream(object): + def __init__(self): + self.buffer = "" + + def write(self, data): + self.buffer += data + if "\n" in self.buffer: + lines = self.buffer.split("\n") + for line in lines[:-1]: + log.info(line) + self.buffer = lines[-1] + + def flush(self): + pass + + +class InteractiveFailureResult(unittest.TextTestResult): + """ + Specialization that implements interactive-on-error style + behavior. + """ + ctx = None + + def addFailure(self, test, err): + log.error(self._exc_info_to_string(err, test)) + log.error("Failure in test '{0}', going interactive".format( + self.getDescription(test) + )) + interactive.task(ctx=self.ctx, config=None) + + def addError(self, test, err): + log.error(self._exc_info_to_string(err, test)) + log.error("Error in test '{0}', going interactive".format( + self.getDescription(test) + )) + interactive.task(ctx=self.ctx, config=None) + + +@contextlib.contextmanager +def task(ctx, config): + """ + Run the CephFS test cases. + + Run everything in tasks/cephfs/test_*.py: + + :: + + tasks: + - install: + - ceph: + - ceph-fuse: + - cephfs_test_runner: + + `modules` argument allows running only some specific modules: + + :: + + tasks: + ... + - cephfs_test_runner: + modules: + - tasks.cephfs.test_sessionmap + - tasks.cephfs.test_auto_repair + + By default, any cases that can't be run on the current cluster configuration + will generate a failure. When the optional `fail_on_skip` argument is set + to false, any tests that can't be run on the current configuration will + simply be skipped: + + :: + tasks: + ... 
+ - cephfs_test_runner: + fail_on_skip: false + + """ + + ceph_cluster = CephCluster(ctx) + + if len(list(misc.all_roles_of_type(ctx.cluster, 'mds'))): + mds_cluster = MDSCluster(ctx) + fs = Filesystem(ctx) + else: + mds_cluster = None + fs = None + + if len(list(misc.all_roles_of_type(ctx.cluster, 'mgr'))): + mgr_cluster = MgrCluster(ctx) + else: + mgr_cluster = None + + # Mount objects, sorted by ID + if hasattr(ctx, 'mounts'): + mounts = [v for k, v in sorted(ctx.mounts.items(), key=lambda mount: mount[0])] + else: + # The test configuration has a filesystem but no fuse/kclient mounts + mounts = [] + + decorating_loader = DecoratingLoader({ + "ctx": ctx, + "mounts": mounts, + "fs": fs, + "ceph_cluster": ceph_cluster, + "mds_cluster": mds_cluster, + "mgr_cluster": mgr_cluster, + }) + + fail_on_skip = config.get('fail_on_skip', True) + + # Put useful things onto ctx for interactive debugging + ctx.fs = fs + ctx.mds_cluster = mds_cluster + ctx.mgr_cluster = mgr_cluster + + # Depending on config, either load specific modules, or scan for moduless + if config and 'modules' in config and config['modules']: + module_suites = [] + for mod_name in config['modules']: + # Test names like cephfs.test_auto_repair + module_suites.append(decorating_loader.loadTestsFromName(mod_name)) + overall_suite = suite.TestSuite(module_suites) + else: + # Default, run all tests + overall_suite = decorating_loader.discover( + os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "cephfs/" + ) + ) + + if ctx.config.get("interactive-on-error", False): + InteractiveFailureResult.ctx = ctx + result_class = InteractiveFailureResult + else: + result_class = unittest.TextTestResult + + class LoggingResult(result_class): + def startTest(self, test): + log.info("Starting test: {0}".format(self.getDescription(test))) + return super(LoggingResult, self).startTest(test) + + def addSkip(self, test, reason): + if fail_on_skip: + # Don't just call addFailure because that requires a traceback + self.failures.append((test, reason)) + else: + super(LoggingResult, self).addSkip(test, reason) + + # Execute! + result = unittest.TextTestRunner( + stream=LogStream(), + resultclass=LoggingResult, + verbosity=2, + failfast=True).run(overall_suite) + + if not result.wasSuccessful(): + result.printErrors() # duplicate output at end for convenience + + bad_tests = [] + for test, error in result.errors: + bad_tests.append(str(test)) + for test, failure in result.failures: + bad_tests.append(str(test)) + + raise RuntimeError("Test failure: {0}".format(", ".join(bad_tests))) + + yield diff --git a/qa/tasks/cephfs_upgrade_snap.py b/qa/tasks/cephfs_upgrade_snap.py new file mode 100644 index 00000000..1708d43c --- /dev/null +++ b/qa/tasks/cephfs_upgrade_snap.py @@ -0,0 +1,45 @@ +""" +Upgrade cluster snap format. +""" + +import logging +import time + +from tasks.cephfs.filesystem import Filesystem + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Upgrade CephFS file system snap format. 
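+    Requires max_mds == 1.  Runs a recursive repair scrub on / and ~mdsdir, then
+    waits for the CEPH_MDSMAP_ALLOW_SNAPS and CEPH_MDSMAP_ALLOW_MULTIMDS_SNAPS
+    flags to appear in the MDS map.  Illustrative usage (yaml sketch)::
+
+        tasks:
+        - install:
+        - ceph:
+        - cephfs_upgrade_snap: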
+    """
+
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'snap-upgrade task only accepts a dict for configuration'
+
+    fs = Filesystem(ctx)
+
+    mds_map = fs.get_mds_map()
+    assert(mds_map['max_mds'] == 1)
+
+    json = fs.rank_tell(["scrub", "start", "/", "force", "recursive", "repair"])
+    if not json or json['return_code'] == 0:
+        log.info("scrub / completed")
+    else:
+        log.info("scrub / failed: {}".format(json))
+
+    json = fs.rank_tell(["scrub", "start", "~mdsdir", "force", "recursive", "repair"])
+    if not json or json['return_code'] == 0:
+        log.info("scrub ~mdsdir completed")
+    else:
+        log.info("scrub ~mdsdir failed: {}".format(json))
+
+    for i in range(0, 10):
+        mds_map = fs.get_mds_map()
+        if (mds_map['flags'] & (1<<1)) != 0 and (mds_map['flags'] & (1<<4)) != 0:
+            break
+        time.sleep(10)
+    assert((mds_map['flags'] & (1<<1)) != 0) # Test CEPH_MDSMAP_ALLOW_SNAPS
+    assert((mds_map['flags'] & (1<<4)) != 0) # Test CEPH_MDSMAP_ALLOW_MULTIMDS_SNAPS
diff --git a/qa/tasks/check_counter.py b/qa/tasks/check_counter.py
new file mode 100644
index 00000000..daa81973
--- /dev/null
+++ b/qa/tasks/check_counter.py
@@ -0,0 +1,98 @@
+
+import logging
+import json
+
+from teuthology.task import Task
+from teuthology import misc
+
+log = logging.getLogger(__name__)
+
+
+class CheckCounter(Task):
+    """
+    Use this task to validate that some daemon perf counters were
+    incremented by the nested tasks.
+
+    Config:
+    'cluster_name': optional, specify which cluster
+    'counters': dictionary of daemon type to list of performance counters.
+    'dry_run': just log the value of the counters, don't fail if they
+               aren't nonzero.
+
+    Success condition is that for all of the named counters, at least
+    one of the daemons of that type has the counter nonzero.
+
+    Example to check cephfs dirfrag splits are happening:
+    - install:
+    - ceph:
+    - ceph-fuse:
+    - check-counter:
+        counters:
+            mds:
+                - "mds.dir_split"
+    - workunit: ...
+    """
+
+    def start(self):
+        log.info("START")
+
+    def end(self):
+        overrides = self.ctx.config.get('overrides', {})
+        misc.deep_merge(self.config, overrides.get('check-counter', {}))
+
+        cluster_name = self.config.get('cluster_name', None)
+        dry_run = self.config.get('dry_run', False)
+        targets = self.config.get('counters', {})
+
+        if cluster_name is None:
+            cluster_name = next(iter(self.ctx.managers.keys()))
+
+        for daemon_type, counters in targets.items():
+            # List of 'a', 'b', 'c'...
+ daemon_ids = list(misc.all_roles_of_type(self.ctx.cluster, daemon_type)) + daemons = dict([(daemon_id, + self.ctx.daemons.get_daemon(daemon_type, daemon_id)) + for daemon_id in daemon_ids]) + + seen = set() + + for daemon_id, daemon in daemons.items(): + if not daemon.running(): + log.info("Ignoring daemon {0}, it isn't running".format(daemon_id)) + continue + else: + log.debug("Getting stats from {0}".format(daemon_id)) + + manager = self.ctx.managers[cluster_name] + proc = manager.admin_socket(daemon_type, daemon_id, ["perf", "dump"]) + response_data = proc.stdout.getvalue().strip() + if response_data: + perf_dump = json.loads(response_data) + else: + log.warning("No admin socket response from {0}, skipping".format(daemon_id)) + continue + + for counter in counters: + subsys, counter_id = counter.split(".") + if subsys not in perf_dump or counter_id not in perf_dump[subsys]: + log.warning("Counter '{0}' not found on daemon {1}.{2}".format( + counter, daemon_type, daemon_id)) + continue + value = perf_dump[subsys][counter_id] + + log.info("Daemon {0}.{1} {2}={3}".format( + daemon_type, daemon_id, counter, value + )) + + if value > 0: + seen.add(counter) + + if not dry_run: + unseen = set(counters) - set(seen) + if unseen: + raise RuntimeError("The following counters failed to be set " + "on {0} daemons: {1}".format( + daemon_type, unseen + )) + +task = CheckCounter diff --git a/qa/tasks/cifs_mount.py b/qa/tasks/cifs_mount.py new file mode 100644 index 00000000..b282b0b7 --- /dev/null +++ b/qa/tasks/cifs_mount.py @@ -0,0 +1,137 @@ +""" +Mount cifs clients. Unmount when finished. +""" +import contextlib +import logging +import os + +from teuthology import misc as teuthology +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Mount/unmount a cifs client. + + The config is optional and defaults to mounting on all clients. If + a config is given, it is expected to be a list of clients to do + this operation on. 
+ + Example that starts smbd and mounts cifs on all nodes:: + + tasks: + - ceph: + - samba: + - cifs-mount: + - interactive: + + Example that splits smbd and cifs: + + tasks: + - ceph: + - samba: [samba.0] + - cifs-mount: [client.0] + - ceph-fuse: [client.1] + - interactive: + + Example that specifies the share name: + + tasks: + - ceph: + - ceph-fuse: + - samba: + samba.0: + cephfuse: "{testdir}/mnt.0" + - cifs-mount: + client.0: + share: cephfuse + + :param ctx: Context + :param config: Configuration + """ + log.info('Mounting cifs clients...') + + if config is None: + config = dict(('client.{id}'.format(id=id_), None) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')) + elif isinstance(config, list): + config = dict((name, None) for name in config) + + clients = list(teuthology.get_clients(ctx=ctx, roles=config.keys())) + + from .samba import get_sambas + samba_roles = ['samba.{id_}'.format(id_=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba')] + sambas = list(get_sambas(ctx=ctx, roles=samba_roles)) + (ip, _) = sambas[0][1].ssh.get_transport().getpeername() + log.info('samba ip: {ip}'.format(ip=ip)) + + for id_, remote in clients: + mnt = os.path.join(teuthology.get_testdir(ctx), 'mnt.{id}'.format(id=id_)) + log.info('Mounting cifs client.{id} at {remote} {mnt}...'.format( + id=id_, remote=remote,mnt=mnt)) + + remote.run( + args=[ + 'mkdir', + '--', + mnt, + ], + ) + + rolestr = 'client.{id_}'.format(id_=id_) + unc = "ceph" + log.info("config: {c}".format(c=config)) + if config[rolestr] is not None and 'share' in config[rolestr]: + unc = config[rolestr]['share'] + + remote.run( + args=[ + 'sudo', + 'mount', + '-t', + 'cifs', + '//{sambaip}/{unc}'.format(sambaip=ip, unc=unc), + '-o', + 'username=ubuntu,password=ubuntu', + mnt, + ], + ) + + remote.run( + args=[ + 'sudo', + 'chown', + 'ubuntu:ubuntu', + '{m}/'.format(m=mnt), + ], + ) + + try: + yield + finally: + log.info('Unmounting cifs clients...') + for id_, remote in clients: + remote.run( + args=[ + 'sudo', + 'umount', + mnt, + ], + ) + for id_, remote in clients: + while True: + try: + remote.run( + args=[ + 'rmdir', '--', mnt, + run.Raw('2>&1'), + run.Raw('|'), + 'grep', 'Device or resource busy', + ], + ) + import time + time.sleep(1) + except Exception: + break diff --git a/qa/tasks/cram.py b/qa/tasks/cram.py new file mode 100644 index 00000000..d06f0944 --- /dev/null +++ b/qa/tasks/cram.py @@ -0,0 +1,151 @@ +""" +Cram tests +""" +import logging +import os + +import six + +from tasks.util.workunit import get_refspec_after_overrides + +from teuthology import misc as teuthology +from teuthology.parallel import parallel +from teuthology.orchestra import run +from teuthology.config import config as teuth_config + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Run all cram tests from the specified paths on the specified + clients. Each client runs tests in parallel. + + Limitations: + Tests must have a .t suffix. Tests with duplicate names will + overwrite each other, so only the last one will run. 
+ + For example:: + + tasks: + - ceph: + - cram: + clients: + client.0: + - qa/test.t + - qa/test2.t] + client.1: [qa/test.t] + branch: foo + + You can also run a list of cram tests on all clients:: + + tasks: + - ceph: + - cram: + clients: + all: [qa/test.t] + + :param ctx: Context + :param config: Configuration + """ + assert isinstance(config, dict) + assert 'clients' in config and isinstance(config['clients'], dict), \ + 'configuration must contain a dictionary of clients' + + clients = teuthology.replace_all_with_clients(ctx.cluster, + config['clients']) + testdir = teuthology.get_testdir(ctx) + + overrides = ctx.config.get('overrides', {}) + refspec = get_refspec_after_overrides(config, overrides) + + git_url = teuth_config.get_ceph_qa_suite_git_url() + log.info('Pulling tests from %s ref %s', git_url, refspec) + + try: + for client, tests in clients.items(): + (remote,) = ctx.cluster.only(client).remotes.keys() + client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client) + remote.run( + args=[ + 'mkdir', '--', client_dir, + run.Raw('&&'), + 'virtualenv', '{tdir}/virtualenv'.format(tdir=testdir), + run.Raw('&&'), + '{tdir}/virtualenv/bin/pip'.format(tdir=testdir), + 'install', 'cram==0.6', + ], + ) + clone_dir = '{tdir}/clone.{role}'.format(tdir=testdir, role=client) + remote.run(args=refspec.clone(git_url, clone_dir)) + + for test in tests: + assert test.endswith('.t'), 'tests must end in .t' + remote.run( + args=[ + 'cp', '--', os.path.join(clone_dir, test), client_dir, + ], + ) + + with parallel() as p: + for role in clients.keys(): + p.spawn(_run_tests, ctx, role) + finally: + for client, tests in clients.items(): + (remote,) = ctx.cluster.only(client).remotes.keys() + client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client) + test_files = set([test.rsplit('/', 1)[1] for test in tests]) + + # remove test files unless they failed + for test_file in test_files: + abs_file = os.path.join(client_dir, test_file) + remote.run( + args=[ + 'test', '-f', abs_file + '.err', + run.Raw('||'), + 'rm', '-f', '--', abs_file, + ], + ) + + # ignore failure since more than one client may + # be run on a host, and the client dir should be + # non-empty if the test failed + remote.run( + args=[ + 'rm', '-rf', '--', + '{tdir}/virtualenv'.format(tdir=testdir), + clone_dir, + run.Raw(';'), + 'rmdir', '--ignore-fail-on-non-empty', client_dir, + ], + ) + +def _run_tests(ctx, role): + """ + For each role, check to make sure it's a client, then run the cram on that client + + :param ctx: Context + :param role: Roles + """ + assert isinstance(role, six.string_types) + PREFIX = 'client.' 
+ assert role.startswith(PREFIX) + id_ = role[len(PREFIX):] + (remote,) = ctx.cluster.only(role).remotes.keys() + ceph_ref = ctx.summary.get('ceph-sha1', 'master') + + testdir = teuthology.get_testdir(ctx) + log.info('Running tests for %s...', role) + remote.run( + args=[ + run.Raw('CEPH_REF={ref}'.format(ref=ceph_ref)), + run.Raw('CEPH_ID="{id}"'.format(id=id_)), + run.Raw('PATH=$PATH:/usr/sbin'), + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + '{tdir}/virtualenv/bin/cram'.format(tdir=testdir), + '-v', '--', + run.Raw('{tdir}/archive/cram.{role}/*.t'.format(tdir=testdir, role=role)), + ], + logger=log.getChild(role), + ) diff --git a/qa/tasks/create_verify_lfn_objects.py b/qa/tasks/create_verify_lfn_objects.py new file mode 100644 index 00000000..53254158 --- /dev/null +++ b/qa/tasks/create_verify_lfn_objects.py @@ -0,0 +1,83 @@ +""" +Rados modle-based integration tests +""" +import contextlib +import logging + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + For each combination of namespace and name_length, create + objects with name length + on entry. On exit, verify that the objects still exist, can + be deleted, and then don't exist. + + Usage:: + + create_verify_lfn_objects.py: + pool: default: 'data' + prefix: default: '' + namespace: [] default: [''] + num_objects: [] default: 10 + name_length: [] default: [400] + """ + pool = config.get('pool', 'data') + num_objects = config.get('num_objects', 10) + name_length = config.get('name_length', [400]) + namespace = config.get('namespace', [None]) + prefix = config.get('prefix', None) + manager = ctx.managers['ceph'] + + objects = [] + for l in name_length: + for ns in namespace: + def object_name(i): + nslength = 0 + if namespace != '': + nslength = len(namespace) + numstr = str(i) + fillerlen = l - nslength - len(prefix) - len(numstr) + assert fillerlen >= 0 + return prefix + ('a'*fillerlen) + numstr + objects += [(ns, object_name(i)) for i in range(num_objects)] + + for ns, name in objects: + err = manager.do_put( + pool, + name, + '/etc/resolv.conf', + namespace=ns) + log.info("err is " + str(err)) + assert err == 0 + + try: + yield + finally: + log.info('ceph_verify_lfn_objects verifying...') + for ns, name in objects: + err = manager.do_get( + pool, + name, + namespace=ns) + log.info("err is " + str(err)) + assert err == 0 + + log.info('ceph_verify_lfn_objects deleting...') + for ns, name in objects: + err = manager.do_rm( + pool, + name, + namespace=ns) + log.info("err is " + str(err)) + assert err == 0 + + log.info('ceph_verify_lfn_objects verifying absent...') + for ns, name in objects: + err = manager.do_get( + pool, + name, + namespace=ns) + log.info("err is " + str(err)) + assert err != 0 diff --git a/qa/tasks/devstack.py b/qa/tasks/devstack.py new file mode 100644 index 00000000..35620f7e --- /dev/null +++ b/qa/tasks/devstack.py @@ -0,0 +1,379 @@ +#!/usr/bin/env python +import contextlib +import logging +import textwrap +from configparser import ConfigParser + +import six +import time + +from teuthology.orchestra import run +from teuthology import misc +from teuthology.contextutil import nested + +log = logging.getLogger(__name__) + +DEVSTACK_GIT_REPO = 'https://github.com/openstack-dev/devstack.git' +DS_STABLE_BRANCHES = ("havana", "grizzly") + +is_devstack_node = lambda role: role.startswith('devstack') +is_osd_node = lambda role: role.startswith('osd') + + +@contextlib.contextmanager +def task(ctx, config): + if config is None: + 
config = {} + if not isinstance(config, dict): + raise TypeError("config must be a dict") + with nested(lambda: install(ctx=ctx, config=config), + lambda: smoke(ctx=ctx, config=config), + ): + yield + + +@contextlib.contextmanager +def install(ctx, config): + """ + Install OpenStack DevStack and configure it to use a Ceph cluster for + Glance and Cinder. + + Requires one node with a role 'devstack' + + Since devstack runs rampant on the system it's used on, typically you will + want to reprovision that machine after using devstack on it. + + Also, the default 2GB of RAM that is given to vps nodes is insufficient. I + recommend 4GB. Downburst can be instructed to give 4GB to a vps node by + adding this to the yaml: + + downburst: + ram: 4G + + This was created using documentation found here: + https://github.com/openstack-dev/devstack/blob/master/README.md + http://docs.ceph.com/docs/master/rbd/rbd-openstack/ + """ + if config is None: + config = {} + if not isinstance(config, dict): + raise TypeError("config must be a dict") + + devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys())) + an_osd_node = next(iter(ctx.cluster.only(is_osd_node).remotes.keys())) + + devstack_branch = config.get("branch", "master") + install_devstack(devstack_node, devstack_branch) + try: + configure_devstack_and_ceph(ctx, config, devstack_node, an_osd_node) + yield + finally: + pass + + +def install_devstack(devstack_node, branch="master"): + log.info("Cloning DevStack repo...") + + args = ['git', 'clone', DEVSTACK_GIT_REPO] + devstack_node.run(args=args) + + if branch != "master": + if branch in DS_STABLE_BRANCHES and not branch.startswith("stable"): + branch = "stable/" + branch + log.info("Checking out {branch} branch...".format(branch=branch)) + cmd = "cd devstack && git checkout " + branch + devstack_node.run(args=cmd) + + log.info("Installing DevStack...") + args = ['cd', 'devstack', run.Raw('&&'), './stack.sh'] + devstack_node.run(args=args) + + +def configure_devstack_and_ceph(ctx, config, devstack_node, ceph_node): + pool_size = config.get('pool_size', '128') + create_pools(ceph_node, pool_size) + distribute_ceph_conf(devstack_node, ceph_node) + # This is where we would install python-ceph and ceph-common but it appears + # the ceph task does that for us. 
+    generate_ceph_keys(ceph_node)
+    distribute_ceph_keys(devstack_node, ceph_node)
+    secret_uuid = set_libvirt_secret(devstack_node, ceph_node)
+    update_devstack_config_files(devstack_node, secret_uuid)
+    set_apache_servername(devstack_node)
+    # Rebooting is the most-often-used method of restarting devstack services
+    misc.reboot(devstack_node)
+    start_devstack(devstack_node)
+    restart_apache(devstack_node)
+
+
+def create_pools(ceph_node, pool_size):
+    log.info("Creating pools on Ceph cluster...")
+
+    for pool_name in ['volumes', 'images', 'backups']:
+        args = ['sudo', 'ceph', 'osd', 'pool', 'create', pool_name, pool_size]
+        ceph_node.run(args=args)
+
+
+def distribute_ceph_conf(devstack_node, ceph_node):
+    log.info("Copying ceph.conf to DevStack node...")
+
+    ceph_conf_path = '/etc/ceph/ceph.conf'
+    ceph_conf = misc.get_file(ceph_node, ceph_conf_path, sudo=True)
+    misc.sudo_write_file(devstack_node, ceph_conf_path, ceph_conf)
+
+
+def generate_ceph_keys(ceph_node):
+    log.info("Generating Ceph keys...")
+
+    ceph_auth_cmds = [
+        ['sudo', 'ceph', 'auth', 'get-or-create', 'client.cinder', 'mon',
+         'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=volumes, allow rx pool=images'], # noqa
+        ['sudo', 'ceph', 'auth', 'get-or-create', 'client.glance', 'mon',
+         'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=images'], # noqa
+        ['sudo', 'ceph', 'auth', 'get-or-create', 'client.cinder-backup', 'mon',
+         'allow r', 'osd', 'allow class-read object_prefix rbd_children, allow rwx pool=backups'], # noqa
+    ]
+    for cmd in ceph_auth_cmds:
+        ceph_node.run(args=cmd)
+
+
+def distribute_ceph_keys(devstack_node, ceph_node):
+    log.info("Copying Ceph keys to DevStack node...")
+
+    def copy_key(from_remote, key_name, to_remote, dest_path, owner):
+        key_stringio = six.StringIO()
+        from_remote.run(
+            args=['sudo', 'ceph', 'auth', 'get-or-create', key_name],
+            stdout=key_stringio)
+        key_stringio.seek(0)
+        misc.sudo_write_file(to_remote, dest_path,
+                             key_stringio, owner=owner)
+    keys = [
+        dict(name='client.glance',
+             path='/etc/ceph/ceph.client.glance.keyring',
+             # devstack appears to just want root:root
+             #owner='glance:glance',
+             ),
+        dict(name='client.cinder',
+             path='/etc/ceph/ceph.client.cinder.keyring',
+             # devstack appears to just want root:root
+             #owner='cinder:cinder',
+             ),
+        dict(name='client.cinder-backup',
+             path='/etc/ceph/ceph.client.cinder-backup.keyring',
+             # devstack appears to just want root:root
+             #owner='cinder:cinder',
+             ),
+    ]
+    for key_dict in keys:
+        copy_key(ceph_node, key_dict['name'], devstack_node,
+                 key_dict['path'], key_dict.get('owner'))
+
+
+def set_libvirt_secret(devstack_node, ceph_node):
+    log.info("Setting libvirt secret...")
+
+    cinder_key_stringio = six.StringIO()
+    ceph_node.run(args=['sudo', 'ceph', 'auth', 'get-key', 'client.cinder'],
+                  stdout=cinder_key_stringio)
+    cinder_key = cinder_key_stringio.getvalue().strip()
+
+    uuid_stringio = six.StringIO()
+    devstack_node.run(args=['uuidgen'], stdout=uuid_stringio)
+    uuid = uuid_stringio.getvalue().strip()
+
+    secret_path = '/tmp/secret.xml'
+    secret_template = textwrap.dedent("""
+    <secret ephemeral='no' private='no'>
+        <uuid>{uuid}</uuid>
+        <usage type='ceph'>
+            <name>client.cinder secret</name>
+        </usage>
+    </secret>""")
+    misc.sudo_write_file(devstack_node, secret_path,
+                         secret_template.format(uuid=uuid))
+    devstack_node.run(args=['sudo', 'virsh', 'secret-define', '--file',
+                            secret_path])
+    devstack_node.run(args=['sudo', 'virsh', 'secret-set-value', '--secret',
+                            uuid, '--base64', cinder_key])
+    return uuid
+
+
+def update_devstack_config_files(devstack_node,
secret_uuid): + log.info("Updating DevStack config files to use Ceph...") + + def backup_config(node, file_name, backup_ext='.orig.teuth'): + node.run(args=['cp', '-f', file_name, file_name + backup_ext]) + + def update_config(config_name, config_stream, update_dict, + section='DEFAULT'): + parser = ConfigParser() + parser.read_file(config_stream) + for (key, value) in update_dict.items(): + parser.set(section, key, value) + out_stream = six.StringIO() + parser.write(out_stream) + out_stream.seek(0) + return out_stream + + updates = [ + dict(name='/etc/glance/glance-api.conf', options=dict( + default_store='rbd', + rbd_store_user='glance', + rbd_store_pool='images', + show_image_direct_url='True',)), + dict(name='/etc/cinder/cinder.conf', options=dict( + volume_driver='cinder.volume.drivers.rbd.RBDDriver', + rbd_pool='volumes', + rbd_ceph_conf='/etc/ceph/ceph.conf', + rbd_flatten_volume_from_snapshot='false', + rbd_max_clone_depth='5', + glance_api_version='2', + rbd_user='cinder', + rbd_secret_uuid=secret_uuid, + backup_driver='cinder.backup.drivers.ceph', + backup_ceph_conf='/etc/ceph/ceph.conf', + backup_ceph_user='cinder-backup', + backup_ceph_chunk_size='134217728', + backup_ceph_pool='backups', + backup_ceph_stripe_unit='0', + backup_ceph_stripe_count='0', + restore_discard_excess_bytes='true', + )), + dict(name='/etc/nova/nova.conf', options=dict( + libvirt_images_type='rbd', + libvirt_images_rbd_pool='volumes', + libvirt_images_rbd_ceph_conf='/etc/ceph/ceph.conf', + rbd_user='cinder', + rbd_secret_uuid=secret_uuid, + libvirt_inject_password='false', + libvirt_inject_key='false', + libvirt_inject_partition='-2', + )), + ] + + for update in updates: + file_name = update['name'] + options = update['options'] + config_data = misc.get_file(devstack_node, file_name, sudo=True) + config_stream = six.StringIO(config_data) + backup_config(devstack_node, file_name) + new_config_stream = update_config(file_name, config_stream, options) + misc.sudo_write_file(devstack_node, file_name, new_config_stream) + + +def set_apache_servername(node): + # Apache complains: "Could not reliably determine the server's fully + # qualified domain name, using 127.0.0.1 for ServerName" + # So, let's make sure it knows its name. + log.info("Setting Apache ServerName...") + + hostname = node.hostname + config_file = '/etc/apache2/conf.d/servername' + misc.sudo_write_file(node, config_file, + "ServerName {name}".format(name=hostname)) + + +def start_devstack(devstack_node): + log.info("Patching devstack start script...") + # This causes screen to start headless - otherwise rejoin-stack.sh fails + # because there is no terminal attached. + cmd = "cd devstack && sed -ie 's/screen -c/screen -dm -c/' rejoin-stack.sh" + devstack_node.run(args=cmd) + + log.info("Starting devstack...") + cmd = "cd devstack && ./rejoin-stack.sh" + devstack_node.run(args=cmd) + + # This was added because I was getting timeouts on Cinder requests - which + # were trying to access Keystone on port 5000. A more robust way to handle + # this would be to introduce a wait-loop on devstack_node that checks to + # see if a service is listening on port 5000. 
+ log.info("Waiting 30s for devstack to start...") + time.sleep(30) + + +def restart_apache(node): + node.run(args=['sudo', '/etc/init.d/apache2', 'restart'], wait=True) + + +@contextlib.contextmanager +def exercise(ctx, config): + log.info("Running devstack exercises...") + + if config is None: + config = {} + if not isinstance(config, dict): + raise TypeError("config must be a dict") + + devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys())) + + # TODO: save the log *and* preserve failures + #devstack_archive_dir = create_devstack_archive(ctx, devstack_node) + + try: + #cmd = "cd devstack && ./exercise.sh 2>&1 | tee {dir}/exercise.log".format( # noqa + # dir=devstack_archive_dir) + cmd = "cd devstack && ./exercise.sh" + devstack_node.run(args=cmd, wait=True) + yield + finally: + pass + + +def create_devstack_archive(ctx, devstack_node): + test_dir = misc.get_testdir(ctx) + devstack_archive_dir = "{test_dir}/archive/devstack".format( + test_dir=test_dir) + devstack_node.run(args="mkdir -p " + devstack_archive_dir) + return devstack_archive_dir + + +@contextlib.contextmanager +def smoke(ctx, config): + log.info("Running a basic smoketest...") + + devstack_node = next(iter(ctx.cluster.only(is_devstack_node).remotes.keys())) + an_osd_node = next(iter(ctx.cluster.only(is_osd_node).remotes.keys())) + + try: + create_volume(devstack_node, an_osd_node, 'smoke0', 1) + yield + finally: + pass + + +def create_volume(devstack_node, ceph_node, vol_name, size): + """ + :param size: The size of the volume, in GB + """ + size = str(size) + log.info("Creating a {size}GB volume named {name}...".format( + name=vol_name, + size=size)) + args = ['source', 'devstack/openrc', run.Raw('&&'), 'cinder', 'create', + '--display-name', vol_name, size] + cinder_create = devstack_node.sh(args, wait=True) + vol_info = parse_os_table(cinder_create) + log.debug("Volume info: %s", str(vol_info)) + + try: + rbd_output = ceph_node.sh("rbd --id cinder ls -l volumes", wait=True) + except run.CommandFailedError: + log.debug("Original rbd call failed; retrying without '--id cinder'") + rbd_output = ceph_node.sh("rbd ls -l volumes", wait=True) + + assert vol_info['id'] in rbd_output, \ + "Volume not found on Ceph cluster" + assert vol_info['size'] == size, \ + "Volume size on Ceph cluster is different than specified" + return vol_info['id'] + + +def parse_os_table(table_str): + out_dict = dict() + for line in table_str.split('\n'): + if line.startswith('|'): + items = line.split() + out_dict[items[1]] = items[3] + return out_dict diff --git a/qa/tasks/die_on_err.py b/qa/tasks/die_on_err.py new file mode 100644 index 00000000..a6aa4c63 --- /dev/null +++ b/qa/tasks/die_on_err.py @@ -0,0 +1,70 @@ +""" +Raise exceptions on osd coredumps or test err directories +""" +import contextlib +import logging +import time +from teuthology.orchestra import run + +from tasks import ceph_manager +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Die if {testdir}/err exists or if an OSD dumps core + """ + if config is None: + config = {} + + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + + num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') + log.info('num_osds is %s' % num_osds) + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + while len(manager.get_osd_status()['up']) < num_osds: + time.sleep(10) + + testdir 
= teuthology.get_testdir(ctx) + + while True: + for i in range(num_osds): + (osd_remote,) = ctx.cluster.only('osd.%d' % i).remotes.keys() + p = osd_remote.run( + args = [ 'test', '-e', '{tdir}/err'.format(tdir=testdir) ], + wait=True, + check_status=False, + ) + exit_status = p.exitstatus + + if exit_status == 0: + log.info("osd %d has an error" % i) + raise Exception("osd %d error" % i) + + log_path = '/var/log/ceph/osd.%d.log' % (i) + + p = osd_remote.run( + args = [ + 'tail', '-1', log_path, + run.Raw('|'), + 'grep', '-q', 'end dump' + ], + wait=True, + check_status=False, + ) + exit_status = p.exitstatus + + if exit_status == 0: + log.info("osd %d dumped core" % i) + raise Exception("osd %d dumped core" % i) + + time.sleep(5) diff --git a/qa/tasks/divergent_priors.py b/qa/tasks/divergent_priors.py new file mode 100644 index 00000000..e000bb2b --- /dev/null +++ b/qa/tasks/divergent_priors.py @@ -0,0 +1,160 @@ +""" +Special case divergence test +""" +import logging +import time + +from teuthology import misc as teuthology +from tasks.util.rados import rados + + +log = logging.getLogger(__name__) + + +def task(ctx, config): + """ + Test handling of divergent entries with prior_version + prior to log_tail + + overrides: + ceph: + conf: + osd: + debug osd: 5 + + Requires 3 osds on a single test node. + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'divergent_priors task only accepts a dict for configuration' + + manager = ctx.managers['ceph'] + + while len(manager.get_osd_status()['up']) < 3: + time.sleep(10) + manager.flush_pg_stats([0, 1, 2]) + manager.raw_cluster_cmd('osd', 'set', 'noout') + manager.raw_cluster_cmd('osd', 'set', 'noin') + manager.raw_cluster_cmd('osd', 'set', 'nodown') + manager.wait_for_clean() + + # something that is always there + dummyfile = '/etc/fstab' + dummyfile2 = '/etc/resolv.conf' + + # create 1 pg pool + log.info('creating foo') + manager.raw_cluster_cmd('osd', 'pool', 'create', 'foo', '1') + + osds = [0, 1, 2] + for i in osds: + manager.set_config(i, osd_min_pg_log_entries=10) + manager.set_config(i, osd_max_pg_log_entries=10) + manager.set_config(i, osd_pg_log_trim_min=5) + + # determine primary + divergent = manager.get_pg_primary('foo', 0) + log.info("primary and soon to be divergent is %d", divergent) + non_divergent = list(osds) + non_divergent.remove(divergent) + + log.info('writing initial objects') + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + # write 100 objects + for i in range(100): + rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile]) + + manager.wait_for_clean() + + # blackhole non_divergent + log.info("blackholing osds %s", str(non_divergent)) + for i in non_divergent: + manager.set_config(i, objectstore_blackhole=1) + + DIVERGENT_WRITE = 5 + DIVERGENT_REMOVE = 5 + # Write some soon to be divergent + log.info('writing divergent objects') + for i in range(DIVERGENT_WRITE): + rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, + dummyfile2], wait=False) + # Remove some soon to be divergent + log.info('remove divergent objects') + for i in range(DIVERGENT_REMOVE): + rados(ctx, mon, ['-p', 'foo', 'rm', + 'existing_%d' % (i + DIVERGENT_WRITE)], wait=False) + time.sleep(10) + mon.run( + args=['killall', '-9', 'rados'], + wait=True, + check_status=False) + + # kill all the osds but leave divergent in + log.info('killing all the osds') + for i in osds: + manager.kill_osd(i) + for i in osds: + manager.mark_down_osd(i) + for i in 
non_divergent: + manager.mark_out_osd(i) + + # bring up non-divergent + log.info("bringing up non_divergent %s", str(non_divergent)) + for i in non_divergent: + manager.revive_osd(i) + for i in non_divergent: + manager.mark_in_osd(i) + + # write 1 non-divergent object (ensure that old divergent one is divergent) + objname = "existing_%d" % (DIVERGENT_WRITE + DIVERGENT_REMOVE) + log.info('writing non-divergent object ' + objname) + rados(ctx, mon, ['-p', 'foo', 'put', objname, dummyfile2]) + + manager.wait_for_recovery() + + # ensure no recovery of up osds first + log.info('delay recovery') + for i in non_divergent: + manager.wait_run_admin_socket( + 'osd', i, ['set_recovery_delay', '100000']) + + # bring in our divergent friend + log.info("revive divergent %d", divergent) + manager.raw_cluster_cmd('osd', 'set', 'noup') + manager.revive_osd(divergent) + + log.info('delay recovery divergent') + manager.wait_run_admin_socket( + 'osd', divergent, ['set_recovery_delay', '100000']) + + manager.raw_cluster_cmd('osd', 'unset', 'noup') + while len(manager.get_osd_status()['up']) < 3: + time.sleep(10) + + log.info('wait for peering') + rados(ctx, mon, ['-p', 'foo', 'put', 'foo', dummyfile]) + + # At this point the divergent_priors should have been detected + + log.info("killing divergent %d", divergent) + manager.kill_osd(divergent) + log.info("reviving divergent %d", divergent) + manager.revive_osd(divergent) + + time.sleep(20) + + log.info('allowing recovery') + # Set osd_recovery_delay_start back to 0 and kick the queue + for i in osds: + manager.raw_cluster_cmd('tell', 'osd.%d' % i, 'debug', + 'kick_recovery_wq', ' 0') + + log.info('reading divergent objects') + for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE): + exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i, + '/tmp/existing']) + assert exit_status == 0 + + log.info("success") diff --git a/qa/tasks/divergent_priors2.py b/qa/tasks/divergent_priors2.py new file mode 100644 index 00000000..4d4b07fc --- /dev/null +++ b/qa/tasks/divergent_priors2.py @@ -0,0 +1,192 @@ +""" +Special case divergence test with ceph-objectstore-tool export/remove/import +""" +import logging +import time + +from teuthology.exceptions import CommandFailedError +from teuthology import misc as teuthology +from tasks.util.rados import rados +import os + + +log = logging.getLogger(__name__) + + +def task(ctx, config): + """ + Test handling of divergent entries with prior_version + prior to log_tail and a ceph-objectstore-tool export/import + + overrides: + ceph: + conf: + osd: + debug osd: 5 + + Requires 3 osds on a single test node. 
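+    Identical to the divergent_priors task up to the point where the divergent
+    entries are detected; this variant then kills the divergent OSD, exports and
+    removes the PG with ceph-objectstore-tool, imports it back, and only then
+    lets recovery proceed.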
+ """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'divergent_priors task only accepts a dict for configuration' + + manager = ctx.managers['ceph'] + + while len(manager.get_osd_status()['up']) < 3: + time.sleep(10) + manager.flush_pg_stats([0, 1, 2]) + manager.raw_cluster_cmd('osd', 'set', 'noout') + manager.raw_cluster_cmd('osd', 'set', 'noin') + manager.raw_cluster_cmd('osd', 'set', 'nodown') + manager.wait_for_clean() + + # something that is always there + dummyfile = '/etc/fstab' + dummyfile2 = '/etc/resolv.conf' + testdir = teuthology.get_testdir(ctx) + + # create 1 pg pool + log.info('creating foo') + manager.raw_cluster_cmd('osd', 'pool', 'create', 'foo', '1') + + osds = [0, 1, 2] + for i in osds: + manager.set_config(i, osd_min_pg_log_entries=10) + manager.set_config(i, osd_max_pg_log_entries=10) + manager.set_config(i, osd_pg_log_trim_min=5) + + # determine primary + divergent = manager.get_pg_primary('foo', 0) + log.info("primary and soon to be divergent is %d", divergent) + non_divergent = list(osds) + non_divergent.remove(divergent) + + log.info('writing initial objects') + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + # write 100 objects + for i in range(100): + rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile]) + + manager.wait_for_clean() + + # blackhole non_divergent + log.info("blackholing osds %s", str(non_divergent)) + for i in non_divergent: + manager.set_config(i, objectstore_blackhole=1) + + DIVERGENT_WRITE = 5 + DIVERGENT_REMOVE = 5 + # Write some soon to be divergent + log.info('writing divergent objects') + for i in range(DIVERGENT_WRITE): + rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, + dummyfile2], wait=False) + # Remove some soon to be divergent + log.info('remove divergent objects') + for i in range(DIVERGENT_REMOVE): + rados(ctx, mon, ['-p', 'foo', 'rm', + 'existing_%d' % (i + DIVERGENT_WRITE)], wait=False) + time.sleep(10) + mon.run( + args=['killall', '-9', 'rados'], + wait=True, + check_status=False) + + # kill all the osds but leave divergent in + log.info('killing all the osds') + for i in osds: + manager.kill_osd(i) + for i in osds: + manager.mark_down_osd(i) + for i in non_divergent: + manager.mark_out_osd(i) + + # bring up non-divergent + log.info("bringing up non_divergent %s", str(non_divergent)) + for i in non_divergent: + manager.revive_osd(i) + for i in non_divergent: + manager.mark_in_osd(i) + + # write 1 non-divergent object (ensure that old divergent one is divergent) + objname = "existing_%d" % (DIVERGENT_WRITE + DIVERGENT_REMOVE) + log.info('writing non-divergent object ' + objname) + rados(ctx, mon, ['-p', 'foo', 'put', objname, dummyfile2]) + + manager.wait_for_recovery() + + # ensure no recovery of up osds first + log.info('delay recovery') + for i in non_divergent: + manager.wait_run_admin_socket( + 'osd', i, ['set_recovery_delay', '100000']) + + # bring in our divergent friend + log.info("revive divergent %d", divergent) + manager.raw_cluster_cmd('osd', 'set', 'noup') + manager.revive_osd(divergent) + + log.info('delay recovery divergent') + manager.wait_run_admin_socket( + 'osd', divergent, ['set_recovery_delay', '100000']) + + manager.raw_cluster_cmd('osd', 'unset', 'noup') + while len(manager.get_osd_status()['up']) < 3: + time.sleep(10) + + log.info('wait for peering') + rados(ctx, mon, ['-p', 'foo', 'put', 'foo', dummyfile]) + + # At this point the divergent_priors should have been detected + + log.info("killing 
divergent %d", divergent) + manager.kill_osd(divergent) + + # Export a pg + (exp_remote,) = ctx.\ + cluster.only('osd.{o}'.format(o=divergent)).remotes.keys() + FSPATH = manager.get_filepath() + JPATH = os.path.join(FSPATH, "journal") + prefix = ("sudo adjust-ulimits ceph-objectstore-tool " + "--data-path {fpath} --journal-path {jpath} " + "--log-file=" + "/var/log/ceph/objectstore_tool.$$.log ". + format(fpath=FSPATH, jpath=JPATH)) + pid = os.getpid() + expfile = os.path.join(testdir, "exp.{pid}.out".format(pid=pid)) + cmd = ((prefix + "--op export-remove --pgid 2.0 --file {file}"). + format(id=divergent, file=expfile)) + try: + exp_remote.sh(cmd, wait=True) + except CommandFailedError as e: + assert e.exitstatus == 0 + + cmd = ((prefix + "--op import --file {file}"). + format(id=divergent, file=expfile)) + try: + exp_remote.sh(cmd, wait=True) + except CommandFailedError as e: + assert e.exitstatus == 0 + + log.info("reviving divergent %d", divergent) + manager.revive_osd(divergent) + manager.wait_run_admin_socket('osd', divergent, ['dump_ops_in_flight']) + time.sleep(20); + + log.info('allowing recovery') + # Set osd_recovery_delay_start back to 0 and kick the queue + for i in osds: + manager.raw_cluster_cmd('tell', 'osd.%d' % i, 'debug', + 'kick_recovery_wq', ' 0') + + log.info('reading divergent objects') + for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE): + exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i, + '/tmp/existing']) + assert exit_status == 0 + + cmd = 'rm {file}'.format(file=expfile) + exp_remote.run(args=cmd, wait=True) + log.info("success") diff --git a/qa/tasks/dnsmasq.py b/qa/tasks/dnsmasq.py new file mode 100644 index 00000000..352ed246 --- /dev/null +++ b/qa/tasks/dnsmasq.py @@ -0,0 +1,170 @@ +""" +Task for dnsmasq configuration +""" +import contextlib +import logging + +from teuthology import misc +from teuthology.exceptions import ConfigError +from teuthology import contextutil +from teuthology import packaging +from tasks.util import get_remote_for_role + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def install_dnsmasq(remote): + """ + If dnsmasq is not installed, install it for the duration of the task. + """ + try: + existing = packaging.get_package_version(remote, 'dnsmasq') + except: + existing = None + + if existing is None: + packaging.install_package('dnsmasq', remote) + try: + yield + finally: + if existing is None: + packaging.remove_package('dnsmasq', remote) + +@contextlib.contextmanager +def backup_resolv(remote, path): + """ + Store a backup of resolv.conf in the testdir and restore it after the task. + """ + remote.run(args=['cp', '/etc/resolv.conf', path]) + try: + yield + finally: + # restore with 'cp' to avoid overwriting its security context + remote.run(args=['sudo', 'cp', path, '/etc/resolv.conf']) + remote.run(args=['rm', path]) + +@contextlib.contextmanager +def replace_resolv(remote, path): + """ + Update resolv.conf to point the nameserver at localhost. 
+ """ + misc.write_file(remote, path, "nameserver 127.0.0.1\n") + try: + # install it + if remote.os.package_type == "rpm": + # for centos ovh resolv.conf has immutable attribute set + remote.run(args=['sudo', 'chattr', '-i', '/etc/resolv.conf'], check_status=False) + remote.run(args=['sudo', 'cp', path, '/etc/resolv.conf']) + yield + finally: + remote.run(args=['rm', path]) + +@contextlib.contextmanager +def setup_dnsmasq(remote, testdir, cnames): + """ configure dnsmasq on the given remote, adding each cname given """ + log.info('Configuring dnsmasq on remote %s..', remote.name) + + # add address entries for each cname + dnsmasq = "server=8.8.8.8\nserver=8.8.4.4\n" + address_template = "address=/{cname}/{ip_address}\n" + for cname, ip_address in cnames.items(): + dnsmasq += address_template.format(cname=cname, ip_address=ip_address) + + # write to temporary dnsmasq file + dnsmasq_tmp = '/'.join((testdir, 'ceph.tmp')) + misc.write_file(remote, dnsmasq_tmp, dnsmasq) + + # move into /etc/dnsmasq.d/ + dnsmasq_path = '/etc/dnsmasq.d/ceph' + remote.run(args=['sudo', 'mv', dnsmasq_tmp, dnsmasq_path]) + # restore selinux context if necessary + remote.run(args=['sudo', 'restorecon', dnsmasq_path], check_status=False) + + # restart dnsmasq + remote.run(args=['sudo', 'systemctl', 'restart', 'dnsmasq']) + # verify dns name is set + remote.run(args=['ping', '-c', '4', next(iter(cnames.keys()))]) + + try: + yield + finally: + log.info('Removing dnsmasq configuration from remote %s..', remote.name) + # remove /etc/dnsmasq.d/ceph + remote.run(args=['sudo', 'rm', dnsmasq_path]) + # restart dnsmasq + remote.run(args=['sudo', 'systemctl', 'restart', 'dnsmasq']) + +@contextlib.contextmanager +def task(ctx, config): + """ + Configures dnsmasq to add cnames for teuthology remotes. The task expects a + dictionary, where each key is a role. If all cnames for that role use the + same address as that role, the cnames can be given as a list. For example, + this entry configures dnsmasq on the remote associated with client.0, adding + two cnames for the ip address associated with client.0: + + - dnsmasq: + client.0: + - client0.example.com + - c0.example.com + + If the addresses do not all match the given role, a dictionary can be given + to specify the ip address by its target role. For example: + + - dnsmasq: + client.0: + client.0.example.com: client.0 + client.1.example.com: client.1 + + Cnames that end with a . are treated as prefix for the existing hostname. + For example, if the remote for client.0 has a hostname of 'example.com', + this task will add cnames for dev.example.com and test.example.com: + + - dnsmasq: + client.0: [dev., test.] 
+ """ + # apply overrides + overrides = config.get('overrides', {}) + misc.deep_merge(config, overrides.get('dnsmasq', {})) + + # multiple roles may map to the same remote, so collect names by remote + remote_names = {} + for role, cnames in config.items(): + remote = get_remote_for_role(ctx, role) + if remote is None: + raise ConfigError('no remote for role %s' % role) + + names = remote_names.get(remote, {}) + + if isinstance(cnames, list): + # when given a list of cnames, point to local ip + for cname in cnames: + if cname.endswith('.'): + cname += remote.hostname + names[cname] = remote.ip_address + elif isinstance(cnames, dict): + # when given a dict, look up the remote ip for each + for cname, client in cnames.items(): + r = get_remote_for_role(ctx, client) + if r is None: + raise ConfigError('no remote for role %s' % client) + if cname.endswith('.'): + cname += r.hostname + names[cname] = r.ip_address + + remote_names[remote] = names + + testdir = misc.get_testdir(ctx) + resolv_bak = '/'.join((testdir, 'resolv.bak')) + resolv_tmp = '/'.join((testdir, 'resolv.tmp')) + + # run subtasks for each unique remote + subtasks = [] + for remote, cnames in remote_names.items(): + subtasks.extend([ lambda r=remote: install_dnsmasq(r) ]) + subtasks.extend([ lambda r=remote: backup_resolv(r, resolv_bak) ]) + subtasks.extend([ lambda r=remote: replace_resolv(r, resolv_tmp) ]) + subtasks.extend([ lambda r=remote, cn=cnames: setup_dnsmasq(r, testdir, cn) ]) + + with contextutil.nested(*subtasks): + yield diff --git a/qa/tasks/dump_stuck.py b/qa/tasks/dump_stuck.py new file mode 100644 index 00000000..fb2823f4 --- /dev/null +++ b/qa/tasks/dump_stuck.py @@ -0,0 +1,161 @@ +""" +Dump_stuck command +""" +import logging +import time + +from tasks import ceph_manager +from teuthology import misc as teuthology + + +log = logging.getLogger(__name__) + +def check_stuck(manager, num_inactive, num_unclean, num_stale, timeout=10): + """ + Do checks. Make sure get_stuck_pgs return the right amount of information, then + extract health information from the raw_cluster_cmd and compare the results with + values passed in. This passes if all asserts pass. + + :param num_manager: Ceph manager + :param num_inactive: number of inaactive pages that are stuck + :param num_unclean: number of unclean pages that are stuck + :paran num_stale: number of stale pages that are stuck + :param timeout: timeout value for get_stuck_pgs calls + """ + inactive = manager.get_stuck_pgs('inactive', timeout) + unclean = manager.get_stuck_pgs('unclean', timeout) + stale = manager.get_stuck_pgs('stale', timeout) + log.info('inactive %s / %d, unclean %s / %d, stale %s / %d', + len(inactive), num_inactive, + len(unclean), num_unclean, + len(stale), num_stale) + assert len(inactive) == num_inactive + assert len(unclean) == num_unclean + assert len(stale) == num_stale + +def task(ctx, config): + """ + Test the dump_stuck command. 
+ + :param ctx: Context + :param config: Configuration + """ + assert config is None, \ + 'dump_stuck requires no configuration' + assert teuthology.num_instances_of_type(ctx.cluster, 'osd') == 2, \ + 'dump_stuck requires exactly 2 osds' + + timeout = 60 + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + manager.flush_pg_stats([0, 1]) + manager.wait_for_clean(timeout) + + manager.raw_cluster_cmd('tell', 'mon.0', 'injectargs', '--', +# '--mon-osd-report-timeout 90', + '--mon-pg-stuck-threshold 10') + + # all active+clean + check_stuck( + manager, + num_inactive=0, + num_unclean=0, + num_stale=0, + ) + num_pgs = manager.get_num_pgs() + + manager.mark_out_osd(0) + time.sleep(timeout) + manager.flush_pg_stats([1]) + manager.wait_for_recovery(timeout) + + # all active+clean+remapped + check_stuck( + manager, + num_inactive=0, + num_unclean=0, + num_stale=0, + ) + + manager.mark_in_osd(0) + manager.flush_pg_stats([0, 1]) + manager.wait_for_clean(timeout) + + # all active+clean + check_stuck( + manager, + num_inactive=0, + num_unclean=0, + num_stale=0, + ) + + log.info('stopping first osd') + manager.kill_osd(0) + manager.mark_down_osd(0) + manager.wait_for_active(timeout) + + log.info('waiting for all to be unclean') + starttime = time.time() + done = False + while not done: + try: + check_stuck( + manager, + num_inactive=0, + num_unclean=num_pgs, + num_stale=0, + ) + done = True + except AssertionError: + # wait up to 15 minutes to become stale + if time.time() - starttime > 900: + raise + + + log.info('stopping second osd') + manager.kill_osd(1) + manager.mark_down_osd(1) + + log.info('waiting for all to be stale') + starttime = time.time() + done = False + while not done: + try: + check_stuck( + manager, + num_inactive=0, + num_unclean=num_pgs, + num_stale=num_pgs, + ) + done = True + except AssertionError: + # wait up to 15 minutes to become stale + if time.time() - starttime > 900: + raise + + log.info('reviving') + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'osd'): + manager.revive_osd(id_) + manager.mark_in_osd(id_) + while True: + try: + manager.flush_pg_stats([0, 1]) + break + except Exception: + log.exception('osds must not be started yet, waiting...') + time.sleep(1) + manager.wait_for_clean(timeout) + + check_stuck( + manager, + num_inactive=0, + num_unclean=0, + num_stale=0, + ) diff --git a/qa/tasks/ec_lost_unfound.py b/qa/tasks/ec_lost_unfound.py new file mode 100644 index 00000000..e12b6901 --- /dev/null +++ b/qa/tasks/ec_lost_unfound.py @@ -0,0 +1,158 @@ +""" +Lost_unfound +""" +import logging +import time +from tasks import ceph_manager +from tasks.util.rados import rados +from teuthology import misc as teuthology +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Test handling of lost objects on an ec pool. 
+ + A pretty rigid cluster is brought up and tested by this task + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'lost_unfound task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + manager.wait_for_clean() + + profile = config.get('erasure_code_profile', { + 'k': '2', + 'm': '2', + 'crush-failure-domain': 'osd' + }) + profile_name = profile.get('name', 'lost_unfound') + manager.create_erasure_code_profile(profile_name, profile) + pool = manager.create_pool_with_unique_name( + erasure_code_profile_name=profile_name, + min_size=2) + + # something that is always there, readable and never empty + dummyfile = '/etc/group' + + # kludge to make sure they get a map + rados(ctx, mon, ['-p', pool, 'put', 'dummy', dummyfile]) + + manager.flush_pg_stats([0, 1]) + manager.wait_for_recovery() + + # create old objects + for f in range(1, 10): + rados(ctx, mon, ['-p', pool, 'put', 'existing_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', pool, 'put', 'existed_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', pool, 'rm', 'existed_%d' % f]) + + # delay recovery, and make the pg log very long (to prevent backfill) + manager.raw_cluster_cmd( + 'tell', 'osd.1', + 'injectargs', + '--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000' + ) + + manager.kill_osd(0) + manager.mark_down_osd(0) + manager.kill_osd(3) + manager.mark_down_osd(3) + + for f in range(1, 10): + rados(ctx, mon, ['-p', pool, 'put', 'new_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', pool, 'put', 'existed_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', pool, 'put', 'existing_%d' % f, dummyfile]) + + # take out osd.1 and a necessary shard of those objects.
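(Why these objects end up unfound: with the k=2/m=2 profile above, the writes issued while osd.0 and osd.3 were down placed shards only on osd.1 and osd.2. Marking osd.1 lost leaves a single shard, fewer than k, so the revived osd.0 and osd.3 cannot reconstruct the data and the objects are reported unfound below.)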
+ manager.kill_osd(1) + manager.mark_down_osd(1) + manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it') + manager.revive_osd(0) + manager.wait_till_osd_is_up(0) + manager.revive_osd(3) + manager.wait_till_osd_is_up(3) + + manager.flush_pg_stats([0, 2, 3]) + manager.wait_till_active() + manager.flush_pg_stats([0, 2, 3]) + + # verify that there are unfound objects + unfound = manager.get_num_unfound_objects() + log.info("there are %d unfound objects" % unfound) + assert unfound + + testdir = teuthology.get_testdir(ctx) + procs = [] + if config.get('parallel_bench', True): + procs.append(mon.run( + args=[ + "/bin/sh", "-c", + " ".join(['adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage', + 'rados', + '--no-log-to-stderr', + '--name', 'client.admin', + '-b', str(4<<10), + '-p' , pool, + '-t', '20', + 'bench', '240', 'write', + ]).format(tdir=testdir), + ], + logger=log.getChild('radosbench.{id}'.format(id='client.admin')), + stdin=run.PIPE, + wait=False + )) + time.sleep(10) + + # mark stuff lost + pgs = manager.get_pg_stats() + for pg in pgs: + if pg['stat_sum']['num_objects_unfound'] > 0: + # verify that i can list them direct from the osd + log.info('listing missing/lost in %s state %s', pg['pgid'], + pg['state']); + m = manager.list_pg_unfound(pg['pgid']) + log.info('%s' % m) + assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound'] + + log.info("reverting unfound in %s", pg['pgid']) + manager.raw_cluster_cmd('pg', pg['pgid'], + 'mark_unfound_lost', 'delete') + else: + log.info("no unfound in %s", pg['pgid']) + + manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5') + manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5') + manager.raw_cluster_cmd('tell', 'osd.3', 'debug', 'kick_recovery_wq', '5') + manager.flush_pg_stats([0, 2, 3]) + manager.wait_for_recovery() + + if not config.get('parallel_bench', True): + time.sleep(20) + + # verify result + for f in range(1, 10): + err = rados(ctx, mon, ['-p', pool, 'get', 'new_%d' % f, '-']) + assert err + err = rados(ctx, mon, ['-p', pool, 'get', 'existed_%d' % f, '-']) + assert err + err = rados(ctx, mon, ['-p', pool, 'get', 'existing_%d' % f, '-']) + assert err + + # see if osd.1 can cope + manager.revive_osd(1) + manager.wait_till_osd_is_up(1) + manager.wait_for_clean() + run.wait(procs) diff --git a/qa/tasks/exec_on_cleanup.py b/qa/tasks/exec_on_cleanup.py new file mode 100644 index 00000000..a7c7ee5d --- /dev/null +++ b/qa/tasks/exec_on_cleanup.py @@ -0,0 +1,61 @@ +""" +Exececute custom commands during unwind/cleanup +""" +import logging +import contextlib + +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Execute commands on a given role + + tasks: + - ceph: + - kclient: [client.a] + - exec: + client.a: + - "echo 'module libceph +p' > /sys/kernel/debug/dynamic_debug/control" + - "echo 'module ceph +p' > /sys/kernel/debug/dynamic_debug/control" + - interactive: + + It stops and fails with the first command that does not return on success. It means + that if the first command fails, the second won't run at all. + + To avoid confusion it is recommended to explicitly enclose the commands in + double quotes. For instance if the command is false (without double quotes) it will + be interpreted as a boolean by the YAML parser. 
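An exec_on_cleanup entry (referenced by its module name) follows the same shape as the exec example above, but its commands run during unwind. A minimal sketch, with an illustrative role and command only:

    tasks:
    - exec_on_cleanup:
        client.0:
          - "rm -f /tmp/some-scratch-file"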
+ + :param ctx: Context + :param config: Configuration + """ + try: + yield + finally: + log.info('Executing custom commands...') + assert isinstance(config, dict), "task exec got invalid config" + + testdir = teuthology.get_testdir(ctx) + + if 'all' in config and len(config) == 1: + a = config['all'] + roles = teuthology.all_roles(ctx.cluster) + config = dict((id_, a) for id_ in roles) + + for role, ls in config.items(): + (remote,) = ctx.cluster.only(role).remotes.keys() + log.info('Running commands on role %s host %s', role, remote.name) + for c in ls: + c.replace('$TESTDIR', testdir) + remote.run( + args=[ + 'sudo', + 'TESTDIR={tdir}'.format(tdir=testdir), + 'bash', + '-c', + c], + ) + diff --git a/qa/tasks/filestore_idempotent.py b/qa/tasks/filestore_idempotent.py new file mode 100644 index 00000000..319bef76 --- /dev/null +++ b/qa/tasks/filestore_idempotent.py @@ -0,0 +1,83 @@ +""" +Filestore/filejournal handler +""" +import logging +from teuthology.orchestra import run +import random + +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Test filestore/filejournal handling of non-idempotent events. + + Currently this is a kludge; we require the ceph task precedes us just + so that we get the tarball installed to run the test binary. + + :param ctx: Context + :param config: Configuration + """ + assert config is None or isinstance(config, list) \ + or isinstance(config, dict), \ + "task only supports a list or dictionary for configuration" + all_clients = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + if config is None: + config = all_clients + if isinstance(config, list): + config = dict.fromkeys(config) + clients = config.keys() + + # just use the first client... + client = next(iter(clients)) + (remote,) = ctx.cluster.only(client).remotes.keys() + + testdir = teuthology.get_testdir(ctx) + + dir = '%s/ceph.data/test.%s' % (testdir, client) + + seed = int(random.uniform(1,100)) + start = 800 + random.randint(800,1200) + end = start + 50 + + try: + log.info('creating a working dir') + remote.run(args=['mkdir', dir]) + remote.run( + args=[ + 'cd', dir, + run.Raw('&&'), + 'wget','-q', '-Orun_seed_to.sh', + 'http://git.ceph.com/?p=ceph.git;a=blob_plain;f=src/test/objectstore/run_seed_to.sh;hb=HEAD', + run.Raw('&&'), + 'wget','-q', '-Orun_seed_to_range.sh', + 'http://git.ceph.com/?p=ceph.git;a=blob_plain;f=src/test/objectstore/run_seed_to_range.sh;hb=HEAD', + run.Raw('&&'), + 'chmod', '+x', 'run_seed_to.sh', 'run_seed_to_range.sh', + ]); + + log.info('running a series of tests') + proc = remote.run( + args=[ + 'cd', dir, + run.Raw('&&'), + './run_seed_to_range.sh', str(seed), str(start), str(end), + ], + wait=False, + check_status=False) + result = proc.wait() + + if result != 0: + remote.run( + args=[ + 'cp', '-a', dir, '{tdir}/archive/idempotent_failure'.format(tdir=testdir), + ]) + raise Exception("./run_seed_to_range.sh errored out") + + finally: + remote.run(args=[ + 'rm', '-rf', '--', dir + ]) + diff --git a/qa/tasks/fs.py b/qa/tasks/fs.py new file mode 100644 index 00000000..ca84dc7a --- /dev/null +++ b/qa/tasks/fs.py @@ -0,0 +1,66 @@ +""" +CephFS sub-tasks. +""" + +import logging +import re +import six + +from tasks.cephfs.filesystem import Filesystem + +log = logging.getLogger(__name__) + +def clients_evicted(ctx, config): + """ + Check clients are evicted, unmount (cleanup) if so. 
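The config maps each client to whether it is expected to have been evicted. A sketch of a suite entry (names illustrative) might look like:

    tasks:
    - fs.clients_evicted:
        clients:
          client.0: true
          client.1: false

With no config, every client in ctx.mounts is expected to have been evicted and blacklisted; a client mapped to false must still hold a session with some MDS rank.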
+ """ + + if config is None: + config = {} + assert isinstance(config, dict), \ + 'task only accepts a dict for configuration' + + clients = config.get('clients') + + if clients is None: + clients = {("client."+client_id): True for client_id in ctx.mounts} + + log.info("clients is {}".format(str(clients))) + + fs = Filesystem(ctx) + status = fs.status() + + has_session = set() + mounts = {} + for client in clients: + client_id = re.match("^client.([0-9]+)$", client).groups(1)[0] + mounts[client] = ctx.mounts.get(client_id) + + for rank in fs.get_ranks(status=status): + ls = fs.rank_asok(['session', 'ls'], rank=rank['rank'], status=status) + for session in ls: + for client, evicted in six.viewitems(clients): + mount = mounts.get(client) + if mount is not None: + global_id = mount.get_global_id() + if session['id'] == global_id: + if evicted: + raise RuntimeError("client still has session: {}".format(str(session))) + else: + log.info("client {} has a session with MDS {}.{}".format(client, fs.id, rank['rank'])) + has_session.add(client) + + no_session = set(clients) - has_session + should_assert = False + for client, evicted in six.viewitems(clients): + mount = mounts.get(client) + if mount is not None: + if evicted: + log.info("confirming client {} is blacklisted".format(client)) + assert mount.is_blacklisted() + elif client in no_session: + log.info("client {} should not be evicted but has no session with an MDS".format(client)) + mount.is_blacklisted() # for debugging + should_assert = True + if should_assert: + raise RuntimeError("some clients which should not be evicted have no session with an MDS?") diff --git a/qa/tasks/kclient.py b/qa/tasks/kclient.py new file mode 100644 index 00000000..ce0d73f5 --- /dev/null +++ b/qa/tasks/kclient.py @@ -0,0 +1,130 @@ +""" +Mount/unmount a ``kernel`` client. +""" +import contextlib +import logging + +from teuthology.misc import deep_merge +from teuthology.orchestra.run import CommandFailedError +from teuthology import misc +from teuthology.contextutil import MaxWhileTries +from tasks.cephfs.kernel_mount import KernelMount + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Mount/unmount a ``kernel`` client. + + The config is optional and defaults to mounting on all clients. If + a config is given, it is expected to be a list of clients to do + this operation on. This lets you e.g. set up one client with + ``ceph-fuse`` and another with ``kclient``. + + Example that mounts all clients:: + + tasks: + - ceph: + - kclient: + - interactive: + + Example that uses both ``kclient` and ``ceph-fuse``:: + + tasks: + - ceph: + - ceph-fuse: [client.0] + - kclient: [client.1] + - interactive: + + + Pass a dictionary instead of lists to specify per-client config: + + tasks: + -kclient: + client.0: + debug: true + + :param ctx: Context + :param config: Configuration + """ + log.info('Mounting kernel clients...') + assert config is None or isinstance(config, list) or isinstance(config, dict), \ + "task kclient got invalid config" + + if config is None: + config = ['client.{id}'.format(id=id_) + for id_ in misc.all_roles_of_type(ctx.cluster, 'client')] + + if isinstance(config, list): + client_roles = config + config = dict([r, dict()] for r in client_roles) + elif isinstance(config, dict): + client_roles = filter(lambda x: 'client.' 
in x, config.keys()) + else: + raise ValueError("Invalid config object: {0} ({1})".format(config, config.__class__)) + + # config has been converted to a dict by this point + overrides = ctx.config.get('overrides', {}) + deep_merge(config, overrides.get('kclient', {})) + + clients = list(misc.get_clients(ctx=ctx, roles=client_roles)) + + test_dir = misc.get_testdir(ctx) + + mounts = {} + for id_, remote in clients: + client_config = config.get("client.%s" % id_) + if client_config is None: + client_config = {} + + if config.get("disabled", False) or not client_config.get('mounted', True): + continue + + kernel_mount = KernelMount( + ctx, + test_dir, + id_, + remote, + ctx.teuthology_config.get('ipmi_user', None), + ctx.teuthology_config.get('ipmi_password', None), + ctx.teuthology_config.get('ipmi_domain', None) + ) + + mounts[id_] = kernel_mount + + if client_config.get('debug', False): + remote.run(args=["sudo", "bash", "-c", "echo 'module ceph +p' > /sys/kernel/debug/dynamic_debug/control"]) + remote.run(args=["sudo", "bash", "-c", "echo 'module libceph +p' > /sys/kernel/debug/dynamic_debug/control"]) + + kernel_mount.mount() + + + def umount_all(): + log.info('Unmounting kernel clients...') + + forced = False + for mount in mounts.values(): + if mount.is_mounted(): + try: + mount.umount() + except (CommandFailedError, MaxWhileTries): + log.warning("Ordinary umount failed, forcing...") + forced = True + mount.umount_wait(force=True) + + return forced + + ctx.mounts = mounts + try: + yield mounts + except: + umount_all() # ignore forced retval, we are already in error handling + finally: + + forced = umount_all() + if forced: + # The context managers within the kclient manager worked (i.e. + # the test workload passed) but for some reason we couldn't + # umount, so turn this into a test failure. + raise RuntimeError("Kernel mounts did not umount cleanly") diff --git a/qa/tasks/keystone.py b/qa/tasks/keystone.py new file mode 100644 index 00000000..4a22a885 --- /dev/null +++ b/qa/tasks/keystone.py @@ -0,0 +1,397 @@ +""" +Deploy and configure Keystone for Teuthology +""" +import argparse +import contextlib +import logging + +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.orchestra import run +from teuthology.packaging import install_package +from teuthology.packaging import remove_package + +log = logging.getLogger(__name__) + + +def get_keystone_dir(ctx): + return '{tdir}/keystone'.format(tdir=teuthology.get_testdir(ctx)) + +def run_in_keystone_dir(ctx, client, args, **kwargs): + return ctx.cluster.only(client).run( + args=[ 'cd', get_keystone_dir(ctx), run.Raw('&&'), ] + args, + **kwargs + ) + +def get_toxvenv_dir(ctx): + return ctx.tox.venv_path + +def toxvenv_sh(ctx, remote, args, **kwargs): + activate = get_toxvenv_dir(ctx) + '/bin/activate' + return remote.sh(['source', activate, run.Raw('&&')] + args, **kwargs) + +def run_in_keystone_venv(ctx, client, args): + run_in_keystone_dir(ctx, client, + [ 'source', + '.tox/venv/bin/activate', + run.Raw('&&') + ] + args) + +def get_keystone_venved_cmd(ctx, cmd, args): + kbindir = get_keystone_dir(ctx) + '/.tox/venv/bin/' + return [ kbindir + 'python', kbindir + cmd ] + args + +@contextlib.contextmanager +def download(ctx, config): + """ + Download the Keystone from github. + Remove downloaded file upon exit. + + The context passed in should be identical to the context + passed in to the main task. 
+ """ + assert isinstance(config, dict) + log.info('Downloading keystone...') + keystonedir = get_keystone_dir(ctx) + + for (client, cconf) in config.items(): + ctx.cluster.only(client).run( + args=[ + 'git', 'clone', + '-b', cconf.get('force-branch', 'master'), + 'https://github.com/openstack/keystone.git', + keystonedir, + ], + ) + + sha1 = cconf.get('sha1') + if sha1 is not None: + run_in_keystone_dir(ctx, client, [ + 'git', 'reset', '--hard', sha1, + ], + ) + + # hax for http://tracker.ceph.com/issues/23659 + run_in_keystone_dir(ctx, client, [ + 'sed', '-i', + 's/pysaml2<4.0.3,>=2.4.0/pysaml2>=4.5.0/', + 'requirements.txt' + ], + ) + try: + yield + finally: + log.info('Removing keystone...') + for client in config: + ctx.cluster.only(client).run( + args=[ 'rm', '-rf', keystonedir ], + ) + +@contextlib.contextmanager +def install_packages(ctx, config): + """ + Download the packaged dependencies of Keystone. + Remove install packages upon exit. + + The context passed in should be identical to the context + passed in to the main task. + """ + assert isinstance(config, dict) + log.info('Installing packages for Keystone...') + + packages = {} + for (client, _) in config.items(): + (remote,) = ctx.cluster.only(client).remotes.keys() + # use bindep to read which dependencies we need from keystone/bindep.txt + toxvenv_sh(ctx, remote, ['pip', 'install', 'bindep']) + packages[client] = toxvenv_sh(ctx, remote, + ['bindep', '--brief', '--file', '{}/bindep.txt'.format(get_keystone_dir(ctx))], + check_status=False).splitlines() # returns 1 on success? + # install python3 as bindep installs python34 which is not supported + # by keystone or tempest's tox based tests. + packages[client].append('python3') + for dep in packages[client]: + install_package(dep, remote) + try: + yield + finally: + log.info('Removing packaged dependencies of Keystone...') + + for (client, _) in config.items(): + (remote,) = ctx.cluster.only(client).remotes.keys() + for dep in packages[client]: + remove_package(dep, remote) + +@contextlib.contextmanager +def setup_venv(ctx, config): + """ + Setup the virtualenv for Keystone using tox. 
+ """ + assert isinstance(config, dict) + log.info('Setting up virtualenv for keystone...') + for (client, _) in config.items(): + run_in_keystone_dir(ctx, client, + [ 'source', + '{tvdir}/bin/activate'.format(tvdir=get_toxvenv_dir(ctx)), + run.Raw('&&'), + 'tox', '-e', 'venv', '--notest' + ]) + + run_in_keystone_venv(ctx, client, + [ 'pip', 'install', 'python-openstackclient' ]) + try: + yield + finally: + pass + +@contextlib.contextmanager +def configure_instance(ctx, config): + assert isinstance(config, dict) + log.info('Configuring keystone...') + + keyrepo_dir = '{kdir}/etc/fernet-keys'.format(kdir=get_keystone_dir(ctx)) + for (client, _) in config.items(): + # prepare the config file + run_in_keystone_dir(ctx, client, + [ + 'cp', '-f', + 'etc/keystone.conf.sample', + 'etc/keystone.conf' + ]) + run_in_keystone_dir(ctx, client, + [ + 'sed', + '-e', 's/#admin_token =.*/admin_token = ADMIN/', + '-i', 'etc/keystone.conf' + ]) + run_in_keystone_dir(ctx, client, + [ + 'sed', + '-e', 's^#key_repository =.*^key_repository = {kr}^'.format(kr = keyrepo_dir), + '-i', 'etc/keystone.conf' + ]) + + # prepare key repository for Fetnet token authenticator + run_in_keystone_dir(ctx, client, [ 'mkdir', '-p', keyrepo_dir ]) + run_in_keystone_venv(ctx, client, [ 'keystone-manage', 'fernet_setup' ]) + + # sync database + run_in_keystone_venv(ctx, client, [ 'keystone-manage', 'db_sync' ]) + yield + +@contextlib.contextmanager +def run_keystone(ctx, config): + assert isinstance(config, dict) + log.info('Configuring keystone...') + + for (client, _) in config.items(): + (remote,) = ctx.cluster.only(client).remotes.keys() + cluster_name, _, client_id = teuthology.split_role(client) + + # start the public endpoint + client_public_with_id = 'keystone.public' + '.' + client_id + + public_host, public_port = ctx.keystone.public_endpoints[client] + run_cmd = get_keystone_venved_cmd(ctx, 'keystone-wsgi-public', + [ '--host', public_host, '--port', str(public_port), + # Let's put the Keystone in background, wait for EOF + # and after receiving it, send SIGTERM to the daemon. + # This crazy hack is because Keystone, in contrast to + # our other daemons, doesn't quit on stdin.close(). + # Teuthology relies on this behaviour. + run.Raw('& { read; kill %1; }') + ] + ) + ctx.daemons.add_daemon( + remote, 'keystone', client_public_with_id, + cluster=cluster_name, + args=run_cmd, + logger=log.getChild(client), + stdin=run.PIPE, + cwd=get_keystone_dir(ctx), + wait=False, + check_status=False, + ) + + # start the admin endpoint + client_admin_with_id = 'keystone.admin' + '.' 
+ client_id + + admin_host, admin_port = ctx.keystone.admin_endpoints[client] + run_cmd = get_keystone_venved_cmd(ctx, 'keystone-wsgi-admin', + [ '--host', admin_host, '--port', str(admin_port), + run.Raw('& { read; kill %1; }') + ] + ) + ctx.daemons.add_daemon( + remote, 'keystone', client_admin_with_id, + cluster=cluster_name, + args=run_cmd, + logger=log.getChild(client), + stdin=run.PIPE, + cwd=get_keystone_dir(ctx), + wait=False, + check_status=False, + ) + + # sleep driven synchronization + run_in_keystone_venv(ctx, client, [ 'sleep', '15' ]) + try: + yield + finally: + log.info('Stopping Keystone admin instance') + ctx.daemons.get_daemon('keystone', client_admin_with_id, + cluster_name).stop() + + log.info('Stopping Keystone public instance') + ctx.daemons.get_daemon('keystone', client_public_with_id, + cluster_name).stop() + + +def dict_to_args(special, items): + """ + Transform + [(key1, val1), (special, val_special), (key3, val3) ] + into: + [ '--key1', 'val1', '--key3', 'val3', 'val_special' ] + """ + args=[] + for (k, v) in items: + if k == special: + special_val = v + else: + args.append('--{k}'.format(k=k)) + args.append(v) + if special_val: + args.append(special_val) + return args + +def run_section_cmds(ctx, cclient, section_cmd, special, + section_config_list): + admin_host, admin_port = ctx.keystone.admin_endpoints[cclient] + + auth_section = [ + ( 'os-token', 'ADMIN' ), + ( 'os-url', 'http://{host}:{port}/v2.0'.format(host=admin_host, + port=admin_port) ), + ] + + for section_item in section_config_list: + run_in_keystone_venv(ctx, cclient, + [ 'openstack' ] + section_cmd.split() + + dict_to_args(special, auth_section + list(section_item.items()))) + +def create_endpoint(ctx, cclient, service, url): + endpoint_section = { + 'service': service, + 'publicurl': url, + } + return run_section_cmds(ctx, cclient, 'endpoint create', 'service', + [ endpoint_section ]) + +@contextlib.contextmanager +def fill_keystone(ctx, config): + assert isinstance(config, dict) + + for (cclient, cconfig) in config.items(): + # configure tenants/projects + run_section_cmds(ctx, cclient, 'project create', 'name', + cconfig['tenants']) + run_section_cmds(ctx, cclient, 'user create', 'name', + cconfig['users']) + run_section_cmds(ctx, cclient, 'role create', 'name', + cconfig['roles']) + run_section_cmds(ctx, cclient, 'role add', 'name', + cconfig['role-mappings']) + run_section_cmds(ctx, cclient, 'service create', 'name', + cconfig['services']) + + public_host, public_port = ctx.keystone.public_endpoints[cclient] + url = 'http://{host}:{port}/v2.0'.format(host=public_host, + port=public_port) + create_endpoint(ctx, cclient, 'keystone', url) + # for the deferred endpoint creation; currently it's used in rgw.py + ctx.keystone.create_endpoint = create_endpoint + + # sleep driven synchronization -- just in case + run_in_keystone_venv(ctx, cclient, [ 'sleep', '3' ]) + try: + yield + finally: + pass + +def assign_ports(ctx, config, initial_port): + """ + Assign port numbers starting from @initial_port + """ + port = initial_port + role_endpoints = {} + for remote, roles_for_host in ctx.cluster.remotes.items(): + for role in roles_for_host: + if role in config: + role_endpoints[role] = (remote.name.split('@')[1], port) + port += 1 + + return role_endpoints + +@contextlib.contextmanager +def task(ctx, config): + """ + Deploy and configure Keystone + + Example of configuration: + + - install: + - ceph: + - tox: [ client.0 ] + - keystone: + client.0: + force-branch: master + tenants: + - name: admin + 
description: Admin Tenant + users: + - name: admin + password: ADMIN + project: admin + roles: [ name: admin, name: Member ] + role-mappings: + - name: admin + user: admin + project: admin + services: + - name: keystone + type: identity + description: Keystone Identity Service + - name: swift + type: object-store + description: Swift Service + """ + assert config is None or isinstance(config, list) \ + or isinstance(config, dict), \ + "task keystone only supports a list or dictionary for configuration" + + if not ctx.tox: + raise ConfigError('keystone must run after the tox task') + + all_clients = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + if config is None: + config = all_clients + if isinstance(config, list): + config = dict.fromkeys(config) + + log.debug('Keystone config is %s', config) + + ctx.keystone = argparse.Namespace() + ctx.keystone.public_endpoints = assign_ports(ctx, config, 5000) + ctx.keystone.admin_endpoints = assign_ports(ctx, config, 35357) + + with contextutil.nested( + lambda: download(ctx=ctx, config=config), + lambda: install_packages(ctx=ctx, config=config), + lambda: setup_venv(ctx=ctx, config=config), + lambda: configure_instance(ctx=ctx, config=config), + lambda: run_keystone(ctx=ctx, config=config), + lambda: fill_keystone(ctx=ctx, config=config), + ): + yield diff --git a/qa/tasks/locktest.py b/qa/tasks/locktest.py new file mode 100755 index 00000000..9de5ba40 --- /dev/null +++ b/qa/tasks/locktest.py @@ -0,0 +1,134 @@ +""" +locktests +""" +import logging + +from teuthology.orchestra import run +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Run locktests, from the xfstests suite, on the given + clients. Whether the clients are ceph-fuse or kernel does not + matter, and the two clients can refer to the same mount. + + The config is a list of two clients to run the locktest on. The + first client will be the host. + + For example: + tasks: + - ceph: + - ceph-fuse: [client.0, client.1] + - locktest: + [client.0, client.1] + + This task does not yield; there would be little point. 
+ + :param ctx: Context + :param config: Configuration + """ + + assert isinstance(config, list) + log.info('fetching and building locktests...') + (host,) = ctx.cluster.only(config[0]).remotes + (client,) = ctx.cluster.only(config[1]).remotes + ( _, _, host_id) = config[0].partition('.') + ( _, _, client_id) = config[1].partition('.') + testdir = teuthology.get_testdir(ctx) + hostmnt = '{tdir}/mnt.{id}'.format(tdir=testdir, id=host_id) + clientmnt = '{tdir}/mnt.{id}'.format(tdir=testdir, id=client_id) + + try: + for client_name in config: + log.info('building on {client_}'.format(client_=client_name)) + ctx.cluster.only(client_name).run( + args=[ + # explicitly does not support multiple autotest tasks + # in a single run; the result archival would conflict + 'mkdir', '{tdir}/archive/locktest'.format(tdir=testdir), + run.Raw('&&'), + 'mkdir', '{tdir}/locktest'.format(tdir=testdir), + run.Raw('&&'), + 'wget', + '-nv', + 'https://raw.github.com/gregsfortytwo/xfstests-ceph/master/src/locktest.c', + '-O', '{tdir}/locktest/locktest.c'.format(tdir=testdir), + run.Raw('&&'), + 'g++', '{tdir}/locktest/locktest.c'.format(tdir=testdir), + '-o', '{tdir}/locktest/locktest'.format(tdir=testdir) + ], + logger=log.getChild('locktest_client.{id}'.format(id=client_name)), + ) + + log.info('built locktest on each client') + + host.run(args=['sudo', 'touch', + '{mnt}/locktestfile'.format(mnt=hostmnt), + run.Raw('&&'), + 'sudo', 'chown', 'ubuntu.ubuntu', + '{mnt}/locktestfile'.format(mnt=hostmnt) + ] + ) + + log.info('starting on host') + hostproc = host.run( + args=[ + '{tdir}/locktest/locktest'.format(tdir=testdir), + '-p', '6788', + '-d', + '{mnt}/locktestfile'.format(mnt=hostmnt), + ], + wait=False, + logger=log.getChild('locktest.host'), + ) + log.info('starting on client') + (_,_,hostaddr) = host.name.partition('@') + clientproc = client.run( + args=[ + '{tdir}/locktest/locktest'.format(tdir=testdir), + '-p', '6788', + '-d', + '-h', hostaddr, + '{mnt}/locktestfile'.format(mnt=clientmnt), + ], + logger=log.getChild('locktest.client'), + wait=False + ) + + hostresult = hostproc.wait() + clientresult = clientproc.wait() + if (hostresult != 0) or (clientresult != 0): + raise Exception("Did not pass locking test!") + log.info('finished locktest executable with results {r} and {s}'. 
\ + format(r=hostresult, s=clientresult)) + + finally: + log.info('cleaning up host dir') + host.run( + args=[ + 'mkdir', '-p', '{tdir}/locktest'.format(tdir=testdir), + run.Raw('&&'), + 'rm', '-f', '{tdir}/locktest/locktest.c'.format(tdir=testdir), + run.Raw('&&'), + 'rm', '-f', '{tdir}/locktest/locktest'.format(tdir=testdir), + run.Raw('&&'), + 'rmdir', '{tdir}/locktest' + ], + logger=log.getChild('.{id}'.format(id=config[0])), + ) + log.info('cleaning up client dir') + client.run( + args=[ + 'mkdir', '-p', '{tdir}/locktest'.format(tdir=testdir), + run.Raw('&&'), + 'rm', '-f', '{tdir}/locktest/locktest.c'.format(tdir=testdir), + run.Raw('&&'), + 'rm', '-f', '{tdir}/locktest/locktest'.format(tdir=testdir), + run.Raw('&&'), + 'rmdir', '{tdir}/locktest'.format(tdir=testdir) + ], + logger=log.getChild('.{id}'.format(\ + id=config[1])), + ) diff --git a/qa/tasks/logrotate.conf b/qa/tasks/logrotate.conf new file mode 100644 index 00000000..b0cb8012 --- /dev/null +++ b/qa/tasks/logrotate.conf @@ -0,0 +1,13 @@ +/var/log/ceph/*{daemon_type}*.log {{ + rotate 100 + size {max_size} + compress + sharedscripts + postrotate + killall {daemon_type} -1 || true + endscript + missingok + notifempty + su root root +}} + diff --git a/qa/tasks/lost_unfound.py b/qa/tasks/lost_unfound.py new file mode 100644 index 00000000..ab17a95d --- /dev/null +++ b/qa/tasks/lost_unfound.py @@ -0,0 +1,176 @@ +""" +Lost_unfound +""" +import logging +import time +from tasks import ceph_manager +from tasks.util.rados import rados +from teuthology import misc as teuthology +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Test handling of lost objects. + + A pretty rigid cluseter is brought up andtested by this task + """ + POOL = 'unfound_pool' + if config is None: + config = {} + assert isinstance(config, dict), \ + 'lost_unfound task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + while len(manager.get_osd_status()['up']) < 3: + time.sleep(10) + + manager.wait_for_clean() + + manager.create_pool(POOL) + + # something that is always there + dummyfile = '/etc/fstab' + + # take an osd out until the very end + manager.kill_osd(2) + manager.mark_down_osd(2) + manager.mark_out_osd(2) + + # kludge to make sure they get a map + rados(ctx, mon, ['-p', POOL, 'put', 'dummy', dummyfile]) + + manager.flush_pg_stats([0, 1]) + manager.wait_for_recovery() + + # create old objects + for f in range(1, 10): + rados(ctx, mon, ['-p', POOL, 'put', 'existing_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', POOL, 'put', 'existed_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', POOL, 'rm', 'existed_%d' % f]) + + # delay recovery, and make the pg log very long (to prevent backfill) + manager.raw_cluster_cmd( + 'tell', 'osd.1', + 'injectargs', + '--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000' + ) + + manager.kill_osd(0) + manager.mark_down_osd(0) + + for f in range(1, 10): + rados(ctx, mon, ['-p', POOL, 'put', 'new_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', POOL, 'put', 'existed_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', POOL, 'put', 'existing_%d' % f, dummyfile]) + + # bring osd.0 back up, let it peer, but don't replicate the new + # objects... 
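(How the unfound state is manufactured here: osd.0's restart arguments are extended below with --osd-recovery-delay-start 1000, so after revival it peers and learns about the new writes from the PG log without actually copying the data. When osd.1 -- the only OSD holding those objects -- is then killed and marked lost, the objects become unfound.)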
+ log.info('osd.0 command_args is %s' % 'foo') + log.info(ctx.daemons.get_daemon('osd', 0).command_args) + ctx.daemons.get_daemon('osd', 0).command_kwargs['args'].extend([ + '--osd-recovery-delay-start', '1000' + ]) + manager.revive_osd(0) + manager.mark_in_osd(0) + manager.wait_till_osd_is_up(0) + + manager.flush_pg_stats([1, 0]) + manager.wait_till_active() + + # take out osd.1 and the only copy of those objects. + manager.kill_osd(1) + manager.mark_down_osd(1) + manager.mark_out_osd(1) + manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it') + + # bring up osd.2 so that things would otherwise, in theory, recovery fully + manager.revive_osd(2) + manager.mark_in_osd(2) + manager.wait_till_osd_is_up(2) + + manager.flush_pg_stats([0, 2]) + manager.wait_till_active() + manager.flush_pg_stats([0, 2]) + + # verify that there are unfound objects + unfound = manager.get_num_unfound_objects() + log.info("there are %d unfound objects" % unfound) + assert unfound + + testdir = teuthology.get_testdir(ctx) + procs = [] + if config.get('parallel_bench', True): + procs.append(mon.run( + args=[ + "/bin/sh", "-c", + " ".join(['adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage', + 'rados', + '--no-log-to-stderr', + '--name', 'client.admin', + '-b', str(4<<10), + '-p' , POOL, + '-t', '20', + 'bench', '240', 'write', + ]).format(tdir=testdir), + ], + logger=log.getChild('radosbench.{id}'.format(id='client.admin')), + stdin=run.PIPE, + wait=False + )) + time.sleep(10) + + # mark stuff lost + pgs = manager.get_pg_stats() + for pg in pgs: + if pg['stat_sum']['num_objects_unfound'] > 0: + primary = 'osd.%d' % pg['acting'][0] + + # verify that i can list them direct from the osd + log.info('listing missing/lost in %s state %s', pg['pgid'], + pg['state']); + m = manager.list_pg_unfound(pg['pgid']) + #log.info('%s' % m) + assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound'] + num_unfound=0 + for o in m['objects']: + if len(o['locations']) == 0: + num_unfound += 1 + assert m['num_unfound'] == num_unfound + + log.info("reverting unfound in %s on %s", pg['pgid'], primary) + manager.raw_cluster_cmd('pg', pg['pgid'], + 'mark_unfound_lost', 'revert') + else: + log.info("no unfound in %s", pg['pgid']) + + manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5') + manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5') + manager.flush_pg_stats([0, 2]) + manager.wait_for_recovery() + + # verify result + for f in range(1, 10): + err = rados(ctx, mon, ['-p', POOL, 'get', 'new_%d' % f, '-']) + assert err + err = rados(ctx, mon, ['-p', POOL, 'get', 'existed_%d' % f, '-']) + assert err + err = rados(ctx, mon, ['-p', POOL, 'get', 'existing_%d' % f, '-']) + assert not err + + # see if osd.1 can cope + manager.mark_in_osd(1) + manager.revive_osd(1) + manager.wait_till_osd_is_up(1) + manager.wait_for_clean() + run.wait(procs) diff --git a/qa/tasks/manypools.py b/qa/tasks/manypools.py new file mode 100644 index 00000000..7fe7e43e --- /dev/null +++ b/qa/tasks/manypools.py @@ -0,0 +1,73 @@ +""" +Force pg creation on all osds +""" +from teuthology import misc as teuthology +from teuthology.orchestra import run +import logging + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Create the specified number of pools and write 16 objects to them (thereby forcing + the PG creation on each OSD). This task creates pools from all the clients, + in parallel. 
It is easy to add other daemon types which have the appropriate + permissions, but I don't think anything else does. + The config is just the number of pools to create. I recommend setting + "mon create pg interval" to a very low value in your ceph config to speed + this up. + + You probably want to do this to look at memory consumption, and + maybe to test how performance changes with the number of PGs. For example: + + tasks: + - ceph: + config: + mon: + mon create pg interval: 1 + - manypools: 3000 + - radosbench: + clients: [client.0] + time: 360 + """ + + log.info('creating {n} pools'.format(n=config)) + + poolnum = int(config) + creator_remotes = [] + client_roles = teuthology.all_roles_of_type(ctx.cluster, 'client') + log.info('got client_roles={client_roles_}'.format(client_roles_=client_roles)) + for role in client_roles: + log.info('role={role_}'.format(role_=role)) + (creator_remote, ) = ctx.cluster.only('client.{id}'.format(id=role)).remotes.keys() + creator_remotes.append((creator_remote, 'client.{id}'.format(id=role))) + + remaining_pools = poolnum + poolprocs=dict() + while (remaining_pools > 0): + log.info('{n} pools remaining to create'.format(n=remaining_pools)) + for remote, role_ in creator_remotes: + poolnum = remaining_pools + remaining_pools -= 1 + if remaining_pools < 0: + continue + log.info('creating pool{num} on {role}'.format(num=poolnum, role=role_)) + proc = remote.run( + args=[ + 'ceph', + '--name', role_, + 'osd', 'pool', 'create', 'pool{num}'.format(num=poolnum), '8', + run.Raw('&&'), + 'rados', + '--name', role_, + '--pool', 'pool{num}'.format(num=poolnum), + 'bench', '0', 'write', '-t', '16', '--block-size', '1' + ], + wait = False + ) + log.info('waiting for pool and object creates') + poolprocs[remote] = proc + + run.wait(poolprocs.values()) + + log.info('created all {n} pools and wrote 16 objects to each'.format(n=poolnum)) diff --git a/qa/tasks/mds_creation_failure.py b/qa/tasks/mds_creation_failure.py new file mode 100644 index 00000000..58314086 --- /dev/null +++ b/qa/tasks/mds_creation_failure.py @@ -0,0 +1,69 @@ +# FIXME: this file has many undefined vars which are accessed! +# flake8: noqa +import logging +import contextlib +import time +from tasks import ceph_manager +from teuthology import misc +from teuthology.orchestra.run import CommandFailedError, Raw + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def task(ctx, config): + """ + Go through filesystem creation with a synthetic failure in an MDS + in its 'up:creating' state, to exercise the retry behaviour. 
+ """ + # Grab handles to the teuthology objects of interest + mdslist = list(misc.all_roles_of_type(ctx.cluster, 'mds')) + if len(mdslist) != 1: + # Require exactly one MDS, the code path for creation failure when + # a standby is available is different + raise RuntimeError("This task requires exactly one MDS") + + mds_id = mdslist[0] + (mds_remote,) = ctx.cluster.only('mds.{_id}'.format(_id=mds_id)).remotes.keys() + manager = ceph_manager.CephManager( + mds_remote, ctx=ctx, logger=log.getChild('ceph_manager'), + ) + + # Stop MDS + self.fs.set_max_mds(0) + self.fs.mds_stop(mds_id) + self.fs.mds_fail(mds_id) + + # Reset the filesystem so that next start will go into CREATING + manager.raw_cluster_cmd('fs', 'rm', "default", "--yes-i-really-mean-it") + manager.raw_cluster_cmd('fs', 'new', "default", "metadata", "data") + + # Start the MDS with mds_kill_create_at set, it will crash during creation + mds.restart_with_args(["--mds_kill_create_at=1"]) + try: + mds.wait_for_exit() + except CommandFailedError as e: + if e.exitstatus == 1: + log.info("MDS creation killed as expected") + else: + log.error("Unexpected status code %s" % e.exitstatus) + raise + + # Since I have intentionally caused a crash, I will clean up the resulting core + # file to avoid task.internal.coredump seeing it as a failure. + log.info("Removing core file from synthetic MDS failure") + mds_remote.run(args=['rm', '-f', Raw("{archive}/coredump/*.core".format(archive=misc.get_archive_dir(ctx)))]) + + # It should have left the MDS map state still in CREATING + status = self.fs.status().get_mds(mds_id) + assert status['state'] == 'up:creating' + + # Start the MDS again without the kill flag set, it should proceed with creation successfully + mds.restart() + + # Wait for state ACTIVE + self.fs.wait_for_state("up:active", timeout=120, mds_id=mds_id) + + # The system should be back up in a happy healthy state, go ahead and run any further tasks + # inside this context. + yield diff --git a/qa/tasks/mds_pre_upgrade.py b/qa/tasks/mds_pre_upgrade.py new file mode 100644 index 00000000..0856d483 --- /dev/null +++ b/qa/tasks/mds_pre_upgrade.py @@ -0,0 +1,43 @@ +""" +Prepare MDS cluster for upgrade. +""" + +import logging +import time + +from tasks.cephfs.filesystem import Filesystem + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Prepare MDS cluster for upgrade. + + This task reduces ranks to 1 and stops all standbys. 
+ """ + + if config is None: + config = {} + assert isinstance(config, dict), \ + 'snap-upgrade task only accepts a dict for configuration' + + fs = Filesystem(ctx) + status = fs.getinfo() + + fs.set_max_mds(1) + fs.reach_max_mds() + + # Stop standbys now to minimize time rank 0 is down in subsequent: + # tasks: + # - ceph.stop: [mds.*] + rank0 = fs.get_rank(rank=0, status=status) + for daemon in ctx.daemons.iter_daemons_of_role('mds', fs.mon_manager.cluster): + if rank0['name'] != daemon.id_: + daemon.stop() + + for i in range(1, 10): + time.sleep(5) # time for FSMap to update + status = fs.getinfo() + if len(list(status.get_standbys())) == 0: + break + assert(len(list(status.get_standbys())) == 0) diff --git a/qa/tasks/mds_thrash.py b/qa/tasks/mds_thrash.py new file mode 100644 index 00000000..ac543838 --- /dev/null +++ b/qa/tasks/mds_thrash.py @@ -0,0 +1,543 @@ +""" +Thrash mds by simulating failures +""" +import logging +import contextlib +import itertools +import random +import signal +import time + +from gevent import sleep +from gevent.greenlet import Greenlet +from gevent.event import Event +from teuthology import misc as teuthology + +from tasks import ceph_manager +from tasks.cephfs.filesystem import MDSCluster, Filesystem + +log = logging.getLogger(__name__) + +class DaemonWatchdog(Greenlet): + """ + DaemonWatchdog:: + + Watch Ceph daemons for failures. If an extended failure is detected (i.e. + not intentional), then the watchdog will unmount file systems and send + SIGTERM to all daemons. The duration of an extended failure is configurable + with watchdog_daemon_timeout. + + watchdog_daemon_timeout [default: 300]: number of seconds a daemon + is allowed to be failed before the watchdog will bark. + """ + + def __init__(self, ctx, manager, config, thrashers): + Greenlet.__init__(self) + self.ctx = ctx + self.config = config + self.e = None + self.logger = log.getChild('daemon_watchdog') + self.manager = manager + self.name = 'watchdog' + self.stopping = Event() + self.thrashers = thrashers + + def _run(self): + try: + self.watch() + except Exception as e: + # See _run exception comment for MDSThrasher + self.e = e + self.logger.exception("exception:") + # allow successful completion so gevent doesn't see an exception... + + def log(self, x): + """Write data to logger""" + self.logger.info(x) + + def stop(self): + self.stopping.set() + + def bark(self): + self.log("BARK! 
unmounting mounts and killing all daemons") + for mount in self.ctx.mounts.values(): + try: + mount.umount_wait(force=True) + except: + self.logger.exception("ignoring exception:") + daemons = [] + daemons.extend(filter(lambda daemon: daemon.running() and not daemon.proc.finished, self.ctx.daemons.iter_daemons_of_role('mds', cluster=self.manager.cluster))) + daemons.extend(filter(lambda daemon: daemon.running() and not daemon.proc.finished, self.ctx.daemons.iter_daemons_of_role('mon', cluster=self.manager.cluster))) + for daemon in daemons: + try: + daemon.signal(signal.SIGTERM) + except: + self.logger.exception("ignoring exception:") + + def watch(self): + self.log("watchdog starting") + daemon_timeout = int(self.config.get('watchdog_daemon_timeout', 300)) + daemon_failure_time = {} + while not self.stopping.is_set(): + bark = False + now = time.time() + + mons = self.ctx.daemons.iter_daemons_of_role('mon', cluster=self.manager.cluster) + mdss = self.ctx.daemons.iter_daemons_of_role('mds', cluster=self.manager.cluster) + clients = self.ctx.daemons.iter_daemons_of_role('client', cluster=self.manager.cluster) + + #for daemon in mons: + # self.log("mon daemon {role}.{id}: running={r}".format(role=daemon.role, id=daemon.id_, r=daemon.running() and not daemon.proc.finished)) + #for daemon in mdss: + # self.log("mds daemon {role}.{id}: running={r}".format(role=daemon.role, id=daemon.id_, r=daemon.running() and not daemon.proc.finished)) + + daemon_failures = [] + daemon_failures.extend(filter(lambda daemon: daemon.running() and daemon.proc.finished, mons)) + daemon_failures.extend(filter(lambda daemon: daemon.running() and daemon.proc.finished, mdss)) + for daemon in daemon_failures: + name = daemon.role + '.' + daemon.id_ + dt = daemon_failure_time.setdefault(name, (daemon, now)) + assert dt[0] is daemon + delta = now-dt[1] + self.log("daemon {name} is failed for ~{t:.0f}s".format(name=name, t=delta)) + if delta > daemon_timeout: + bark = True + + # If a daemon is no longer failed, remove it from tracking: + for name in daemon_failure_time.keys(): + if name not in [d.role + '.' + d.id_ for d in daemon_failures]: + self.log("daemon {name} has been restored".format(name=name)) + del daemon_failure_time[name] + + for thrasher in self.thrashers: + if thrasher.e is not None: + self.log("thrasher on fs.{name} failed".format(name=thrasher.fs.name)) + bark = True + + if bark: + self.bark() + return + + sleep(5) + + self.log("watchdog finished") + +class MDSThrasher(Greenlet): + """ + MDSThrasher:: + + The MDSThrasher thrashes MDSs during execution of other tasks (workunits, etc). + + The config is optional. Many of the config parameters are a a maximum value + to use when selecting a random value from a range. To always use the maximum + value, set no_random to true. The config is a dict containing some or all of: + + max_thrash: [default: 1] the maximum number of active MDSs per FS that will be thrashed at + any given time. + + max_thrash_delay: [default: 30] maximum number of seconds to delay before + thrashing again. + + max_replay_thrash_delay: [default: 4] maximum number of seconds to delay while in + the replay state before thrashing. + + max_revive_delay: [default: 10] maximum number of seconds to delay before + bringing back a thrashed MDS. + + randomize: [default: true] enables randomization and use the max/min values + + seed: [no default] seed the random number generator + + thrash_in_replay: [default: 0.0] likelihood that the MDS will be thrashed + during replay. 
Value should be between 0.0 and 1.0. + + thrash_max_mds: [default: 0.05] likelihood that the max_mds of the mds + cluster will be modified to a value [1, current) or (current, starting + max_mds]. Value should be between 0.0 and 1.0. + + thrash_while_stopping: [default: false] thrash an MDS while there + are MDS in up:stopping (because max_mds was changed and some + MDS were deactivated). + + thrash_weights: allows specific MDSs to be thrashed more/less frequently. + This option overrides anything specified by max_thrash. This option is a + dict containing mds.x: weight pairs. For example, [mds.a: 0.7, mds.b: + 0.3, mds.c: 0.0]. Each weight is a value from 0.0 to 1.0. Any MDSs not + specified will be automatically given a weight of 0.0 (not thrashed). + For a given MDS, by default the trasher delays for up to + max_thrash_delay, trashes, waits for the MDS to recover, and iterates. + If a non-zero weight is specified for an MDS, for each iteration the + thrasher chooses whether to thrash during that iteration based on a + random value [0-1] not exceeding the weight of that MDS. + + Examples:: + + + The following example sets the likelihood that mds.a will be thrashed + to 80%, mds.b to 20%, and other MDSs will not be thrashed. It also sets the + likelihood that an MDS will be thrashed in replay to 40%. + Thrash weights do not have to sum to 1. + + tasks: + - ceph: + - mds_thrash: + thrash_weights: + - mds.a: 0.8 + - mds.b: 0.2 + thrash_in_replay: 0.4 + - ceph-fuse: + - workunit: + clients: + all: [suites/fsx.sh] + + The following example disables randomization, and uses the max delay values: + + tasks: + - ceph: + - mds_thrash: + max_thrash_delay: 10 + max_revive_delay: 1 + max_replay_thrash_delay: 4 + + """ + + def __init__(self, ctx, manager, config, fs, max_mds): + Greenlet.__init__(self) + + self.config = config + self.ctx = ctx + self.e = None + self.logger = log.getChild('fs.[{f}]'.format(f = fs.name)) + self.fs = fs + self.manager = manager + self.max_mds = max_mds + self.name = 'thrasher.fs.[{f}]'.format(f = fs.name) + self.stopping = Event() + + self.randomize = bool(self.config.get('randomize', True)) + self.thrash_max_mds = float(self.config.get('thrash_max_mds', 0.05)) + self.max_thrash = int(self.config.get('max_thrash', 1)) + self.max_thrash_delay = float(self.config.get('thrash_delay', 120.0)) + self.thrash_in_replay = float(self.config.get('thrash_in_replay', False)) + assert self.thrash_in_replay >= 0.0 and self.thrash_in_replay <= 1.0, 'thrash_in_replay ({v}) must be between [0.0, 1.0]'.format( + v=self.thrash_in_replay) + self.max_replay_thrash_delay = float(self.config.get('max_replay_thrash_delay', 4.0)) + self.max_revive_delay = float(self.config.get('max_revive_delay', 10.0)) + + def _run(self): + try: + self.do_thrash() + except Exception as e: + # Log exceptions here so we get the full backtrace (gevent loses them). 
+ # Also allow successful completion as gevent exception handling is a broken mess: + # + # 2017-02-03T14:34:01.259 CRITICAL:root: File "gevent.libev.corecext.pyx", line 367, in gevent.libev.corecext.loop.handle_error (src/gevent/libev/gevent.corecext.c:5051) + # File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/gevent/hub.py", line 558, in handle_error + # self.print_exception(context, type, value, tb) + # File "/home/teuthworker/src/git.ceph.com_git_teuthology_master/virtualenv/local/lib/python2.7/site-packages/gevent/hub.py", line 605, in print_exception + # traceback.print_exception(type, value, tb, file=errstream) + # File "/usr/lib/python2.7/traceback.py", line 124, in print_exception + # _print(file, 'Traceback (most recent call last):') + # File "/usr/lib/python2.7/traceback.py", line 13, in _print + # file.write(str+terminator) + # 2017-02-03T14:34:01.261 CRITICAL:root:IOError + self.e = e + self.logger.exception("exception:") + # allow successful completion so gevent doesn't see an exception... + + def log(self, x): + """Write data to logger assigned to this MDThrasher""" + self.logger.info(x) + + def stop(self): + self.stopping.set() + + def kill_mds(self, mds): + if self.config.get('powercycle'): + (remote,) = (self.ctx.cluster.only('mds.{m}'.format(m=mds)). + remotes.keys()) + self.log('kill_mds on mds.{m} doing powercycle of {s}'. + format(m=mds, s=remote.name)) + self._assert_ipmi(remote) + remote.console.power_off() + else: + self.ctx.daemons.get_daemon('mds', mds).stop() + + @staticmethod + def _assert_ipmi(remote): + assert remote.console.has_ipmi_credentials, ( + "powercycling requested but RemoteConsole is not " + "initialized. Check ipmi config.") + + def revive_mds(self, mds): + """ + Revive mds -- do an ipmpi powercycle (if indicated by the config) + and then restart. + """ + if self.config.get('powercycle'): + (remote,) = (self.ctx.cluster.only('mds.{m}'.format(m=mds)). + remotes.keys()) + self.log('revive_mds on mds.{m} doing powercycle of {s}'. + format(m=mds, s=remote.name)) + self._assert_ipmi(remote) + remote.console.power_on() + self.manager.make_admin_daemon_dir(self.ctx, remote) + args = [] + self.ctx.daemons.get_daemon('mds', mds).restart(*args) + + def wait_for_stable(self, rank = None, gid = None): + self.log('waiting for mds cluster to stabilize...') + for itercount in itertools.count(): + status = self.fs.status() + max_mds = status.get_fsmap(self.fs.id)['mdsmap']['max_mds'] + ranks = list(status.get_ranks(self.fs.id)) + stopping = sum(1 for _ in ranks if "up:stopping" == _['state']) + actives = sum(1 for _ in ranks + if "up:active" == _['state'] and "laggy_since" not in _) + + if not bool(self.config.get('thrash_while_stopping', False)) and stopping > 0: + if itercount % 5 == 0: + self.log('cluster is considered unstable while MDS are in up:stopping (!thrash_while_stopping)') + else: + if rank is not None: + try: + info = status.get_rank(self.fs.id, rank) + if info['gid'] != gid and "up:active" == info['state']: + self.log('mds.{name} has gained rank={rank}, replacing gid={gid}'.format(name = info['name'], rank = rank, gid = gid)) + return status + except: + pass # no rank present + if actives >= max_mds: + # no replacement can occur! 
+ self.log("cluster has {actives} actives (max_mds is {max_mds}), no MDS can replace rank {rank}".format( + actives=actives, max_mds=max_mds, rank=rank)) + return status + else: + if actives == max_mds: + self.log('mds cluster has {count} alive and active, now stable!'.format(count = actives)) + return status, None + if itercount > 300/2: # 5 minutes + raise RuntimeError('timeout waiting for cluster to stabilize') + elif itercount % 5 == 0: + self.log('mds map: {status}'.format(status=status)) + else: + self.log('no change') + sleep(2) + + def do_thrash(self): + """ + Perform the random thrashing action + """ + + self.log('starting mds_do_thrash for fs {fs}'.format(fs = self.fs.name)) + stats = { + "max_mds": 0, + "deactivate": 0, + "kill": 0, + } + + while not self.stopping.is_set(): + delay = self.max_thrash_delay + if self.randomize: + delay = random.randrange(0.0, self.max_thrash_delay) + + if delay > 0.0: + self.log('waiting for {delay} secs before thrashing'.format(delay=delay)) + self.stopping.wait(delay) + if self.stopping.is_set(): + continue + + status = self.fs.status() + + if random.random() <= self.thrash_max_mds: + max_mds = status.get_fsmap(self.fs.id)['mdsmap']['max_mds'] + options = list(range(1, max_mds))+list(range(max_mds+1, self.max_mds+1)) + if len(options) > 0: + sample = random.sample(options, 1) + new_max_mds = sample[0] + self.log('thrashing max_mds: %d -> %d' % (max_mds, new_max_mds)) + self.fs.set_max_mds(new_max_mds) + stats['max_mds'] += 1 + self.wait_for_stable() + + count = 0 + for info in status.get_ranks(self.fs.id): + name = info['name'] + label = 'mds.' + name + rank = info['rank'] + gid = info['gid'] + + # if thrash_weights isn't specified and we've reached max_thrash, + # we're done + count = count + 1 + if 'thrash_weights' not in self.config and count > self.max_thrash: + break + + weight = 1.0 + if 'thrash_weights' in self.config: + weight = self.config['thrash_weights'].get(label, '0.0') + skip = random.randrange(0.0, 1.0) + if weight <= skip: + self.log('skipping thrash iteration with skip ({skip}) > weight ({weight})'.format(skip=skip, weight=weight)) + continue + + self.log('kill {label} (rank={rank})'.format(label=label, rank=rank)) + self.kill_mds(name) + stats['kill'] += 1 + + # wait for mon to report killed mds as crashed + last_laggy_since = None + itercount = 0 + while True: + status = self.fs.status() + info = status.get_mds(name) + if not info: + break + if 'laggy_since' in info: + last_laggy_since = info['laggy_since'] + break + if any([(f == name) for f in status.get_fsmap(self.fs.id)['mdsmap']['failed']]): + break + self.log( + 'waiting till mds map indicates {label} is laggy/crashed, in failed state, or {label} is removed from mdsmap'.format( + label=label)) + itercount = itercount + 1 + if itercount > 10: + self.log('mds map: {status}'.format(status=status)) + sleep(2) + + if last_laggy_since: + self.log( + '{label} reported laggy/crashed since: {since}'.format(label=label, since=last_laggy_since)) + else: + self.log('{label} down, removed from mdsmap'.format(label=label, since=last_laggy_since)) + + # wait for a standby mds to takeover and become active + status = self.wait_for_stable(rank, gid) + + # wait for a while before restarting old active to become new + # standby + delay = self.max_revive_delay + if self.randomize: + delay = random.randrange(0.0, self.max_revive_delay) + + self.log('waiting for {delay} secs before reviving {label}'.format( + delay=delay, label=label)) + sleep(delay) + + self.log('reviving 
{label}'.format(label=label)) + self.revive_mds(name) + + for itercount in itertools.count(): + if itercount > 300/2: # 5 minutes + raise RuntimeError('timeout waiting for MDS to revive') + status = self.fs.status() + info = status.get_mds(name) + if info and info['state'] in ('up:standby', 'up:standby-replay', 'up:active'): + self.log('{label} reported in {state} state'.format(label=label, state=info['state'])) + break + self.log( + 'waiting till mds map indicates {label} is in active, standby or standby-replay'.format(label=label)) + sleep(2) + + for stat in stats: + self.log("stat['{key}'] = {value}".format(key = stat, value = stats[stat])) + + # don't do replay thrashing right now +# for info in status.get_replays(self.fs.id): +# # this might race with replay -> active transition... +# if status['state'] == 'up:replay' and random.randrange(0.0, 1.0) < self.thrash_in_replay: +# delay = self.max_replay_thrash_delay +# if self.randomize: +# delay = random.randrange(0.0, self.max_replay_thrash_delay) +# sleep(delay) +# self.log('kill replaying mds.{id}'.format(id=self.to_kill)) +# self.kill_mds(self.to_kill) +# +# delay = self.max_revive_delay +# if self.randomize: +# delay = random.randrange(0.0, self.max_revive_delay) +# +# self.log('waiting for {delay} secs before reviving mds.{id}'.format( +# delay=delay, id=self.to_kill)) +# sleep(delay) +# +# self.log('revive mds.{id}'.format(id=self.to_kill)) +# self.revive_mds(self.to_kill) + + +@contextlib.contextmanager +def task(ctx, config): + """ + Stress test the mds by thrashing while another task/workunit + is running. + + Please refer to MDSThrasher class for further information on the + available options. + """ + + mds_cluster = MDSCluster(ctx) + + if config is None: + config = {} + assert isinstance(config, dict), \ + 'mds_thrash task only accepts a dict for configuration' + mdslist = list(teuthology.all_roles_of_type(ctx.cluster, 'mds')) + assert len(mdslist) > 1, \ + 'mds_thrash task requires at least 2 metadata servers' + + # choose random seed + if 'seed' in config: + seed = int(config['seed']) + else: + seed = int(time.time()) + log.info('mds thrasher using random seed: {seed}'.format(seed=seed)) + random.seed(seed) + + (first,) = ctx.cluster.only('mds.{_id}'.format(_id=mdslist[0])).remotes.keys() + manager = ceph_manager.CephManager( + first, ctx=ctx, logger=log.getChild('ceph_manager'), + ) + + # make sure everyone is in active, standby, or standby-replay + log.info('Wait for all MDSs to reach steady state...') + status = mds_cluster.status() + while True: + steady = True + for info in status.get_all(): + state = info['state'] + if state not in ('up:active', 'up:standby', 'up:standby-replay'): + steady = False + break + if steady: + break + sleep(2) + status = mds_cluster.status() + log.info('Ready to start thrashing') + + thrashers = [] + + watchdog = DaemonWatchdog(ctx, manager, config, thrashers) + watchdog.start() + + manager.wait_for_clean() + assert manager.is_clean() + for fs in status.get_filesystems(): + thrasher = MDSThrasher(ctx, manager, config, Filesystem(ctx, fs['id']), fs['mdsmap']['max_mds']) + thrasher.start() + thrashers.append(thrasher) + + try: + log.debug('Yielding') + yield + finally: + log.info('joining mds_thrashers') + for thrasher in thrashers: + thrasher.stop() + if thrasher.e: + raise RuntimeError('error during thrashing') + thrasher.join() + log.info('done joining') + + watchdog.stop() + watchdog.join() diff --git a/qa/tasks/metadata.yaml b/qa/tasks/metadata.yaml new file mode 100644 index 
00000000..ccdc3b07 --- /dev/null +++ b/qa/tasks/metadata.yaml @@ -0,0 +1,2 @@ +instance-id: test +local-hostname: test diff --git a/qa/tasks/mgr/__init__.py b/qa/tasks/mgr/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/qa/tasks/mgr/dashboard/__init__.py b/qa/tasks/mgr/dashboard/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/qa/tasks/mgr/dashboard/helper.py b/qa/tasks/mgr/dashboard/helper.py new file mode 100644 index 00000000..5c430a69 --- /dev/null +++ b/qa/tasks/mgr/dashboard/helper.py @@ -0,0 +1,574 @@ +# -*- coding: utf-8 -*- +# pylint: disable=W0212,too-many-return-statements +from __future__ import absolute_import + +import json +import logging +import random +import string +from collections import namedtuple +import time + +import requests +import six +from teuthology.exceptions import CommandFailedError + +from tasks.mgr.mgr_test_case import MgrTestCase + + +log = logging.getLogger(__name__) + + +class DashboardTestCase(MgrTestCase): + # Display full error diffs + maxDiff = None + + # Increased x3 (20 -> 60) + TIMEOUT_HEALTH_CLEAR = 60 + + MGRS_REQUIRED = 2 + MDSS_REQUIRED = 1 + REQUIRE_FILESYSTEM = True + CLIENTS_REQUIRED = 1 + CEPHFS = False + + _session = None # type: requests.sessions.Session + _token = None + _resp = None # type: requests.models.Response + _loggedin = False + _base_uri = None + + AUTO_AUTHENTICATE = True + + AUTH_ROLES = ['administrator'] + + @classmethod + def create_user(cls, username, password, roles): + try: + cls._ceph_cmd(['dashboard', 'ac-user-show', username]) + cls._ceph_cmd(['dashboard', 'ac-user-delete', username]) + except CommandFailedError as ex: + if ex.exitstatus != 2: + raise ex + + user_create_args = [ + 'dashboard', 'ac-user-create', username + ] + cls._ceph_cmd_with_secret(user_create_args, password) + + set_roles_args = ['dashboard', 'ac-user-set-roles', username] + for idx, role in enumerate(roles): + if isinstance(role, str): + set_roles_args.append(role) + else: + assert isinstance(role, dict) + rolename = 'test_role_{}'.format(idx) + try: + cls._ceph_cmd(['dashboard', 'ac-role-show', rolename]) + cls._ceph_cmd(['dashboard', 'ac-role-delete', rolename]) + except CommandFailedError as ex: + if ex.exitstatus != 2: + raise ex + cls._ceph_cmd(['dashboard', 'ac-role-create', rolename]) + for mod, perms in role.items(): + args = ['dashboard', 'ac-role-add-scope-perms', rolename, mod] + args.extend(perms) + cls._ceph_cmd(args) + set_roles_args.append(rolename) + cls._ceph_cmd(set_roles_args) + + @classmethod + def login(cls, username, password, set_cookies=False): + if cls._loggedin: + cls.logout() + cls._post('/api/auth', {'username': username, + 'password': password}, set_cookies=set_cookies) + cls._assertEq(cls._resp.status_code, 201) + cls._token = cls.jsonBody()['token'] + cls._loggedin = True + + @classmethod + def logout(cls, set_cookies=False): + if cls._loggedin: + cls._post('/api/auth/logout', set_cookies=set_cookies) + cls._assertEq(cls._resp.status_code, 200) + cls._token = None + cls._loggedin = False + + @classmethod + def delete_user(cls, username, roles=None): + if roles is None: + roles = [] + cls._ceph_cmd(['dashboard', 'ac-user-delete', username]) + for idx, role in enumerate(roles): + if isinstance(role, dict): + cls._ceph_cmd(['dashboard', 'ac-role-delete', 'test_role_{}'.format(idx)]) + + @classmethod + def RunAs(cls, username, password, roles): + def wrapper(func): + def execute(self, *args, **kwargs): + self.create_user(username, password, roles) + self.login(username, 
password) + res = func(self, *args, **kwargs) + self.logout() + self.delete_user(username, roles) + return res + return execute + return wrapper + + @classmethod + def set_jwt_token(cls, token): + cls._token = token + + @classmethod + def setUpClass(cls): + super(DashboardTestCase, cls).setUpClass() + cls._assign_ports("dashboard", "ssl_server_port") + cls._load_module("dashboard") + cls._base_uri = cls._get_uri("dashboard").rstrip('/') + + if cls.CEPHFS: + cls.mds_cluster.clear_firewall() + + # To avoid any issues with e.g. unlink bugs, we destroy and recreate + # the filesystem rather than just doing a rm -rf of files + cls.mds_cluster.mds_stop() + cls.mds_cluster.mds_fail() + cls.mds_cluster.delete_all_filesystems() + cls.fs = None # is now invalid! + + cls.fs = cls.mds_cluster.newfs(create=True) + cls.fs.mds_restart() + + # In case some test messed with auth caps, reset them + # pylint: disable=not-an-iterable + client_mount_ids = [m.client_id for m in cls.mounts] + for client_id in client_mount_ids: + cls.mds_cluster.mon_manager.raw_cluster_cmd_result( + 'auth', 'caps', "client.{0}".format(client_id), + 'mds', 'allow', + 'mon', 'allow r', + 'osd', 'allow rw pool={0}'.format(cls.fs.get_data_pool_name())) + + # wait for mds restart to complete... + cls.fs.wait_for_daemons() + + cls._token = None + cls._session = requests.Session() + cls._resp = None + + cls.create_user('admin', 'admin', cls.AUTH_ROLES) + if cls.AUTO_AUTHENTICATE: + cls.login('admin', 'admin') + + def setUp(self): + super(DashboardTestCase, self).setUp() + if not self._loggedin and self.AUTO_AUTHENTICATE: + self.login('admin', 'admin') + self.wait_for_health_clear(self.TIMEOUT_HEALTH_CLEAR) + + @classmethod + def tearDownClass(cls): + super(DashboardTestCase, cls).tearDownClass() + + # pylint: disable=inconsistent-return-statements, too-many-arguments, too-many-branches + @classmethod + def _request(cls, url, method, data=None, params=None, set_cookies=False): + url = "{}{}".format(cls._base_uri, url) + log.debug("Request %s to %s", method, url) + headers = {} + cookies = {} + if cls._token: + if set_cookies: + cookies['token'] = cls._token + else: + headers['Authorization'] = "Bearer {}".format(cls._token) + + if set_cookies: + if method == 'GET': + cls._resp = cls._session.get(url, params=params, verify=False, + headers=headers, cookies=cookies) + elif method == 'POST': + cls._resp = cls._session.post(url, json=data, params=params, + verify=False, headers=headers, cookies=cookies) + elif method == 'DELETE': + cls._resp = cls._session.delete(url, json=data, params=params, + verify=False, headers=headers, cookies=cookies) + elif method == 'PUT': + cls._resp = cls._session.put(url, json=data, params=params, + verify=False, headers=headers, cookies=cookies) + else: + assert False + else: + if method == 'GET': + cls._resp = cls._session.get(url, params=params, verify=False, + headers=headers) + elif method == 'POST': + cls._resp = cls._session.post(url, json=data, params=params, + verify=False, headers=headers) + elif method == 'DELETE': + cls._resp = cls._session.delete(url, json=data, params=params, + verify=False, headers=headers) + elif method == 'PUT': + cls._resp = cls._session.put(url, json=data, params=params, + verify=False, headers=headers) + else: + assert False + try: + if not cls._resp.ok: + # Output response for easier debugging. 
+ log.error("Request response: %s", cls._resp.text) + content_type = cls._resp.headers['content-type'] + if content_type == 'application/json' and cls._resp.text and cls._resp.text != "": + return cls._resp.json() + return cls._resp.text + except ValueError as ex: + log.exception("Failed to decode response: %s", cls._resp.text) + raise ex + + @classmethod + def _get(cls, url, params=None, set_cookies=False): + return cls._request(url, 'GET', params=params, set_cookies=set_cookies) + + @classmethod + def _view_cache_get(cls, url, retries=5): + retry = True + while retry and retries > 0: + retry = False + res = cls._get(url) + if isinstance(res, dict): + res = [res] + for view in res: + assert 'value' in view + if not view['value']: + retry = True + retries -= 1 + if retries == 0: + raise Exception("{} view cache exceeded number of retries={}" + .format(url, retries)) + return res + + @classmethod + def _post(cls, url, data=None, params=None, set_cookies=False): + cls._request(url, 'POST', data, params, set_cookies=set_cookies) + + @classmethod + def _delete(cls, url, data=None, params=None, set_cookies=False): + cls._request(url, 'DELETE', data, params, set_cookies=set_cookies) + + @classmethod + def _put(cls, url, data=None, params=None, set_cookies=False): + cls._request(url, 'PUT', data, params, set_cookies=set_cookies) + + @classmethod + def _assertEq(cls, v1, v2): + if not v1 == v2: + raise Exception("assertion failed: {} != {}".format(v1, v2)) + + @classmethod + def _assertIn(cls, v1, v2): + if v1 not in v2: + raise Exception("assertion failed: {} not in {}".format(v1, v2)) + + @classmethod + def _assertIsInst(cls, v1, v2): + if not isinstance(v1, v2): + raise Exception("assertion failed: {} not instance of {}".format(v1, v2)) + + # pylint: disable=too-many-arguments + @classmethod + def _task_request(cls, method, url, data, timeout, set_cookies=False): + res = cls._request(url, method, data, set_cookies=set_cookies) + cls._assertIn(cls._resp.status_code, [200, 201, 202, 204, 400, 403, 404]) + + if cls._resp.status_code == 403: + return None + + if cls._resp.status_code != 202: + log.debug("task finished immediately") + return res + + cls._assertIn('name', res) + cls._assertIn('metadata', res) + task_name = res['name'] + task_metadata = res['metadata'] + + retries = int(timeout) + res_task = None + while retries > 0 and not res_task: + retries -= 1 + log.debug("task (%s, %s) is still executing", task_name, task_metadata) + time.sleep(1) + _res = cls._get('/api/task?name={}'.format(task_name)) + cls._assertEq(cls._resp.status_code, 200) + executing_tasks = [task for task in _res['executing_tasks'] if + task['metadata'] == task_metadata] + finished_tasks = [task for task in _res['finished_tasks'] if + task['metadata'] == task_metadata] + if not executing_tasks and finished_tasks: + res_task = finished_tasks[0] + + if retries <= 0: + raise Exception("Waiting for task ({}, {}) to finish timed out. 
{}" + .format(task_name, task_metadata, _res)) + + log.debug("task (%s, %s) finished", task_name, task_metadata) + if res_task['success']: + if method == 'POST': + cls._resp.status_code = 201 + elif method == 'PUT': + cls._resp.status_code = 200 + elif method == 'DELETE': + cls._resp.status_code = 204 + return res_task['ret_value'] + else: + if 'status' in res_task['exception']: + cls._resp.status_code = res_task['exception']['status'] + else: + cls._resp.status_code = 500 + return res_task['exception'] + + @classmethod + def _task_post(cls, url, data=None, timeout=60, set_cookies=False): + return cls._task_request('POST', url, data, timeout, set_cookies=set_cookies) + + @classmethod + def _task_delete(cls, url, timeout=60, set_cookies=False): + return cls._task_request('DELETE', url, None, timeout, set_cookies=set_cookies) + + @classmethod + def _task_put(cls, url, data=None, timeout=60, set_cookies=False): + return cls._task_request('PUT', url, data, timeout, set_cookies=set_cookies) + + @classmethod + def cookies(cls): + return cls._resp.cookies + + @classmethod + def jsonBody(cls): + return cls._resp.json() + + @classmethod + def reset_session(cls): + cls._session = requests.Session() + + def assertSubset(self, data, biggerData): + for key, value in data.items(): + self.assertEqual(biggerData[key], value) + + def assertJsonBody(self, data): + body = self._resp.json() + self.assertEqual(body, data) + + def assertJsonSubset(self, data): + self.assertSubset(data, self._resp.json()) + + def assertSchema(self, data, schema): + try: + return _validate_json(data, schema) + except _ValError as e: + self.assertEqual(data, str(e)) + + def assertSchemaBody(self, schema): + self.assertSchema(self.jsonBody(), schema) + + def assertBody(self, body): + self.assertEqual(self._resp.text, body) + + def assertStatus(self, status): + if isinstance(status, list): + self.assertIn(self._resp.status_code, status) + else: + self.assertEqual(self._resp.status_code, status) + + def assertHeaders(self, headers): + for name, value in headers.items(): + self.assertIn(name, self._resp.headers) + self.assertEqual(self._resp.headers[name], value) + + def assertError(self, code=None, component=None, detail=None): + body = self._resp.json() + if code: + self.assertEqual(body['code'], code) + if component: + self.assertEqual(body['component'], component) + if detail: + self.assertEqual(body['detail'], detail) + + @classmethod + def _ceph_cmd(cls, cmd): + res = cls.mgr_cluster.mon_manager.raw_cluster_cmd(*cmd) + log.debug("command result: %s", res) + return res + + @classmethod + def _ceph_cmd_result(cls, cmd): + exitstatus = cls.mgr_cluster.mon_manager.raw_cluster_cmd_result(*cmd) + log.debug("command exit status: %d", exitstatus) + return exitstatus + + @classmethod + def _ceph_cmd_with_secret(cls, cmd, secret, return_exit_code=False): + cmd.append('-i') + cmd.append('{}'.format(cls._ceph_create_tmp_file(secret))) + if return_exit_code: + return cls._ceph_cmd_result(cmd) + return cls._ceph_cmd(cmd) + + @classmethod + def _ceph_create_tmp_file(cls, content): + """Create a temporary file in the remote cluster""" + file_name = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(20)) + file_path = '/tmp/{}'.format(file_name) + cls._cmd(['sh', '-c', 'echo -n {} > {}'.format(content, file_path)]) + return file_path + + def set_config_key(self, key, value): + self._ceph_cmd(['config-key', 'set', key, value]) + + def get_config_key(self, key): + return self._ceph_cmd(['config-key', 'get', key]) + + 
@classmethod + def _cmd(cls, args): + return cls.mgr_cluster.admin_remote.run(args=args) + + @classmethod + def _rbd_cmd(cls, cmd): + args = ['rbd'] + args.extend(cmd) + cls._cmd(args) + + @classmethod + def _radosgw_admin_cmd(cls, cmd): + args = ['radosgw-admin'] + args.extend(cmd) + cls._cmd(args) + + @classmethod + def _rados_cmd(cls, cmd): + args = ['rados'] + args.extend(cmd) + cls._cmd(args) + + @classmethod + def mons(cls): + out = cls.ceph_cluster.mon_manager.raw_cluster_cmd('mon_status') + j = json.loads(out) + return [mon['name'] for mon in j['monmap']['mons']] + + @classmethod + def find_object_in_list(cls, key, value, iterable): + """ + Get the first occurrence of an object within a list with + the specified key/value. + :param key: The name of the key. + :param value: The value to search for. + :param iterable: The list to process. + :return: Returns the found object or None. + """ + for obj in iterable: + if key in obj and obj[key] == value: + return obj + return None + + +class JLeaf(namedtuple('JLeaf', ['typ', 'none'])): + def __new__(cls, typ, none=False): + if typ == str: + typ = six.string_types + return super(JLeaf, cls).__new__(cls, typ, none) + + +JList = namedtuple('JList', ['elem_typ']) + +JTuple = namedtuple('JList', ['elem_typs']) + +JUnion = namedtuple('JUnion', ['elem_typs']) + +class JObj(namedtuple('JObj', ['sub_elems', 'allow_unknown', 'none', 'unknown_schema'])): + def __new__(cls, sub_elems, allow_unknown=False, none=False, unknown_schema=None): + """ + :type sub_elems: dict[str, JAny | JLeaf | JList | JObj | type] + :type allow_unknown: bool + :type none: bool + :type unknown_schema: int, str, JAny | JLeaf | JList | JObj + :return: + """ + return super(JObj, cls).__new__(cls, sub_elems, allow_unknown, none, unknown_schema) + + +JAny = namedtuple('JAny', ['none']) + + +class _ValError(Exception): + def __init__(self, msg, path): + path_str = ''.join('[{}]'.format(repr(p)) for p in path) + super(_ValError, self).__init__('In `input{}`: {}'.format(path_str, msg)) + + +# pylint: disable=dangerous-default-value,inconsistent-return-statements +def _validate_json(val, schema, path=[]): + """ + >>> d = {'a': 1, 'b': 'x', 'c': range(10)} + ... ds = JObj({'a': int, 'b': str, 'c': JList(int)}) + ... 
_validate_json(d, ds) + True + >>> _validate_json({'num': 1}, JObj({'num': JUnion([int,float])})) + True + >>> _validate_json({'num': 'a'}, JObj({'num': JUnion([int,float])})) + False + """ + if isinstance(schema, JAny): + if not schema.none and val is None: + raise _ValError('val is None', path) + return True + if isinstance(schema, JLeaf): + if schema.none and val is None: + return True + if not isinstance(val, schema.typ): + raise _ValError('val not of type {}'.format(schema.typ), path) + return True + if isinstance(schema, JList): + if not isinstance(val, list): + raise _ValError('val="{}" is not a list'.format(val), path) + return all(_validate_json(e, schema.elem_typ, path + [i]) for i, e in enumerate(val)) + if isinstance(schema, JTuple): + return all(_validate_json(val[i], typ, path + [i]) + for i, typ in enumerate(schema.elem_typs)) + if isinstance(schema, JUnion): + for typ in schema.elem_typs: + try: + if _validate_json(val, typ, path): + return True + except _ValError: + pass + return False + if isinstance(schema, JObj): + if val is None and schema.none: + return True + elif val is None: + raise _ValError('val is None', path) + if not hasattr(val, 'keys'): + raise _ValError('val="{}" is not a dict'.format(val), path) + missing_keys = set(schema.sub_elems.keys()).difference(set(val.keys())) + if missing_keys: + raise _ValError('missing keys: {}'.format(missing_keys), path) + unknown_keys = set(val.keys()).difference(set(schema.sub_elems.keys())) + if not schema.allow_unknown and unknown_keys: + raise _ValError('unknown keys: {}'.format(unknown_keys), path) + result = all( + _validate_json(val[key], sub_schema, path + [key]) + for key, sub_schema in schema.sub_elems.items() + ) + if unknown_keys and schema.allow_unknown and schema.unknown_schema: + result += all( + _validate_json(val[key], schema.unknown_schema, path + [key]) + for key in unknown_keys + ) + return result + if schema in [str, int, float, bool, six.string_types]: + return _validate_json(val, JLeaf(schema), path) + + assert False, str(path) diff --git a/qa/tasks/mgr/dashboard/test_auth.py b/qa/tasks/mgr/dashboard/test_auth.py new file mode 100644 index 00000000..df5485d4 --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_auth.py @@ -0,0 +1,240 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +import time + +import jwt +from teuthology.orchestra.run import \ + CommandFailedError # pylint: disable=import-error + +from .helper import DashboardTestCase + + +class AuthTest(DashboardTestCase): + + AUTO_AUTHENTICATE = False + + def setUp(self): + super(AuthTest, self).setUp() + self.reset_session() + + def _validate_jwt_token(self, token, username, permissions): + payload = jwt.decode(token, options={'verify_signature': False}) + self.assertIn('username', payload) + self.assertEqual(payload['username'], username) + + for scope, perms in permissions.items(): + self.assertIsNotNone(scope) + self.assertIn('read', perms) + self.assertIn('update', perms) + self.assertIn('create', perms) + self.assertIn('delete', perms) + + def test_login_without_password(self): + with self.assertRaises(CommandFailedError): + self.create_user('admin2', '', ['administrator']) + + def test_a_set_login_credentials(self): + # test with Authorization header + self.create_user('admin2', 'admin2', ['administrator']) + self._post("/api/auth", {'username': 'admin2', 'password': 'admin2'}) + self.assertStatus(201) + data = self.jsonBody() + self._validate_jwt_token(data['token'], "admin2", data['permissions']) + 
self.delete_user('admin2') + + # test with Cookies set + self.create_user('admin2', 'admin2', ['administrator']) + self._post("/api/auth", {'username': 'admin2', 'password': 'admin2'}, set_cookies=True) + self.assertStatus(201) + data = self.jsonBody() + self._validate_jwt_token(data['token'], "admin2", data['permissions']) + self.delete_user('admin2') + + def test_login_valid(self): + # test with Authorization header + self._post("/api/auth", {'username': 'admin', 'password': 'admin'}) + self.assertStatus(201) + data = self.jsonBody() + self._validate_jwt_token(data['token'], "admin", data['permissions']) + + # test with Cookies set + self._post("/api/auth", {'username': 'admin', 'password': 'admin'}, set_cookies=True) + self.assertStatus(201) + data = self.jsonBody() + self._validate_jwt_token(data['token'], "admin", data['permissions']) + + def test_login_invalid(self): + # test with Authorization header + self._post("/api/auth", {'username': 'admin', 'password': 'inval'}) + self.assertStatus(400) + self.assertJsonBody({ + "component": "auth", + "code": "invalid_credentials", + "detail": "Invalid credentials" + }) + + # test with Cookies set + self._post("/api/auth", {'username': 'admin', 'password': 'inval'}, set_cookies=True) + self.assertStatus(400) + self.assertJsonBody({ + "component": "auth", + "code": "invalid_credentials", + "detail": "Invalid credentials" + }) + + def test_logout(self): + # test with Authorization header + self._post("/api/auth", {'username': 'admin', 'password': 'admin'}) + self.assertStatus(201) + data = self.jsonBody() + self._validate_jwt_token(data['token'], "admin", data['permissions']) + self.set_jwt_token(data['token']) + self._post("/api/auth/logout") + self.assertStatus(200) + self.assertJsonBody({ + "redirect_url": "#/login" + }) + self._get("/api/host") + self.assertStatus(401) + self.set_jwt_token(None) + + # test with Cookies set + self._post("/api/auth", {'username': 'admin', 'password': 'admin'}, set_cookies=True) + self.assertStatus(201) + data = self.jsonBody() + self._validate_jwt_token(data['token'], "admin", data['permissions']) + self.set_jwt_token(data['token']) + self._post("/api/auth/logout", set_cookies=True) + self.assertStatus(200) + self.assertJsonBody({ + "redirect_url": "#/login" + }) + self._get("/api/host", set_cookies=True) + self.assertStatus(401) + self.set_jwt_token(None) + + def test_token_ttl(self): + # test with Authorization header + self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '5']) + self._post("/api/auth", {'username': 'admin', 'password': 'admin'}) + self.assertStatus(201) + self.set_jwt_token(self.jsonBody()['token']) + self._get("/api/host") + self.assertStatus(200) + time.sleep(6) + self._get("/api/host") + self.assertStatus(401) + self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '28800']) + self.set_jwt_token(None) + + # test with Cookies set + self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '5']) + self._post("/api/auth", {'username': 'admin', 'password': 'admin'}, set_cookies=True) + self.assertStatus(201) + self.set_jwt_token(self.jsonBody()['token']) + self._get("/api/host", set_cookies=True) + self.assertStatus(200) + time.sleep(6) + self._get("/api/host") + self.assertStatus(401) + self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '28800']) + self.set_jwt_token(None) + + def test_remove_from_blacklist(self): + # test with Authorization header + self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '5']) + self._post("/api/auth", {'username': 'admin', 'password': 'admin'}) + self.assertStatus(201) + 
self.set_jwt_token(self.jsonBody()['token']) + # the following call adds the token to the blacklist + self._post("/api/auth/logout") + self.assertStatus(200) + self._get("/api/host") + self.assertStatus(401) + time.sleep(6) + self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '28800']) + self.set_jwt_token(None) + self._post("/api/auth", {'username': 'admin', 'password': 'admin'}) + self.assertStatus(201) + self.set_jwt_token(self.jsonBody()['token']) + # the following call removes expired tokens from the blacklist + self._post("/api/auth/logout") + self.assertStatus(200) + + # test with Cookies set + self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '5']) + self._post("/api/auth", {'username': 'admin', 'password': 'admin'}, set_cookies=True) + self.assertStatus(201) + self.set_jwt_token(self.jsonBody()['token']) + # the following call adds the token to the blocklist + self._post("/api/auth/logout", set_cookies=True) + self.assertStatus(200) + self._get("/api/host", set_cookies=True) + self.assertStatus(401) + time.sleep(6) + self._ceph_cmd(['dashboard', 'set-jwt-token-ttl', '28800']) + self.set_jwt_token(None) + self._post("/api/auth", {'username': 'admin', 'password': 'admin'}, set_cookies=True) + self.assertStatus(201) + self.set_jwt_token(self.jsonBody()['token']) + # the following call removes expired tokens from the blocklist + self._post("/api/auth/logout", set_cookies=True) + self.assertStatus(200) + + def test_unauthorized(self): + # test with Authorization header + self._get("/api/host") + self.assertStatus(401) + + # test with Cookies set + self._get("/api/host", set_cookies=True) + self.assertStatus(401) + + def test_invalidate_token_by_admin(self): + # test with Authorization header + self._get("/api/host") + self.assertStatus(401) + self.create_user('user', 'user', ['read-only']) + time.sleep(1) + self._post("/api/auth", {'username': 'user', 'password': 'user'}) + self.assertStatus(201) + self.set_jwt_token(self.jsonBody()['token']) + self._get("/api/host") + self.assertStatus(200) + time.sleep(1) + self._ceph_cmd_with_secret(['dashboard', 'ac-user-set-password', 'user'], 'user2') + time.sleep(1) + self._get("/api/host") + self.assertStatus(401) + self.set_jwt_token(None) + self._post("/api/auth", {'username': 'user', 'password': 'user2'}) + self.assertStatus(201) + self.set_jwt_token(self.jsonBody()['token']) + self._get("/api/host") + self.assertStatus(200) + self.delete_user("user") + + # test with Cookies set + self._get("/api/host", set_cookies=True) + self.assertStatus(401) + self.create_user('user', 'user', ['read-only']) + time.sleep(1) + self._post("/api/auth", {'username': 'user', 'password': 'user'}, set_cookies=True) + self.assertStatus(201) + self.set_jwt_token(self.jsonBody()['token']) + self._get("/api/host", set_cookies=True) + self.assertStatus(200) + time.sleep(1) + self._ceph_cmd_with_secret(['dashboard', 'ac-user-set-password', 'user'], 'user2') + time.sleep(1) + self._get("/api/host", set_cookies=True) + self.assertStatus(401) + self.set_jwt_token(None) + self._post("/api/auth", {'username': 'user', 'password': 'user2'}, set_cookies=True) + self.assertStatus(201) + self.set_jwt_token(self.jsonBody()['token']) + self._get("/api/host", set_cookies=True) + self.assertStatus(200) + self.delete_user("user") diff --git a/qa/tasks/mgr/dashboard/test_cephfs.py b/qa/tasks/mgr/dashboard/test_cephfs.py new file mode 100644 index 00000000..24350cce --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_cephfs.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- +from __future__ 
import absolute_import + +from contextlib import contextmanager + +from .helper import DashboardTestCase + + +class CephfsTest(DashboardTestCase): + CEPHFS = True + + AUTH_ROLES = ['cephfs-manager'] + + @DashboardTestCase.RunAs('test', 'test', ['block-manager']) + def test_access_permissions(self): + fs_id = self.fs.get_namespace_id() + self._get("/api/cephfs/{}/clients".format(fs_id)) + self.assertStatus(403) + self._get("/api/cephfs/{}".format(fs_id)) + self.assertStatus(403) + self._get("/api/cephfs/{}/mds_counters".format(fs_id)) + self.assertStatus(403) + + def test_cephfs_clients(self): + fs_id = self.fs.get_namespace_id() + data = self._get("/api/cephfs/{}/clients".format(fs_id)) + self.assertStatus(200) + + self.assertIn('status', data) + self.assertIn('data', data) + + def test_cephfs_get(self): + fs_id = self.fs.get_namespace_id() + data = self._get("/api/cephfs/{}/".format(fs_id)) + self.assertStatus(200) + + self.assertIn('cephfs', data) + self.assertIn('standbys', data) + self.assertIn('versions', data) + self.assertIsNotNone(data['cephfs']) + self.assertIsNotNone(data['standbys']) + self.assertIsNotNone(data['versions']) + + def test_cephfs_mds_counters(self): + fs_id = self.fs.get_namespace_id() + data = self._get("/api/cephfs/{}/mds_counters".format(fs_id)) + self.assertStatus(200) + + self.assertIsInstance(data, dict) + self.assertIsNotNone(data) + + def test_cephfs_mds_counters_wrong(self): + self._get("/api/cephfs/baadbaad/mds_counters") + self.assertStatus(400) + self.assertJsonBody({ + "component": 'cephfs', + "code": "invalid_cephfs_id", + "detail": "Invalid cephfs ID baadbaad" + }) + + def test_cephfs_list(self): + data = self._get("/api/cephfs/") + self.assertStatus(200) + self.assertIsInstance(data, list) + + cephfs = data[0] + self.assertIn('id', cephfs) + self.assertIn('mdsmap', cephfs) + self.assertIsNotNone(cephfs['id']) + self.assertIsNotNone(cephfs['mdsmap']) diff --git a/qa/tasks/mgr/dashboard/test_cluster_configuration.py b/qa/tasks/mgr/dashboard/test_cluster_configuration.py new file mode 100644 index 00000000..798afe9c --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_cluster_configuration.py @@ -0,0 +1,388 @@ +from __future__ import absolute_import + +import time + +from .helper import DashboardTestCase + + +class ClusterConfigurationTest(DashboardTestCase): + + def test_list(self): + data = self._get('/api/cluster_conf') + self.assertStatus(200) + self.assertIsInstance(data, list) + self.assertGreater(len(data), 1000) + for conf in data: + self._validate_single(conf) + + def test_get(self): + data = self._get('/api/cluster_conf/admin_socket') + self.assertStatus(200) + self._validate_single(data) + self.assertIn('enum_values', data) + + data = self._get('/api/cluster_conf/fantasy_name') + self.assertStatus(404) + + def test_get_specific_db_config_option(self): + config_name = 'mon_allow_pool_delete' + + orig_value = self._get_config_by_name(config_name) + + self._ceph_cmd(['config', 'set', 'mon', config_name, 'true']) + result = self._wait_for_expected_get_result(self._get_config_by_name, config_name, + [{'section': 'mon', 'value': 'true'}]) + self.assertEqual(result, [{'section': 'mon', 'value': 'true'}]) + + self._ceph_cmd(['config', 'set', 'mon', config_name, 'false']) + result = self._wait_for_expected_get_result(self._get_config_by_name, config_name, + [{'section': 'mon', 'value': 'false'}]) + self.assertEqual(result, [{'section': 'mon', 'value': 'false'}]) + + # restore value + if orig_value: + self._ceph_cmd(['config', 'set', 'mon', config_name, 
orig_value[0]['value']]) + + def test_filter_config_options(self): + config_names = ['osd_scrub_during_recovery', 'osd_scrub_begin_hour', 'osd_scrub_end_hour'] + data = self._get('/api/cluster_conf/filter?names={}'.format(','.join(config_names))) + self.assertStatus(200) + self.assertIsInstance(data, list) + self.assertEqual(len(data), 3) + for conf in data: + self._validate_single(conf) + self.assertIn(conf['name'], config_names) + + def test_filter_config_options_empty_names(self): + self._get('/api/cluster_conf/filter?names=') + self.assertStatus(404) + self.assertEqual(self._resp.json()['detail'], 'Config options `` not found') + + def test_filter_config_options_unknown_name(self): + self._get('/api/cluster_conf/filter?names=abc') + self.assertStatus(404) + self.assertEqual(self._resp.json()['detail'], 'Config options `abc` not found') + + def test_filter_config_options_contains_unknown_name(self): + config_names = ['osd_scrub_during_recovery', 'osd_scrub_begin_hour', 'abc'] + data = self._get('/api/cluster_conf/filter?names={}'.format(','.join(config_names))) + self.assertStatus(200) + self.assertIsInstance(data, list) + self.assertEqual(len(data), 2) + for conf in data: + self._validate_single(conf) + self.assertIn(conf['name'], config_names) + + def test_create(self): + config_name = 'debug_ms' + orig_value = self._get_config_by_name(config_name) + + # remove all existing settings for equal preconditions + self._clear_all_values_for_config_option(config_name) + + expected_result = [{'section': 'mon', 'value': '0/3'}] + + self._post('/api/cluster_conf', { + 'name': config_name, + 'value': expected_result + }) + self.assertStatus(201) + result = self._wait_for_expected_get_result(self._get_config_by_name, config_name, + expected_result) + self.assertEqual(result, expected_result) + + # reset original value + self._clear_all_values_for_config_option(config_name) + self._reset_original_values(config_name, orig_value) + + def test_delete(self): + config_name = 'debug_ms' + orig_value = self._get_config_by_name(config_name) + + # set a config option + expected_result = [{'section': 'mon', 'value': '0/3'}] + self._post('/api/cluster_conf', { + 'name': config_name, + 'value': expected_result + }) + self.assertStatus(201) + self._wait_for_expected_get_result(self._get_config_by_name, config_name, expected_result) + + # delete it and check if it's deleted + self._delete('/api/cluster_conf/{}?section={}'.format(config_name, 'mon')) + self.assertStatus(204) + result = self._wait_for_expected_get_result(self._get_config_by_name, config_name, None) + self.assertEqual(result, None) + + # reset original value + self._clear_all_values_for_config_option(config_name) + self._reset_original_values(config_name, orig_value) + + def test_create_cant_update_at_runtime(self): + config_name = 'public_bind_addr' # not updatable + config_value = [{'section': 'global', 'value': 'true'}] + orig_value = self._get_config_by_name(config_name) + + # try to set config option and check if it fails + self._post('/api/cluster_conf', { + 'name': config_name, + 'value': config_value + }) + self.assertStatus(400) + self.assertError(code='config_option_not_updatable_at_runtime', + component='cluster_configuration', + detail='Config option {} is/are not updatable at runtime'.format( + config_name)) + + # check if config option value is still the original one + result = self._wait_for_expected_get_result(self._get_config_by_name, config_name, + orig_value) + self.assertEqual(result, orig_value) + + def 
test_create_two_values(self): + config_name = 'debug_ms' + orig_value = self._get_config_by_name(config_name) + + # remove all existing settings for equal preconditions + self._clear_all_values_for_config_option(config_name) + + expected_result = [{'section': 'mon', 'value': '0/3'}, + {'section': 'osd', 'value': '0/5'}] + + self._post('/api/cluster_conf', { + 'name': config_name, + 'value': expected_result + }) + self.assertStatus(201) + result = self._wait_for_expected_get_result(self._get_config_by_name, config_name, + expected_result) + self.assertEqual(result, expected_result) + + # reset original value + self._clear_all_values_for_config_option(config_name) + self._reset_original_values(config_name, orig_value) + + def test_create_can_handle_none_values(self): + config_name = 'debug_ms' + orig_value = self._get_config_by_name(config_name) + + # remove all existing settings for equal preconditions + self._clear_all_values_for_config_option(config_name) + + self._post('/api/cluster_conf', { + 'name': config_name, + 'value': [{'section': 'mon', 'value': '0/3'}, + {'section': 'osd', 'value': None}] + }) + self.assertStatus(201) + + expected_result = [{'section': 'mon', 'value': '0/3'}] + result = self._wait_for_expected_get_result(self._get_config_by_name, config_name, + expected_result) + self.assertEqual(result, expected_result) + + # reset original value + self._clear_all_values_for_config_option(config_name) + self._reset_original_values(config_name, orig_value) + + def test_create_can_handle_boolean_values(self): + config_name = 'mon_allow_pool_delete' + orig_value = self._get_config_by_name(config_name) + + # remove all existing settings for equal preconditions + self._clear_all_values_for_config_option(config_name) + + expected_result = [{'section': 'mon', 'value': 'true'}] + + self._post('/api/cluster_conf', { + 'name': config_name, + 'value': [{'section': 'mon', 'value': True}]}) + self.assertStatus(201) + + result = self._wait_for_expected_get_result(self._get_config_by_name, config_name, + expected_result) + self.assertEqual(result, expected_result) + + # reset original value + self._clear_all_values_for_config_option(config_name) + self._reset_original_values(config_name, orig_value) + + def test_bulk_set(self): + expected_result = { + 'osd_max_backfills': {'section': 'osd', 'value': '1'}, + 'osd_recovery_max_active': {'section': 'osd', 'value': '3'}, + 'osd_recovery_max_single_start': {'section': 'osd', 'value': '1'}, + 'osd_recovery_sleep': {'section': 'osd', 'value': '2.000000'} + } + orig_values = dict() + + for config_name in expected_result: + orig_values[config_name] = self._get_config_by_name(config_name) + + # remove all existing settings for equal preconditions + self._clear_all_values_for_config_option(config_name) + + self._put('/api/cluster_conf', {'options': expected_result}) + self.assertStatus(200) + + for config_name, value in expected_result.items(): + result = self._wait_for_expected_get_result(self._get_config_by_name, config_name, + [value]) + self.assertEqual(result, [value]) + + # reset original value + self._clear_all_values_for_config_option(config_name) + self._reset_original_values(config_name, orig_values[config_name]) + + def test_bulk_set_cant_update_at_runtime(self): + config_options = { + 'public_bind_addr': {'section': 'global', 'value': '1.2.3.4:567'}, # not updatable + 'public_network': {'section': 'global', 'value': '10.0.0.0/8'} # not updatable + } + orig_values = dict() + + for config_name in config_options: + orig_values[config_name] = 
self._get_config_by_name(config_name) + + # try to set config options and see if it fails + self._put('/api/cluster_conf', {'options': config_options}) + self.assertStatus(400) + self.assertError(code='config_option_not_updatable_at_runtime', + component='cluster_configuration', + detail='Config option {} is/are not updatable at runtime'.format( + ', '.join(config_options.keys()))) + + # check if config option values are still the original ones + for config_name, value in orig_values.items(): + result = self._wait_for_expected_get_result(self._get_config_by_name, config_name, + value) + self.assertEqual(result, value) + + def test_bulk_set_cant_update_at_runtime_partial(self): + config_options = { + 'public_bind_addr': {'section': 'global', 'value': 'true'}, # not updatable + 'log_to_stderr': {'section': 'global', 'value': 'true'} # updatable + } + orig_values = dict() + + for config_name in config_options: + orig_values[config_name] = self._get_config_by_name(config_name) + + # try to set config options and see if it fails + self._put('/api/cluster_conf', {'options': config_options}) + self.assertStatus(400) + self.assertError(code='config_option_not_updatable_at_runtime', + component='cluster_configuration', + detail='Config option {} is/are not updatable at runtime'.format( + 'public_bind_addr')) + + # check if config option values are still the original ones + for config_name, value in orig_values.items(): + result = self._wait_for_expected_get_result(self._get_config_by_name, config_name, + value) + self.assertEqual(result, value) + + def test_check_existence(self): + """ + This test case is intended to check the existence of all hard coded config options used by + the dashboard. + If you include further hard coded options in the dashboard, feel free to add them to the + list. 
+ """ + hard_coded_options = [ + 'osd_max_backfills', # osd-recv-speed + 'osd_recovery_max_active', # osd-recv-speed + 'osd_recovery_max_single_start', # osd-recv-speed + 'osd_recovery_sleep', # osd-recv-speed + 'osd_scrub_during_recovery', # osd-pg-scrub + 'osd_scrub_begin_hour', # osd-pg-scrub + 'osd_scrub_end_hour', # osd-pg-scrub + 'osd_scrub_begin_week_day', # osd-pg-scrub + 'osd_scrub_end_week_day', # osd-pg-scrub + 'osd_scrub_min_interval', # osd-pg-scrub + 'osd_scrub_max_interval', # osd-pg-scrub + 'osd_deep_scrub_interval', # osd-pg-scrub + 'osd_scrub_auto_repair', # osd-pg-scrub + 'osd_max_scrubs', # osd-pg-scrub + 'osd_scrub_priority', # osd-pg-scrub + 'osd_scrub_sleep', # osd-pg-scrub + 'osd_scrub_auto_repair_num_errors', # osd-pg-scrub + 'osd_debug_deep_scrub_sleep', # osd-pg-scrub + 'osd_deep_scrub_keys', # osd-pg-scrub + 'osd_deep_scrub_large_omap_object_key_threshold', # osd-pg-scrub + 'osd_deep_scrub_large_omap_object_value_sum_threshold', # osd-pg-scrub + 'osd_deep_scrub_randomize_ratio', # osd-pg-scrub + 'osd_deep_scrub_stride', # osd-pg-scrub + 'osd_deep_scrub_update_digest_min_age', # osd-pg-scrub + 'osd_op_queue_mclock_scrub_lim', # osd-pg-scrub + 'osd_op_queue_mclock_scrub_res', # osd-pg-scrub + 'osd_op_queue_mclock_scrub_wgt', # osd-pg-scrub + 'osd_requested_scrub_priority', # osd-pg-scrub + 'osd_scrub_backoff_ratio', # osd-pg-scrub + 'osd_scrub_chunk_max', # osd-pg-scrub + 'osd_scrub_chunk_min', # osd-pg-scrub + 'osd_scrub_cost', # osd-pg-scrub + 'osd_scrub_interval_randomize_ratio', # osd-pg-scrub + 'osd_scrub_invalid_stats', # osd-pg-scrub + 'osd_scrub_load_threshold', # osd-pg-scrub + 'osd_scrub_max_preemptions', # osd-pg-scrub + 'mon_allow_pool_delete' # pool-list + ] + + for config_option in hard_coded_options: + self._get('/api/cluster_conf/{}'.format(config_option)) + self.assertStatus(200) + + def _validate_single(self, data): + self.assertIn('name', data) + self.assertIn('daemon_default', data) + self.assertIn('long_desc', data) + self.assertIn('level', data) + self.assertIn('default', data) + self.assertIn('see_also', data) + self.assertIn('tags', data) + self.assertIn('min', data) + self.assertIn('max', data) + self.assertIn('services', data) + self.assertIn('type', data) + self.assertIn('desc', data) + self.assertIn(data['type'], ['str', 'bool', 'float', 'int', 'size', 'uint', 'addr', 'uuid', + 'secs']) + + if 'value' in data: + self.assertIn('source', data) + self.assertIsInstance(data['value'], list) + + for entry in data['value']: + self.assertIsInstance(entry, dict) + self.assertIn('section', entry) + self.assertIn('value', entry) + + def _wait_for_expected_get_result(self, get_func, get_params, expected_result, max_attempts=30, + sleep_time=1): + attempts = 0 + while attempts < max_attempts: + get_result = get_func(get_params) + if get_result == expected_result: + self.assertStatus(200) + return get_result + + time.sleep(sleep_time) + attempts += 1 + + def _get_config_by_name(self, conf_name): + data = self._get('/api/cluster_conf/{}'.format(conf_name)) + if 'value' in data: + return data['value'] + return None + + def _clear_all_values_for_config_option(self, config_name): + values = self._get_config_by_name(config_name) + if values: + for value in values: + self._ceph_cmd(['config', 'rm', value['section'], config_name]) + + def _reset_original_values(self, config_name, orig_values): + if orig_values: + for value in orig_values: + self._ceph_cmd(['config', 'set', value['section'], config_name, value['value']]) diff --git 
a/qa/tasks/mgr/dashboard/test_erasure_code_profile.py b/qa/tasks/mgr/dashboard/test_erasure_code_profile.py new file mode 100644 index 00000000..9fcce30f --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_erasure_code_profile.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +import six + +from .helper import DashboardTestCase, JObj, JList + + +class ECPTest(DashboardTestCase): + + AUTH_ROLES = ['pool-manager'] + + @DashboardTestCase.RunAs('test', 'test', ['rgw-manager']) + def test_read_access_permissions(self): + self._get('/api/erasure_code_profile') + self.assertStatus(403) + + @DashboardTestCase.RunAs('test', 'test', ['read-only']) + def test_write_access_permissions(self): + self._get('/api/erasure_code_profile') + self.assertStatus(200) + data = {'name': 'ecp32', 'k': 3, 'm': 2} + self._post('/api/erasure_code_profile', data) + self.assertStatus(403) + self._delete('/api/erasure_code_profile/default') + self.assertStatus(403) + + @classmethod + def tearDownClass(cls): + super(ECPTest, cls).tearDownClass() + cls._ceph_cmd(['osd', 'erasure-code-profile', 'rm', 'ecp32']) + cls._ceph_cmd(['osd', 'erasure-code-profile', 'rm', 'lrc']) + + def test_list(self): + data = self._get('/api/erasure_code_profile') + self.assertStatus(200) + + default = [p for p in data if p['name'] == 'default'] + if default: + default_ecp = { + 'k': 2, + 'technique': 'reed_sol_van', + 'm': 1, + 'name': 'default', + 'plugin': 'jerasure' + } + if 'crush-failure-domain' in default[0]: + default_ecp['crush-failure-domain'] = default[0]['crush-failure-domain'] + self.assertSubset(default_ecp, default[0]) + get_data = self._get('/api/erasure_code_profile/default') + self.assertEqual(get_data, default[0]) + + + def test_create(self): + data = {'name': 'ecp32', 'k': 3, 'm': 2} + self._post('/api/erasure_code_profile', data) + self.assertStatus(201) + + self._get('/api/erasure_code_profile/ecp32') + self.assertJsonSubset({ + 'crush-device-class': '', + 'crush-failure-domain': 'osd', + 'crush-root': 'default', + 'jerasure-per-chunk-alignment': 'false', + 'k': 3, + 'm': 2, + 'name': 'ecp32', + 'plugin': 'jerasure', + 'technique': 'reed_sol_van', + }) + + self.assertStatus(200) + + self._delete('/api/erasure_code_profile/ecp32') + self.assertStatus(204) + + def test_create_plugin(self): + data = {'name': 'lrc', 'k': '2', 'm': '2', 'l': '2', 'plugin': 'lrc'} + self._post('/api/erasure_code_profile', data) + self.assertJsonBody(None) + self.assertStatus(201) + + self._get('/api/erasure_code_profile/lrc') + self.assertJsonBody({ + 'crush-device-class': '', + 'crush-failure-domain': 'host', + 'crush-root': 'default', + 'k': 2, + 'l': '2', + 'm': 2, + 'name': 'lrc', + 'plugin': 'lrc' + }) + + self.assertStatus(200) + + self._delete('/api/erasure_code_profile/lrc') + self.assertStatus(204) + + def test_ecp_info(self): + self._get('/api/erasure_code_profile/_info') + self.assertSchemaBody(JObj({ + 'names': JList(six.string_types), + 'failure_domains': JList(six.string_types), + 'plugins': JList(six.string_types), + 'devices': JList(six.string_types), + 'directory': six.string_types, + })) + diff --git a/qa/tasks/mgr/dashboard/test_ganesha.py b/qa/tasks/mgr/dashboard/test_ganesha.py new file mode 100644 index 00000000..b90bb4af --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_ganesha.py @@ -0,0 +1,168 @@ +# -*- coding: utf-8 -*- +# pylint: disable=too-many-public-methods + +from __future__ import absolute_import + + +from .helper import DashboardTestCase + + +class GaneshaTest(DashboardTestCase): 
+ CEPHFS = True + AUTH_ROLES = ['pool-manager', 'ganesha-manager'] + + @classmethod + def create_pool(cls, name, pg_num, pool_type, application='rbd'): + data = { + 'pool': name, + 'pg_num': pg_num, + 'pool_type': pool_type, + 'application_metadata': [application] + } + if pool_type == 'erasure': + data['flags'] = ['ec_overwrites'] + cls._task_post("/api/pool", data) + + @classmethod + def setUpClass(cls): + super(GaneshaTest, cls).setUpClass() + cls.create_pool('ganesha', 2**2, 'replicated') + cls._rados_cmd(['-p', 'ganesha', '-N', 'ganesha1', 'create', 'conf-node1']) + cls._rados_cmd(['-p', 'ganesha', '-N', 'ganesha1', 'create', 'conf-node2']) + cls._rados_cmd(['-p', 'ganesha', '-N', 'ganesha1', 'create', 'conf-node3']) + cls._rados_cmd(['-p', 'ganesha', '-N', 'ganesha2', 'create', 'conf-node1']) + cls._rados_cmd(['-p', 'ganesha', '-N', 'ganesha2', 'create', 'conf-node2']) + cls._rados_cmd(['-p', 'ganesha', '-N', 'ganesha2', 'create', 'conf-node3']) + cls._ceph_cmd(['dashboard', 'set-ganesha-clusters-rados-pool-namespace', 'cluster1:ganesha/ganesha1,cluster2:ganesha/ganesha2']) + + # RGW setup + cls._radosgw_admin_cmd([ + 'user', 'create', '--uid', 'admin', '--display-name', 'admin', + '--system', '--access-key', 'admin', '--secret', 'admin' + ]) + cls._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-secret-key'], 'admin') + cls._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-access-key'], 'admin') + + @classmethod + def tearDownClass(cls): + super(GaneshaTest, cls).tearDownClass() + cls._radosgw_admin_cmd(['user', 'rm', '--uid', 'admin', '--purge-data']) + cls._ceph_cmd(['osd', 'pool', 'delete', 'ganesha', 'ganesha', '--yes-i-really-really-mean-it']) + + @DashboardTestCase.RunAs('test', 'test', [{'rbd-image': ['create', 'update', 'delete']}]) + def test_read_access_permissions(self): + self._get('/api/nfs-ganesha/export') + self.assertStatus(403) + + def test_list_daemons(self): + daemons = self._get("/api/nfs-ganesha/daemon") + self.assertEqual(len(daemons), 6) + daemons = [(d['daemon_id'], d['cluster_id']) for d in daemons] + self.assertIn(('node1', 'cluster1'), daemons) + self.assertIn(('node2', 'cluster1'), daemons) + self.assertIn(('node3', 'cluster1'), daemons) + self.assertIn(('node1', 'cluster2'), daemons) + self.assertIn(('node2', 'cluster2'), daemons) + self.assertIn(('node3', 'cluster2'), daemons) + + @classmethod + def create_export(cls, path, cluster_id, daemons, fsal, sec_label_xattr=None): + if fsal == 'CEPH': + fsal = {"name": "CEPH", "user_id":"admin", "fs_name": None, "sec_label_xattr": sec_label_xattr} + pseudo = "/cephfs{}".format(path) + else: + fsal = {"name": "RGW", "rgw_user_id": "admin"} + pseudo = "/rgw/{}".format(path if path[0] != '/' else "") + ex_json = { + "path": path, + "fsal": fsal, + "cluster_id": cluster_id, + "daemons": ["node1", "node3"], + "pseudo": pseudo, + "tag": None, + "access_type": "RW", + "squash": "no_root_squash", + "security_label": sec_label_xattr is not None, + "protocols": [4], + "transports": ["TCP"], + "clients": [{ + "addresses":["10.0.0.0/8"], + "access_type": "RO", + "squash": "root" + }] + } + return cls._task_post('/api/nfs-ganesha/export', ex_json) + + def tearDown(self): + super(GaneshaTest, self).tearDown() + exports = self._get("/api/nfs-ganesha/export") + if self._resp.status_code != 200: + return + self.assertIsInstance(exports, list) + for exp in exports: + self._task_delete("/api/nfs-ganesha/export/{}/{}" + .format(exp['cluster_id'], exp['export_id'])) + + def test_create_export(self): + exports = 
self._get("/api/nfs-ganesha/export") + self.assertEqual(len(exports), 0) + + data = self.create_export("/foo", 'cluster1', ['node1', 'node2'], 'CEPH', "security.selinux") + + exports = self._get("/api/nfs-ganesha/export") + self.assertEqual(len(exports), 1) + self.assertDictEqual(exports[0], data) + return data + + def test_update_export(self): + export = self.test_create_export() + export['access_type'] = 'RO' + export['daemons'] = ['node1', 'node3'] + export['security_label'] = True + data = self._task_put('/api/nfs-ganesha/export/{}/{}' + .format(export['cluster_id'], export['export_id']), + export) + exports = self._get("/api/nfs-ganesha/export") + self.assertEqual(len(exports), 1) + self.assertDictEqual(exports[0], data) + self.assertEqual(exports[0]['daemons'], ['node1', 'node3']) + self.assertEqual(exports[0]['security_label'], True) + + def test_delete_export(self): + export = self.test_create_export() + self._task_delete("/api/nfs-ganesha/export/{}/{}" + .format(export['cluster_id'], export['export_id'])) + self.assertStatus(204) + + def test_get_export(self): + exports = self._get("/api/nfs-ganesha/export") + self.assertEqual(len(exports), 0) + + data1 = self.create_export("/foo", 'cluster2', ['node1', 'node2'], 'CEPH') + data2 = self.create_export("mybucket", 'cluster2', ['node2', 'node3'], 'RGW') + + export1 = self._get("/api/nfs-ganesha/export/cluster2/1") + self.assertDictEqual(export1, data1) + + export2 = self._get("/api/nfs-ganesha/export/cluster2/2") + self.assertDictEqual(export2, data2) + + def test_invalid_status(self): + self._ceph_cmd(['dashboard', 'set-ganesha-clusters-rados-pool-namespace', '']) + + data = self._get('/api/nfs-ganesha/status') + self.assertStatus(200) + self.assertIn('available', data) + self.assertIn('message', data) + self.assertFalse(data['available']) + self.assertIn('Ganesha config location is not configured. 
Please set the GANESHA_RADOS_POOL_NAMESPACE setting.', + data['message']) + + self._ceph_cmd(['dashboard', 'set-ganesha-clusters-rados-pool-namespace', 'cluster1:ganesha/ganesha1,cluster2:ganesha/ganesha2']) + + def test_valid_status(self): + data = self._get('/api/nfs-ganesha/status') + self.assertStatus(200) + self.assertIn('available', data) + self.assertIn('message', data) + self.assertTrue(data['available']) diff --git a/qa/tasks/mgr/dashboard/test_health.py b/qa/tasks/mgr/dashboard/test_health.py new file mode 100644 index 00000000..a9334edc --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_health.py @@ -0,0 +1,305 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +from .helper import DashboardTestCase, JAny, JLeaf, JList, JObj + + +class HealthTest(DashboardTestCase): + CEPHFS = True + + __pg_info_schema = JObj({ + 'object_stats': JObj({ + 'num_objects': int, + 'num_object_copies': int, + 'num_objects_degraded': int, + 'num_objects_misplaced': int, + 'num_objects_unfound': int + }), + 'pgs_per_osd': float, + 'statuses': JObj({}, allow_unknown=True, unknown_schema=int) + }) + + __mdsmap_schema = JObj({ + 'session_autoclose': int, + 'balancer': str, + 'up': JObj({}, allow_unknown=True), + 'last_failure_osd_epoch': int, + 'in': JList(int), + 'last_failure': int, + 'max_file_size': int, + 'explicitly_allowed_features': int, + 'damaged': JList(int), + 'tableserver': int, + 'failed': JList(int), + 'metadata_pool': int, + 'epoch': int, + 'stopped': JList(int), + 'max_mds': int, + 'compat': JObj({ + 'compat': JObj({}, allow_unknown=True), + 'ro_compat': JObj({}, allow_unknown=True), + 'incompat': JObj({}, allow_unknown=True) + }), + 'min_compat_client': str, + 'data_pools': JList(int), + 'info': JObj({}, allow_unknown=True), + 'fs_name': str, + 'created': str, + 'standby_count_wanted': int, + 'enabled': bool, + 'modified': str, + 'session_timeout': int, + 'flags': int, + 'ever_allowed_features': int, + 'root': int + }) + + def test_minimal_health(self): + data = self._get('/api/health/minimal') + self.assertStatus(200) + schema = JObj({ + 'client_perf': JObj({ + 'read_bytes_sec': int, + 'read_op_per_sec': int, + 'recovering_bytes_per_sec': int, + 'write_bytes_sec': int, + 'write_op_per_sec': int + }), + 'df': JObj({ + 'stats': JObj({ + 'total_avail_bytes': int, + 'total_bytes': int, + 'total_used_raw_bytes': int, + }) + }), + 'fs_map': JObj({ + 'filesystems': JList( + JObj({ + 'mdsmap': self.__mdsmap_schema + }), + ), + 'standbys': JList(JObj({}, allow_unknown=True)), + }), + 'health': JObj({ + 'checks': JList(str), + 'status': str, + }), + 'hosts': int, + 'iscsi_daemons': int, + 'mgr_map': JObj({ + 'active_name': str, + 'standbys': JList(JLeaf(dict)) + }), + 'mon_status': JObj({ + 'monmap': JObj({ + 'mons': JList(JLeaf(dict)), + }), + 'quorum': JList(int) + }), + 'osd_map': JObj({ + 'osds': JList( + JObj({ + 'in': int, + 'up': int, + })), + }), + 'pg_info': self.__pg_info_schema, + 'pools': JList(JLeaf(dict)), + 'rgw': int, + 'scrub_status': str + }) + self.assertSchema(data, schema) + + def test_full_health(self): + data = self._get('/api/health/full') + self.assertStatus(200) + module_info_schema = JObj({ + 'can_run': bool, + 'error_string': str, + 'name': str, + 'module_options': JObj( + {}, + allow_unknown=True, + unknown_schema=JObj({ + 'name': str, + 'type': str, + 'level': str, + 'flags': int, + 'default_value': str, + 'min': str, + 'max': str, + 'enum_allowed': JList(str), + 'see_also': JList(str), + 'desc': str, + 'long_desc': str, + 'tags': JList(str), + })), + 
}) + schema = JObj({ + 'client_perf': JObj({ + 'read_bytes_sec': int, + 'read_op_per_sec': int, + 'recovering_bytes_per_sec': int, + 'write_bytes_sec': int, + 'write_op_per_sec': int + }), + 'df': JObj({ + 'pools': JList(JObj({ + 'stats': JObj({ + 'stored': int, + 'objects': int, + 'kb_used': int, + 'bytes_used': int, + 'percent_used': float, + 'max_avail': int, + 'quota_objects': int, + 'quota_bytes': int, + 'dirty': int, + 'rd': int, + 'rd_bytes': int, + 'wr': int, + 'wr_bytes': int, + 'compress_bytes_used': int, + 'compress_under_bytes': int, + 'stored_raw': int, + 'avail_raw': int + }), + 'name': str, + 'id': int + })), + 'stats': JObj({ + 'total_avail_bytes': int, + 'total_bytes': int, + 'total_used_bytes': int, + 'total_used_raw_bytes': int, + 'total_used_raw_ratio': float, + 'num_osds': int, + 'num_per_pool_osds': int + }) + }), + 'fs_map': JObj({ + 'compat': JObj({ + 'compat': JObj({}, allow_unknown=True, unknown_schema=str), + 'incompat': JObj( + {}, allow_unknown=True, unknown_schema=str), + 'ro_compat': JObj( + {}, allow_unknown=True, unknown_schema=str) + }), + 'default_fscid': int, + 'epoch': int, + 'feature_flags': JObj( + {}, allow_unknown=True, unknown_schema=bool), + 'filesystems': JList( + JObj({ + 'id': int, + 'mdsmap': self.__mdsmap_schema + }), + ), + 'standbys': JList(JObj({}, allow_unknown=True)), + }), + 'health': JObj({ + 'checks': JList(str), + 'status': str, + }), + 'hosts': int, + 'iscsi_daemons': int, + 'mgr_map': JObj({ + 'active_addr': str, + 'active_addrs': JObj({ + 'addrvec': JList(JObj({ + 'addr': str, + 'nonce': int, + 'type': str + })) + }), + 'active_change': str, # timestamp + 'active_gid': int, + 'active_name': str, + 'always_on_modules': JObj( + {}, + allow_unknown=True, unknown_schema=JList(str) + ), + 'available': bool, + 'available_modules': JList(module_info_schema), + 'epoch': int, + 'modules': JList(str), + 'services': JObj( + {'dashboard': str}, # This module should always be present + allow_unknown=True, unknown_schema=str + ), + 'standbys': JList(JObj({ + 'available_modules': JList(module_info_schema), + 'gid': int, + 'name': str + })) + }), + 'mon_status': JObj({ + 'election_epoch': int, + 'extra_probe_peers': JList(JAny(none=True)), + 'feature_map': JObj( + {}, allow_unknown=True, unknown_schema=JList(JObj({ + 'features': str, + 'num': int, + 'release': str + })) + ), + 'features': JObj({ + 'quorum_con': str, + 'quorum_mon': JList(str), + 'required_con': str, + 'required_mon': JList(str) + }), + 'monmap': JObj({ + # TODO: expand on monmap schema + 'mons': JList(JLeaf(dict)), + }, allow_unknown=True), + 'name': str, + 'outside_quorum': JList(int), + 'quorum': JList(int), + 'quorum_age': int, + 'rank': int, + 'state': str, + # TODO: What type should be expected here? 
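+                # (JAny(none=True) keeps the field permissive, accepting any value
+                # including null, until the expected type is pinned down.)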
+ 'sync_provider': JList(JAny(none=True)) + }), + 'osd_map': JObj({ + # TODO: define schema for crush map and osd_metadata, among + # others + 'osds': JList( + JObj({ + 'in': int, + 'up': int, + }, allow_unknown=True)), + }, allow_unknown=True), + 'pg_info': self.__pg_info_schema, + 'pools': JList(JLeaf(dict)), + 'rgw': int, + 'scrub_status': str + }) + self.assertSchema(data, schema) + + cluster_pools = self.ceph_cluster.mon_manager.list_pools() + self.assertEqual(len(cluster_pools), len(data['pools'])) + for pool in data['pools']: + self.assertIn(pool['pool_name'], cluster_pools) + + @DashboardTestCase.RunAs('test', 'test', ['pool-manager']) + def test_health_permissions(self): + data = self._get('/api/health/full') + self.assertStatus(200) + + schema = JObj({ + 'client_perf': JObj({}, allow_unknown=True), + 'df': JObj({}, allow_unknown=True), + 'health': JObj({ + 'checks': JList(str), + 'status': str + }), + 'pools': JList(JLeaf(dict)), + }) + self.assertSchema(data, schema) + + cluster_pools = self.ceph_cluster.mon_manager.list_pools() + self.assertEqual(len(cluster_pools), len(data['pools'])) + for pool in data['pools']: + self.assertIn(pool['pool_name'], cluster_pools) diff --git a/qa/tasks/mgr/dashboard/test_host.py b/qa/tasks/mgr/dashboard/test_host.py new file mode 100644 index 00000000..f9acbeff --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_host.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +from .helper import DashboardTestCase, JList, JObj + + +class HostControllerTest(DashboardTestCase): + + AUTH_ROLES = ['read-only'] + + @DashboardTestCase.RunAs('test', 'test', ['block-manager']) + def test_access_permissions(self): + self._get('/api/host') + self.assertStatus(403) + + def test_host_list(self): + data = self._get('/api/host') + self.assertStatus(200) + + for server in data: + self.assertIn('services', server) + self.assertIn('hostname', server) + self.assertIn('ceph_version', server) + self.assertIsNotNone(server['hostname']) + self.assertIsNotNone(server['ceph_version']) + self.assertGreaterEqual(len(server['services']), 1) + for service in server['services']: + self.assertIn('type', service) + self.assertIn('id', service) + self.assertIsNotNone(service['type']) + self.assertIsNotNone(service['id']) diff --git a/qa/tasks/mgr/dashboard/test_logs.py b/qa/tasks/mgr/dashboard/test_logs.py new file mode 100644 index 00000000..17d5d830 --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_logs.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +from .helper import DashboardTestCase, JList, JObj + + +class LogsTest(DashboardTestCase): + CEPHFS = True + + def test_logs(self): + data = self._get("/api/logs/all") + self.assertStatus(200) + log_entry_schema = JList(JObj({ + 'addrs': JObj({ + 'addrvec': JList(JObj({ + 'addr': str, + 'nonce': int, + 'type': str + })) + }), + 'channel': str, + 'message': str, + 'name': str, + 'priority': str, + 'rank': str, + 'seq': int, + 'stamp': str + })) + schema = JObj({ + 'audit_log': log_entry_schema, + 'clog': log_entry_schema + }) + self.assertSchema(data, schema) + + @DashboardTestCase.RunAs('test', 'test', ['pool-manager']) + def test_log_perms(self): + self._get("/api/logs/all") + self.assertStatus(403) diff --git a/qa/tasks/mgr/dashboard/test_mgr_module.py b/qa/tasks/mgr/dashboard/test_mgr_module.py new file mode 100644 index 00000000..080b8b64 --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_mgr_module.py @@ -0,0 +1,160 @@ +# -*- coding: utf-8 -*- +from __future__ import 
absolute_import + +import logging +import requests + +from .helper import DashboardTestCase, JAny, JObj, JList, JLeaf + +logger = logging.getLogger(__name__) + + +class MgrModuleTestCase(DashboardTestCase): + MGRS_REQUIRED = 1 + + def wait_until_rest_api_accessible(self): + """ + Wait until the REST API is accessible. + """ + + def _check_connection(): + try: + # Try reaching an API endpoint successfully. + self._get('/api/mgr/module') + if self._resp.status_code == 200: + return True + except requests.ConnectionError: + pass + return False + + self.wait_until_true(_check_connection, timeout=30) + + +class MgrModuleTest(MgrModuleTestCase): + def test_list_disabled_module(self): + self._ceph_cmd(['mgr', 'module', 'disable', 'iostat']) + self.wait_until_rest_api_accessible() + data = self._get('/api/mgr/module') + self.assertStatus(200) + self.assertSchema( + data, + JList( + JObj(sub_elems={ + 'name': JLeaf(str), + 'enabled': JLeaf(bool), + 'always_on': JLeaf(bool), + 'options': JObj( + {}, + allow_unknown=True, + unknown_schema=JObj({ + 'name': str, + 'type': str, + 'level': str, + 'flags': int, + 'default_value': JAny(none=False), + 'min': JAny(none=False), + 'max': JAny(none=False), + 'enum_allowed': JList(str), + 'see_also': JList(str), + 'desc': str, + 'long_desc': str, + 'tags': JList(str) + })) + }))) + module_info = self.find_object_in_list('name', 'iostat', data) + self.assertIsNotNone(module_info) + self.assertFalse(module_info['enabled']) + + def test_list_enabled_module(self): + self._ceph_cmd(['mgr', 'module', 'enable', 'iostat']) + self.wait_until_rest_api_accessible() + data = self._get('/api/mgr/module') + self.assertStatus(200) + self.assertSchema( + data, + JList( + JObj(sub_elems={ + 'name': JLeaf(str), + 'enabled': JLeaf(bool), + 'always_on': JLeaf(bool), + 'options': JObj( + {}, + allow_unknown=True, + unknown_schema=JObj({ + 'name': str, + 'type': str, + 'level': str, + 'flags': int, + 'default_value': JAny(none=False), + 'min': JAny(none=False), + 'max': JAny(none=False), + 'enum_allowed': JList(str), + 'see_also': JList(str), + 'desc': str, + 'long_desc': str, + 'tags': JList(str) + })) + }))) + module_info = self.find_object_in_list('name', 'iostat', data) + self.assertIsNotNone(module_info) + self.assertTrue(module_info['enabled']) + + +class MgrModuleTelemetryTest(MgrModuleTestCase): + def test_get(self): + data = self._get('/api/mgr/module/telemetry') + self.assertStatus(200) + self.assertSchema( + data, + JObj( + allow_unknown=True, + sub_elems={ + 'channel_basic': bool, + 'channel_ident': bool, + 'channel_crash': bool, + 'channel_device': bool, + 'contact': str, + 'description': str, + 'enabled': bool, + 'interval': int, + 'last_opt_revision': int, + 'leaderboard': bool, + 'organization': str, + 'proxy': str, + 'url': str + })) + + def test_put(self): + self.set_config_key('config/mgr/mgr/telemetry/contact', '') + self.set_config_key('config/mgr/mgr/telemetry/description', '') + self.set_config_key('config/mgr/mgr/telemetry/enabled', 'True') + self.set_config_key('config/mgr/mgr/telemetry/interval', '72') + self.set_config_key('config/mgr/mgr/telemetry/leaderboard', 'False') + self.set_config_key('config/mgr/mgr/telemetry/organization', '') + self.set_config_key('config/mgr/mgr/telemetry/proxy', '') + self.set_config_key('config/mgr/mgr/telemetry/url', '') + self._put( + '/api/mgr/module/telemetry', + data={ + 'config': { + 'contact': 'tux@suse.com', + 'description': 'test', + 'enabled': False, + 'interval': 4711, + 'leaderboard': True, + 'organization': 'SUSE 
Linux', + 'proxy': 'foo', + 'url': 'https://foo.bar/report' + } + }) + self.assertStatus(200) + data = self._get('/api/mgr/module/telemetry') + self.assertStatus(200) + self.assertEqual(data['contact'], 'tux@suse.com') + self.assertEqual(data['description'], 'test') + self.assertFalse(data['enabled']) + self.assertEqual(data['interval'], 4711) + self.assertTrue(data['leaderboard']) + self.assertEqual(data['organization'], 'SUSE Linux') + self.assertEqual(data['proxy'], 'foo') + self.assertEqual(data['url'], 'https://foo.bar/report') diff --git a/qa/tasks/mgr/dashboard/test_monitor.py b/qa/tasks/mgr/dashboard/test_monitor.py new file mode 100644 index 00000000..0cf7e25a --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_monitor.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +from .helper import DashboardTestCase + + +class MonitorTest(DashboardTestCase): + AUTH_ROLES = ['cluster-manager'] + + @DashboardTestCase.RunAs('test', 'test', ['block-manager']) + def test_access_permissions(self): + self._get('/api/monitor') + self.assertStatus(403) + + + def test_monitor_default(self): + data = self._get("/api/monitor") + self.assertStatus(200) + + self.assertIn('mon_status', data) + self.assertIn('in_quorum', data) + self.assertIn('out_quorum', data) + self.assertIsNotNone(data['mon_status']) + self.assertIsNotNone(data['in_quorum']) + self.assertIsNotNone(data['out_quorum']) diff --git a/qa/tasks/mgr/dashboard/test_osd.py b/qa/tasks/mgr/dashboard/test_osd.py new file mode 100644 index 00000000..c6c7c5aa --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_osd.py @@ -0,0 +1,157 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +import json + +from .helper import DashboardTestCase, JObj, JAny, JList, JLeaf, JTuple + + +class OsdTest(DashboardTestCase): + + AUTH_ROLES = ['cluster-manager'] + + def tearDown(self): + self._post('/api/osd/0/mark_in') + + @DashboardTestCase.RunAs('test', 'test', ['block-manager']) + def test_access_permissions(self): + self._get('/api/osd') + self.assertStatus(403) + self._get('/api/osd/0') + self.assertStatus(403) + + def assert_in_and_not_none(self, data, properties): + self.assertSchema(data, JObj({p: JAny(none=False) for p in properties}, allow_unknown=True)) + + def test_list(self): + data = self._get('/api/osd') + self.assertStatus(200) + + self.assertGreaterEqual(len(data), 1) + data = data[0] + self.assert_in_and_not_none(data, ['host', 'tree', 'state', 'stats', 'stats_history']) + self.assert_in_and_not_none(data['host'], ['name']) + self.assert_in_and_not_none(data['tree'], ['id']) + self.assert_in_and_not_none(data['stats'], ['numpg', 'stat_bytes_used', 'stat_bytes', + 'op_r', 'op_w']) + self.assert_in_and_not_none(data['stats_history'], ['op_out_bytes', 'op_in_bytes']) + self.assertSchema(data['stats_history']['op_out_bytes'], + JList(JTuple([JLeaf(int), JLeaf(float)]))) + + def test_details(self): + data = self._get('/api/osd/0') + self.assertStatus(200) + self.assert_in_and_not_none(data, ['osd_metadata', 'histogram']) + self.assert_in_and_not_none(data['histogram'], ['osd']) + self.assert_in_and_not_none(data['histogram']['osd'], ['op_w_latency_in_bytes_histogram', + 'op_r_latency_out_bytes_histogram']) + + def test_scrub(self): + self._post('/api/osd/0/scrub?deep=False') + self.assertStatus(200) + + self._post('/api/osd/0/scrub?deep=True') + self.assertStatus(200) + + def test_mark_out_and_in(self): + self._post('/api/osd/0/mark_out') + self.assertStatus(200) + + self._post('/api/osd/0/mark_in') + 
self.assertStatus(200) + + def test_mark_down(self): + self._post('/api/osd/0/mark_down') + self.assertStatus(200) + + def test_reweight(self): + self._post('/api/osd/0/reweight', {'weight': 0.4}) + self.assertStatus(200) + + def get_reweight_value(): + self._get('/api/osd/0') + response = self.jsonBody() + if 'osd_map' in response and 'weight' in response['osd_map']: + return round(response['osd_map']['weight'], 1) + self.wait_until_equal(get_reweight_value, 0.4, 10) + self.assertStatus(200) + + # Undo + self._post('/api/osd/0/reweight', {'weight': 1}) + + def test_create_lost_destroy_remove(self): + # Create + self._post('/api/osd', { + 'uuid': 'f860ca2e-757d-48ce-b74a-87052cad563f', + 'svc_id': 5 + }) + self.assertStatus(201) + # Lost + self._post('/api/osd/5/mark_lost') + self.assertStatus(200) + # Destroy + self._post('/api/osd/5/destroy') + self.assertStatus(200) + # Purge + self._post('/api/osd/5/purge') + self.assertStatus(200) + + def test_safe_to_destroy(self): + osd_dump = json.loads(self._ceph_cmd(['osd', 'dump', '-f', 'json'])) + unused_osd_id = max(map(lambda e: e['osd'], osd_dump['osds'])) + 10 + self._get('/api/osd/{}/safe_to_destroy'.format(unused_osd_id)) + self.assertStatus(200) + self.assertJsonBody({ + 'is_safe_to_destroy': True, + 'active': [], + 'missing_stats': [], + 'safe_to_destroy': [unused_osd_id], + 'stored_pgs': [], + }) + + def get_destroy_status(): + self._get('/api/osd/0/safe_to_destroy') + if 'is_safe_to_destroy' in self.jsonBody(): + return self.jsonBody()['is_safe_to_destroy'] + return None + self.wait_until_equal(get_destroy_status, False, 10) + self.assertStatus(200) + + +class OsdFlagsTest(DashboardTestCase): + def __init__(self, *args, **kwargs): + super(OsdFlagsTest, self).__init__(*args, **kwargs) + self._initial_flags = sorted( # These flags cannot be unset + ['sortbitwise', 'recovery_deletes', 'purged_snapdirs', + 'pglog_hardlimit']) + + @classmethod + def _get_cluster_osd_flags(cls): + return sorted( + json.loads(cls._ceph_cmd(['osd', 'dump', + '--format=json']))['flags_set']) + + @classmethod + def _put_flags(cls, flags): + cls._put('/api/osd/flags', data={'flags': flags}) + return sorted(cls._resp.json()) + + def test_list_osd_flags(self): + flags = self._get('/api/osd/flags') + self.assertStatus(200) + self.assertEqual(len(flags), 4) + self.assertEqual(sorted(flags), self._initial_flags) + + def test_add_osd_flag(self): + flags = self._put_flags([ + 'sortbitwise', 'recovery_deletes', 'purged_snapdirs', 'noout', + 'pause', 'pglog_hardlimit' + ]) + self.assertEqual(flags, sorted([ + 'sortbitwise', 'recovery_deletes', 'purged_snapdirs', 'noout', + 'pause', 'pglog_hardlimit' + ])) + + # Restore flags + self._put_flags(self._initial_flags) diff --git a/qa/tasks/mgr/dashboard/test_perf_counters.py b/qa/tasks/mgr/dashboard/test_perf_counters.py new file mode 100644 index 00000000..c01368bc --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_perf_counters.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +from .helper import DashboardTestCase, JObj + + +class PerfCountersControllerTest(DashboardTestCase): + + def test_perf_counters_list(self): + data = self._get('/api/perf_counters') + self.assertStatus(200) + + self.assertIsInstance(data, dict) + for mon in self.mons(): + self.assertIn('mon.{}'.format(mon), data) + + osds = self.ceph_cluster.mon_manager.get_osd_dump() + for osd in osds: + self.assertIn('osd.{}'.format(osd['osd']), data) + + def _validate_perf(self, srv_id, srv_type, data, allow_empty): + 
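+        # Shared helper for the per-daemon tests below: checks that the response
+        # names the expected service (type and id) and that each listed counter
+        # carries a description, name, unit and value. allow_empty=True is passed
+        # for MDS daemons, which may not have reported any counters yet.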
self.assertIsInstance(data, dict) + self.assertEqual(srv_type, data['service']['type']) + self.assertEqual(str(srv_id), data['service']['id']) + self.assertIsInstance(data['counters'], list) + if not allow_empty: + self.assertGreater(len(data['counters']), 0) + for counter in data['counters'][0:1]: + self.assertIsInstance(counter, dict) + self.assertIn('description', counter) + self.assertIn('name', counter) + self.assertIn('unit', counter) + self.assertIn('value', counter) + + def test_perf_counters_mon_get(self): + mon = self.mons()[0] + data = self._get('/api/perf_counters/mon/{}'.format(mon)) + self.assertStatus(200) + self._validate_perf(mon, 'mon', data, allow_empty=False) + + def test_perf_counters_mgr_get(self): + mgr = list(self.mgr_cluster.mgr_ids)[0] + data = self._get('/api/perf_counters/mgr/{}'.format(mgr)) + self.assertStatus(200) + self._validate_perf(mgr, 'mgr', data, allow_empty=False) + + def test_perf_counters_mds_get(self): + for mds in self.mds_cluster.mds_ids: + data = self._get('/api/perf_counters/mds/{}'.format(mds)) + self.assertStatus(200) + self._validate_perf(mds, 'mds', data, allow_empty=True) + + def test_perf_counters_osd_get(self): + for osd in self.ceph_cluster.mon_manager.get_osd_dump(): + osd = osd['osd'] + data = self._get('/api/perf_counters/osd/{}'.format(osd)) + self.assertStatus(200) + self._validate_perf(osd, 'osd', data, allow_empty=False) + + def test_perf_counters_not_found(self): + osds = self.ceph_cluster.mon_manager.get_osd_dump() + unused_id = int(list(map(lambda o: o['osd'], osds)).pop()) + 1 + + self._get('/api/perf_counters/osd/{}'.format(unused_id)) + self.assertStatus(404) + schema = JObj(sub_elems={ + 'status': str, + 'detail': str, + }, allow_unknown=True) + self.assertEqual(self._resp.json()['detail'], "'osd.{}' not found".format(unused_id)) + self.assertSchemaBody(schema) diff --git a/qa/tasks/mgr/dashboard/test_pool.py b/qa/tasks/mgr/dashboard/test_pool.py new file mode 100644 index 00000000..ae81f779 --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_pool.py @@ -0,0 +1,364 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +import logging +import six +import time +from contextlib import contextmanager + +from .helper import DashboardTestCase, JAny, JList, JObj, JUnion + +log = logging.getLogger(__name__) + + +class PoolTest(DashboardTestCase): + AUTH_ROLES = ['pool-manager'] + + pool_schema = JObj(sub_elems={ + 'pool_name': str, + 'type': str, + 'application_metadata': JList(str), + 'flags': int, + 'flags_names': str, + }, allow_unknown=True) + + pool_list_stat_schema = JObj(sub_elems={ + 'latest': JUnion([int,float]), + 'rate': float, + 'rates': JList(JAny(none=False)), + }) + + pool_list_stats_schema = JObj(sub_elems={ + 'avail_raw': pool_list_stat_schema, + 'bytes_used': pool_list_stat_schema, + 'max_avail': pool_list_stat_schema, + 'percent_used': pool_list_stat_schema, + 'rd_bytes': pool_list_stat_schema, + 'wr_bytes': pool_list_stat_schema, + 'rd': pool_list_stat_schema, + 'wr': pool_list_stat_schema, + }, allow_unknown=True) + + pool_rbd_conf_schema = JList(JObj(sub_elems={ + 'name': str, + 'value': str, + 'source': int + })) + + @contextmanager + def __yield_pool(self, name=None, data=None, deletion_name=None): + """ + Use either just a name or whole description of a pool to create one. + This also validates the correct creation and deletion after the pool was used. 
+ + :param name: Name of the pool + :param data: Describes the pool in full length + :param deletion_name: Only needed if the pool was renamed + :return: + """ + data = self._create_pool(name, data) + yield data + self._delete_pool(deletion_name or data['pool']) + + def _create_pool(self, name, data): + data = data or { + 'pool': name, + 'pg_num': '4', + 'pool_type': 'replicated', + 'compression_algorithm': 'snappy', + 'compression_mode': 'passive', + 'compression_max_blob_size': '131072', + 'compression_required_ratio': '0.875', + 'application_metadata': ['rbd'], + 'configuration': { + 'rbd_qos_bps_limit': 1024000, + 'rbd_qos_iops_limit': 5000, + } + } + self._task_post('/api/pool/', data) + self.assertStatus(201) + self._validate_pool_properties(data, self._get_pool(data['pool'])) + return data + + def _delete_pool(self, name): + self._task_delete('/api/pool/' + name) + self.assertStatus(204) + + def _validate_pool_properties(self, data, pool, timeout=DashboardTestCase.TIMEOUT_HEALTH_CLEAR): + for prop, value in data.items(): + if prop == 'pool_type': + self.assertEqual(pool['type'], value) + elif prop == 'size': + self.assertEqual(pool[prop], int(value), + '{}: {} != {}'.format(prop, pool[prop], value)) + elif prop == 'pg_num': + self._check_pg_num(value, pool) + elif prop == 'application_metadata': + self.assertIsInstance(pool[prop], list) + self.assertEqual(value, pool[prop]) + elif prop == 'pool': + self.assertEqual(pool['pool_name'], value) + elif prop.startswith('compression'): + if value is not None: + if prop.endswith('size'): + value = int(value) + elif prop.endswith('ratio'): + value = float(value) + self.assertEqual(pool['options'][prop], value) + else: + self.assertEqual(pool['options'], {}) + elif prop == 'configuration': + # configuration cannot really be checked here for two reasons: + # 1. The default value cannot be given to this method, which becomes relevant + # when resetting a value, because it's not always zero. + # 2. The expected `source` cannot be given to this method, and it cannot + # reliably be determined (see 1) + pass + else: + self.assertEqual(pool[prop], value, '{}: {} != {}'.format(prop, pool[prop], value)) + + self.wait_until_equal(self._get_health_status, 'HEALTH_OK', timeout) + + def _get_health_status(self): + return self._get('/api/health/minimal')['health']['status'] + + def _get_pool(self, pool_name): + pool = self._get("/api/pool/" + pool_name) + self.assertStatus(200) + self.assertSchemaBody(self.pool_schema) + return pool + + def _check_pg_num(self, value, pool): + """ + If both properties have not the same value, the cluster goes into a warning state, which + will only happen during a pg update on an existing pool. The test that does that is + currently commented out because our QA systems can't deal with the change. Feel free to test + it locally. 
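+        For reference, the polling loop below waits up to roughly 180 seconds for
+        pg_num and pg_placement_num to agree and for health to return to HEALTH_OK.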
+ """ + pgp_prop = 'pg_placement_num' + t = 0 + while (int(value) != pool[pgp_prop] or self._get_health_status() != 'HEALTH_OK') \ + and t < 180: + time.sleep(2) + t += 2 + pool = self._get_pool(pool['pool_name']) + for p in ['pg_num', pgp_prop]: # Should have the same values + self.assertEqual(pool[p], int(value), '{}: {} != {}'.format(p, pool[p], value)) + + @DashboardTestCase.RunAs('test', 'test', [{'pool': ['create', 'update', 'delete']}]) + def test_read_access_permissions(self): + self._get('/api/pool') + self.assertStatus(403) + self._get('/api/pool/bla') + self.assertStatus(403) + + @DashboardTestCase.RunAs('test', 'test', [{'pool': ['read', 'update', 'delete']}]) + def test_create_access_permissions(self): + self._task_post('/api/pool/', {}) + self.assertStatus(403) + + @DashboardTestCase.RunAs('test', 'test', [{'pool': ['read', 'create', 'update']}]) + def test_delete_access_permissions(self): + self._delete('/api/pool/ddd') + self.assertStatus(403) + + def test_pool_list(self): + data = self._get("/api/pool") + self.assertStatus(200) + + cluster_pools = self.ceph_cluster.mon_manager.list_pools() + self.assertEqual(len(cluster_pools), len(data)) + self.assertSchemaBody(JList(self.pool_schema)) + for pool in data: + self.assertNotIn('pg_status', pool) + self.assertNotIn('stats', pool) + self.assertIn(pool['pool_name'], cluster_pools) + + def test_pool_list_attrs(self): + data = self._get("/api/pool?attrs=type,flags") + self.assertStatus(200) + + cluster_pools = self.ceph_cluster.mon_manager.list_pools() + self.assertEqual(len(cluster_pools), len(data)) + for pool in data: + self.assertIn('pool_name', pool) + self.assertIn('type', pool) + self.assertIn('flags', pool) + self.assertNotIn('flags_names', pool) + self.assertNotIn('pg_status', pool) + self.assertNotIn('stats', pool) + self.assertIn(pool['pool_name'], cluster_pools) + + def test_pool_list_stats(self): + data = self._get("/api/pool?stats=true") + self.assertStatus(200) + + cluster_pools = self.ceph_cluster.mon_manager.list_pools() + self.assertEqual(len(cluster_pools), len(data)) + self.assertSchemaBody(JList(self.pool_schema)) + for pool in data: + self.assertIn('pool_name', pool) + self.assertIn('type', pool) + self.assertIn('application_metadata', pool) + self.assertIn('flags', pool) + self.assertIn('pg_status', pool) + self.assertSchema(pool['stats'], self.pool_list_stats_schema) + self.assertIn('flags_names', pool) + self.assertIn(pool['pool_name'], cluster_pools) + + def test_pool_get(self): + cluster_pools = self.ceph_cluster.mon_manager.list_pools() + pool = self._get("/api/pool/{}?stats=true&attrs=type,flags,stats" + .format(cluster_pools[0])) + self.assertEqual(pool['pool_name'], cluster_pools[0]) + self.assertIn('type', pool) + self.assertIn('flags', pool) + self.assertNotIn('pg_status', pool) + self.assertSchema(pool['stats'], self.pool_list_stats_schema) + self.assertNotIn('flags_names', pool) + self.assertSchema(pool['configuration'], self.pool_rbd_conf_schema) + + def test_pool_create_with_two_applications(self): + self.__yield_pool(None, { + 'pool': 'dashboard_pool1', + 'pg_num': '8', + 'pool_type': 'replicated', + 'application_metadata': ['rbd', 'sth'], + }) + + def test_pool_create_with_ecp_and_rule(self): + self._ceph_cmd(['osd', 'crush', 'rule', 'create-erasure', 'ecrule']) + self._ceph_cmd( + ['osd', 'erasure-code-profile', 'set', 'ecprofile', 'crush-failure-domain=osd']) + self.__yield_pool(None, { + 'pool': 'dashboard_pool2', + 'pg_num': '8', + 'pool_type': 'erasure', + 'application_metadata': 
['rbd'], + 'erasure_code_profile': 'ecprofile', + 'crush_rule': 'ecrule', + }) + self._ceph_cmd(['osd', 'erasure-code-profile', 'rm', 'ecprofile']) + + def test_pool_create_with_compression(self): + pool = { + 'pool': 'dashboard_pool3', + 'pg_num': '8', + 'pool_type': 'replicated', + 'compression_algorithm': 'zstd', + 'compression_mode': 'aggressive', + 'compression_max_blob_size': '10000000', + 'compression_required_ratio': '0.8', + 'configuration': { + 'rbd_qos_bps_limit': 2048, + 'rbd_qos_iops_limit': None, + }, + } + with self.__yield_pool(None, pool): + expected_configuration = [{ + 'name': 'rbd_qos_bps_limit', + 'source': 1, + 'value': '2048', + }, { + 'name': 'rbd_qos_iops_limit', + 'source': 0, + 'value': '0', + }] + new_pool = self._get_pool(pool['pool']) + for conf in expected_configuration: + self.assertIn(conf, new_pool['configuration']) + + def test_pool_update_metadata(self): + pool_name = 'pool_update_metadata' + with self.__yield_pool(pool_name): + props = {'application_metadata': ['rbd', 'sth']} + self._task_put('/api/pool/{}'.format(pool_name), props) + self._validate_pool_properties(props, self._get_pool(pool_name), + self.TIMEOUT_HEALTH_CLEAR * 2) + + properties = {'application_metadata': ['rgw']} + self._task_put('/api/pool/' + pool_name, properties) + self._validate_pool_properties(properties, self._get_pool(pool_name), + self.TIMEOUT_HEALTH_CLEAR * 2) + + properties = {'application_metadata': ['rbd', 'sth']} + self._task_put('/api/pool/' + pool_name, properties) + self._validate_pool_properties(properties, self._get_pool(pool_name), + self.TIMEOUT_HEALTH_CLEAR * 2) + + properties = {'application_metadata': ['rgw']} + self._task_put('/api/pool/' + pool_name, properties) + self._validate_pool_properties(properties, self._get_pool(pool_name), + self.TIMEOUT_HEALTH_CLEAR * 2) + + def test_pool_update_configuration(self): + pool_name = 'pool_update_configuration' + with self.__yield_pool(pool_name): + configuration = { + 'rbd_qos_bps_limit': 1024, + 'rbd_qos_iops_limit': None, + } + expected_configuration = [{ + 'name': 'rbd_qos_bps_limit', + 'source': 1, + 'value': '1024', + }, { + 'name': 'rbd_qos_iops_limit', + 'source': 0, + 'value': '0', + }] + self._task_put('/api/pool/' + pool_name, {'configuration': configuration}) + time.sleep(5) + pool_config = self._get_pool(pool_name)['configuration'] + for conf in expected_configuration: + self.assertIn(conf, pool_config) + + def test_pool_update_compression(self): + pool_name = 'pool_update_compression' + with self.__yield_pool(pool_name): + properties = { + 'compression_algorithm': 'zstd', + 'compression_mode': 'aggressive', + 'compression_max_blob_size': '10000000', + 'compression_required_ratio': '0.8', + } + self._task_put('/api/pool/' + pool_name, properties) + time.sleep(5) + self._validate_pool_properties(properties, self._get_pool(pool_name)) + + def test_pool_update_unset_compression(self): + pool_name = 'pool_update_unset_compression' + with self.__yield_pool(pool_name): + self._task_put('/api/pool/' + pool_name, {'compression_mode': 'unset'}) + time.sleep(5) + self._validate_pool_properties({ + 'compression_algorithm': None, + 'compression_mode': None, + 'compression_max_blob_size': None, + 'compression_required_ratio': None, + }, self._get_pool(pool_name)) + + def test_pool_create_fail(self): + data = {'pool_type': u'replicated', 'rule_name': u'dnf', 'pg_num': u'8', 'pool': u'sadfs'} + self._task_post('/api/pool/', data) + self.assertStatus(400) + self.assertJsonBody({ + 'component': 'pool', + 'code': "2", + 
'detail': "[errno -2] specified rule dnf doesn't exist" + }) + + def test_pool_info(self): + self._get("/api/pool/_info") + self.assertSchemaBody(JObj({ + 'pool_names': JList(six.string_types), + 'compression_algorithms': JList(six.string_types), + 'compression_modes': JList(six.string_types), + 'is_all_bluestore': bool, + 'bluestore_compression_algorithm': six.string_types, + 'osd_count': int, + 'crush_rules_replicated': JList(JObj({}, allow_unknown=True)), + 'crush_rules_erasure': JList(JObj({}, allow_unknown=True)), + 'pg_autoscale_default_mode': six.string_types, + 'pg_autoscale_modes': JList(six.string_types), + })) diff --git a/qa/tasks/mgr/dashboard/test_rbd.py b/qa/tasks/mgr/dashboard/test_rbd.py new file mode 100644 index 00000000..68af93d9 --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_rbd.py @@ -0,0 +1,797 @@ +# -*- coding: utf-8 -*- +# pylint: disable=too-many-public-methods + +from __future__ import absolute_import + +import time + +from .helper import DashboardTestCase, JObj, JLeaf, JList + + +class RbdTest(DashboardTestCase): + AUTH_ROLES = ['pool-manager', 'block-manager'] + + @classmethod + def create_pool(cls, name, pg_num, pool_type, application='rbd'): + data = { + 'pool': name, + 'pg_num': pg_num, + 'pool_type': pool_type, + 'application_metadata': [application] + } + if pool_type == 'erasure': + data['flags'] = ['ec_overwrites'] + cls._task_post("/api/pool", data) + + @DashboardTestCase.RunAs('test', 'test', [{'rbd-image': ['create', 'update', 'delete']}]) + def test_read_access_permissions(self): + self._get('/api/block/image') + self.assertStatus(403) + self._get('/api/block/image/pool/image') + self.assertStatus(403) + + @DashboardTestCase.RunAs('test', 'test', [{'rbd-image': ['read', 'update', 'delete']}]) + def test_create_access_permissions(self): + self.create_image('pool', 'name', 0) + self.assertStatus(403) + self.create_snapshot('pool', 'image', 'snapshot') + self.assertStatus(403) + self.copy_image('src_pool', 'src_image', 'dest_pool', 'dest_image') + self.assertStatus(403) + self.clone_image('parent_pool', 'parent_image', 'parent_snap', 'pool', 'name') + self.assertStatus(403) + + @DashboardTestCase.RunAs('test', 'test', [{'rbd-image': ['read', 'create', 'delete']}]) + def test_update_access_permissions(self): + self.edit_image('pool', 'image') + self.assertStatus(403) + self.update_snapshot('pool', 'image', 'snapshot', None, None) + self.assertStatus(403) + self._task_post('/api/block/image/rbd/rollback_img/snap/snap1/rollback') + self.assertStatus(403) + self.flatten_image('pool', 'image') + self.assertStatus(403) + + @DashboardTestCase.RunAs('test', 'test', [{'rbd-image': ['read', 'create', 'update']}]) + def test_delete_access_permissions(self): + self.remove_image('pool', 'image') + self.assertStatus(403) + self.remove_snapshot('pool', 'image', 'snapshot') + self.assertStatus(403) + + @classmethod + def create_image(cls, pool, name, size, **kwargs): + data = {'name': name, 'pool_name': pool, 'size': size} + data.update(kwargs) + return cls._task_post('/api/block/image', data) + + @classmethod + def clone_image(cls, parent_pool, parent_image, parent_snap, pool, name, + **kwargs): + # pylint: disable=too-many-arguments + data = {'child_image_name': name, 'child_pool_name': pool} + data.update(kwargs) + return cls._task_post('/api/block/image/{}/{}/snap/{}/clone' + .format(parent_pool, parent_image, parent_snap), + data) + + @classmethod + def copy_image(cls, src_pool, src_image, dest_pool, dest_image, **kwargs): + # pylint: disable=too-many-arguments + 
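+        # Thin wrapper around the dashboard copy endpoint; any extra keyword
+        # arguments (for example the features list used in test_copy) are merged
+        # into the request body as-is.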
data = {'dest_image_name': dest_image, 'dest_pool_name': dest_pool} + data.update(kwargs) + return cls._task_post('/api/block/image/{}/{}/copy' + .format(src_pool, src_image), data) + + @classmethod + def remove_image(cls, pool, image): + return cls._task_delete('/api/block/image/{}/{}'.format(pool, image)) + + # pylint: disable=too-many-arguments + @classmethod + def edit_image(cls, pool, image, name=None, size=None, features=None, **kwargs): + kwargs.update({'name': name, 'size': size, 'features': features}) + return cls._task_put('/api/block/image/{}/{}'.format(pool, image), kwargs) + + @classmethod + def flatten_image(cls, pool, image): + return cls._task_post('/api/block/image/{}/{}/flatten'.format(pool, image)) + + @classmethod + def create_snapshot(cls, pool, image, snapshot): + return cls._task_post('/api/block/image/{}/{}/snap'.format(pool, image), + {'snapshot_name': snapshot}) + + @classmethod + def remove_snapshot(cls, pool, image, snapshot): + return cls._task_delete('/api/block/image/{}/{}/snap/{}'.format(pool, image, snapshot)) + + @classmethod + def update_snapshot(cls, pool, image, snapshot, new_name, is_protected): + return cls._task_put('/api/block/image/{}/{}/snap/{}'.format(pool, image, snapshot), + {'new_snap_name': new_name, 'is_protected': is_protected}) + + @classmethod + def setUpClass(cls): + super(RbdTest, cls).setUpClass() + cls.create_pool('rbd', 2**3, 'replicated') + cls.create_pool('rbd_iscsi', 2**3, 'replicated') + + cls.create_image('rbd', 'img1', 2**30) + cls.create_image('rbd', 'img2', 2*2**30) + cls.create_image('rbd_iscsi', 'img1', 2**30) + cls.create_image('rbd_iscsi', 'img2', 2*2**30) + + osd_metadata = cls.ceph_cluster.mon_manager.get_osd_metadata() + cls.bluestore_support = True + for osd in osd_metadata: + if osd['osd_objectstore'] != 'bluestore': + cls.bluestore_support = False + break + + @classmethod + def tearDownClass(cls): + super(RbdTest, cls).tearDownClass() + cls._ceph_cmd(['osd', 'pool', 'delete', 'rbd', 'rbd', '--yes-i-really-really-mean-it']) + cls._ceph_cmd(['osd', 'pool', 'delete', 'rbd_iscsi', 'rbd_iscsi', + '--yes-i-really-really-mean-it']) + cls._ceph_cmd(['osd', 'pool', 'delete', 'rbd_data', 'rbd_data', + '--yes-i-really-really-mean-it']) + + @classmethod + def create_image_in_trash(cls, pool, name, delay=0): + cls.create_image(pool, name, 10240) + img = cls._get('/api/block/image/{}/{}'.format(pool, name)) + + cls._task_post("/api/block/image/{}/{}/move_trash".format(pool, name), + {'delay': delay}) + + return img['id'] + + @classmethod + def remove_trash(cls, pool, image_id, image_name, force=False): + return cls._task_delete('/api/block/image/trash/{}/{}/?image_name={}&force={}'.format('rbd', image_id, image_name, force)) + + @classmethod + def get_trash(cls, pool, image_id): + trash = cls._get('/api/block/image/trash/?pool_name={}'.format(pool)) + if isinstance(trash, list): + for pool in trash: + for image in pool['value']: + if image['id'] == image_id: + return image + + return None + + def _validate_image(self, img, **kwargs): + """ + Example of an RBD image json: + + { + "size": 1073741824, + "obj_size": 4194304, + "num_objs": 256, + "order": 22, + "block_name_prefix": "rbd_data.10ae2ae8944a", + "name": "img1", + "pool_name": "rbd", + "features": 61, + "features_name": ["deep-flatten", "exclusive-lock", "fast-diff", "layering", + "object-map"] + } + """ + schema = JObj(sub_elems={ + 'size': JLeaf(int), + 'obj_size': JLeaf(int), + 'num_objs': JLeaf(int), + 'order': JLeaf(int), + 'block_name_prefix': JLeaf(str), + 'name': 
JLeaf(str), + 'id': JLeaf(str), + 'unique_id': JLeaf(str), + 'image_format': JLeaf(int), + 'pool_name': JLeaf(str), + 'features': JLeaf(int), + 'features_name': JList(JLeaf(str)), + 'stripe_count': JLeaf(int, none=True), + 'stripe_unit': JLeaf(int, none=True), + 'parent': JObj(sub_elems={'pool_name': JLeaf(str), + 'image_name': JLeaf(str), + 'snap_name': JLeaf(str)}, none=True), + 'data_pool': JLeaf(str, none=True), + 'snapshots': JList(JLeaf(dict)), + 'timestamp': JLeaf(str, none=True), + 'disk_usage': JLeaf(int, none=True), + 'total_disk_usage': JLeaf(int, none=True), + 'configuration': JList(JObj(sub_elems={ + 'name': JLeaf(str), + 'source': JLeaf(int), + 'value': JLeaf(str), + })), + }) + self.assertSchema(img, schema) + + for k, v in kwargs.items(): + if isinstance(v, list): + self.assertSetEqual(set(img[k]), set(v)) + else: + self.assertEqual(img[k], v) + + def _validate_snapshot(self, snap, **kwargs): + self.assertIn('id', snap) + self.assertIn('name', snap) + self.assertIn('is_protected', snap) + self.assertIn('timestamp', snap) + self.assertIn('size', snap) + self.assertIn('children', snap) + + for k, v in kwargs.items(): + if isinstance(v, list): + self.assertSetEqual(set(snap[k]), set(v)) + else: + self.assertEqual(snap[k], v) + + def _validate_snapshot_list(self, snap_list, snap_name=None, **kwargs): + found = False + for snap in snap_list: + self.assertIn('name', snap) + if snap_name and snap['name'] == snap_name: + found = True + self._validate_snapshot(snap, **kwargs) + break + if snap_name and not found: + self.fail("Snapshot {} not found".format(snap_name)) + + def test_list(self): + data = self._view_cache_get('/api/block/image') + self.assertStatus(200) + self.assertEqual(len(data), 2) + + for pool_view in data: + self.assertEqual(pool_view['status'], 0) + self.assertIsNotNone(pool_view['value']) + self.assertIn('pool_name', pool_view) + self.assertIn(pool_view['pool_name'], ['rbd', 'rbd_iscsi']) + image_list = pool_view['value'] + self.assertEqual(len(image_list), 2) + + for img in image_list: + self.assertIn('name', img) + self.assertIn('pool_name', img) + self.assertIn(img['pool_name'], ['rbd', 'rbd_iscsi']) + if img['name'] == 'img1': + self._validate_image(img, size=1073741824, + num_objs=256, obj_size=4194304, + features_name=['deep-flatten', + 'exclusive-lock', + 'fast-diff', + 'layering', + 'object-map']) + elif img['name'] == 'img2': + self._validate_image(img, size=2147483648, + num_objs=512, obj_size=4194304, + features_name=['deep-flatten', + 'exclusive-lock', + 'fast-diff', + 'layering', + 'object-map']) + else: + assert False, "Unexcepted image '{}' in result list".format(img['name']) + + def test_create(self): + rbd_name = 'test_rbd' + self.create_image('rbd', rbd_name, 10240) + self.assertStatus(201) + + img = self._get('/api/block/image/rbd/test_rbd') + self.assertStatus(200) + + self._validate_image(img, name=rbd_name, size=10240, + num_objs=1, obj_size=4194304, + features_name=['deep-flatten', + 'exclusive-lock', + 'fast-diff', 'layering', + 'object-map']) + + self.remove_image('rbd', rbd_name) + + def test_create_with_configuration(self): + pool = 'rbd' + image_name = 'image_with_config' + size = 10240 + configuration = { + 'rbd_qos_bps_limit': 10240, + 'rbd_qos_bps_burst': 10240 * 2, + } + expected = [{ + 'name': 'rbd_qos_bps_limit', + 'source': 2, + 'value': str(10240), + }, { + 'name': 'rbd_qos_bps_burst', + 'source': 2, + 'value': str(10240 * 2), + }] + + self.create_image(pool, image_name, size, configuration=configuration) + 
self.assertStatus(201) + img = self._get('/api/block/image/rbd/{}'.format(image_name)) + self.assertStatus(200) + for conf in expected: + self.assertIn(conf, img['configuration']) + + self.remove_image(pool, image_name) + + def test_create_rbd_in_data_pool(self): + if not self.bluestore_support: + self.skipTest('requires bluestore cluster') + + self.create_pool('data_pool', 2**4, 'erasure') + + rbd_name = 'test_rbd_in_data_pool' + self.create_image('rbd', rbd_name, 10240, data_pool='data_pool') + self.assertStatus(201) + + img = self._get('/api/block/image/rbd/test_rbd_in_data_pool') + self.assertStatus(200) + + self._validate_image(img, name=rbd_name, size=10240, + num_objs=1, obj_size=4194304, + data_pool='data_pool', + features_name=['data-pool', 'deep-flatten', + 'exclusive-lock', + 'fast-diff', 'layering', + 'object-map']) + + self.remove_image('rbd', rbd_name) + self.assertStatus(204) + self._ceph_cmd(['osd', 'pool', 'delete', 'data_pool', 'data_pool', + '--yes-i-really-really-mean-it']) + + def test_create_rbd_twice(self): + res = self.create_image('rbd', 'test_rbd_twice', 10240) + + res = self.create_image('rbd', 'test_rbd_twice', 10240) + self.assertStatus(400) + self.assertEqual(res, {"code": '17', 'status': 400, "component": "rbd", + "detail": "[errno 17] error creating image", + 'task': {'name': 'rbd/create', + 'metadata': {'pool_name': 'rbd', + 'image_name': 'test_rbd_twice'}}}) + self.remove_image('rbd', 'test_rbd_twice') + self.assertStatus(204) + + def test_snapshots_and_clone_info(self): + self.create_snapshot('rbd', 'img1', 'snap1') + self.create_snapshot('rbd', 'img1', 'snap2') + self._rbd_cmd(['snap', 'protect', 'rbd/img1@snap1']) + self._rbd_cmd(['clone', 'rbd/img1@snap1', 'rbd_iscsi/img1_clone']) + + img = self._get('/api/block/image/rbd/img1') + self.assertStatus(200) + self._validate_image(img, name='img1', size=1073741824, + num_objs=256, obj_size=4194304, parent=None, + features_name=['deep-flatten', 'exclusive-lock', + 'fast-diff', 'layering', + 'object-map']) + for snap in img['snapshots']: + if snap['name'] == 'snap1': + self._validate_snapshot(snap, is_protected=True) + self.assertEqual(len(snap['children']), 1) + self.assertDictEqual(snap['children'][0], + {'pool_name': 'rbd_iscsi', + 'image_name': 'img1_clone'}) + elif snap['name'] == 'snap2': + self._validate_snapshot(snap, is_protected=False) + + img = self._get('/api/block/image/rbd_iscsi/img1_clone') + self.assertStatus(200) + self._validate_image(img, name='img1_clone', size=1073741824, + num_objs=256, obj_size=4194304, + parent={'pool_name': 'rbd', 'image_name': 'img1', + 'snap_name': 'snap1'}, + features_name=['deep-flatten', 'exclusive-lock', + 'fast-diff', 'layering', + 'object-map']) + self.remove_image('rbd_iscsi', 'img1_clone') + self.assertStatus(204) + + def test_disk_usage(self): + self._rbd_cmd(['bench', '--io-type', 'write', '--io-total', '50M', 'rbd/img2']) + self.create_snapshot('rbd', 'img2', 'snap1') + self._rbd_cmd(['bench', '--io-type', 'write', '--io-total', '20M', 'rbd/img2']) + self.create_snapshot('rbd', 'img2', 'snap2') + self._rbd_cmd(['bench', '--io-type', 'write', '--io-total', '10M', 'rbd/img2']) + self.create_snapshot('rbd', 'img2', 'snap3') + self._rbd_cmd(['bench', '--io-type', 'write', '--io-total', '5M', 'rbd/img2']) + img = self._get('/api/block/image/rbd/img2') + self.assertStatus(200) + self._validate_image(img, name='img2', size=2147483648, + total_disk_usage=268435456, disk_usage=67108864) + + def test_delete_non_existent_image(self): + res = self.remove_image('rbd', 
'i_dont_exist') + self.assertStatus(400) + self.assertEqual(res, {u'code': u'2', "status": 400, "component": "rbd", + "detail": "[errno 2] error removing image", + 'task': {'name': 'rbd/delete', + 'metadata': {'pool_name': 'rbd', + 'image_name': 'i_dont_exist'}}}) + + def test_image_delete(self): + self.create_image('rbd', 'delete_me', 2**30) + self.assertStatus(201) + self.create_snapshot('rbd', 'delete_me', 'snap1') + self.assertStatus(201) + self.create_snapshot('rbd', 'delete_me', 'snap2') + self.assertStatus(201) + + img = self._get('/api/block/image/rbd/delete_me') + self.assertStatus(200) + self._validate_image(img, name='delete_me', size=2**30) + self.assertEqual(len(img['snapshots']), 2) + + self.remove_snapshot('rbd', 'delete_me', 'snap1') + self.assertStatus(204) + self.remove_snapshot('rbd', 'delete_me', 'snap2') + self.assertStatus(204) + + img = self._get('/api/block/image/rbd/delete_me') + self.assertStatus(200) + self._validate_image(img, name='delete_me', size=2**30) + self.assertEqual(len(img['snapshots']), 0) + + self.remove_image('rbd', 'delete_me') + self.assertStatus(204) + + def test_image_rename(self): + self.create_image('rbd', 'edit_img', 2**30) + self.assertStatus(201) + self._get('/api/block/image/rbd/edit_img') + self.assertStatus(200) + self.edit_image('rbd', 'edit_img', 'new_edit_img') + self.assertStatus(200) + self._get('/api/block/image/rbd/edit_img') + self.assertStatus(404) + self._get('/api/block/image/rbd/new_edit_img') + self.assertStatus(200) + self.remove_image('rbd', 'new_edit_img') + self.assertStatus(204) + + def test_image_resize(self): + self.create_image('rbd', 'edit_img', 2**30) + self.assertStatus(201) + img = self._get('/api/block/image/rbd/edit_img') + self.assertStatus(200) + self._validate_image(img, size=2**30) + self.edit_image('rbd', 'edit_img', size=2*2**30) + self.assertStatus(200) + img = self._get('/api/block/image/rbd/edit_img') + self.assertStatus(200) + self._validate_image(img, size=2*2**30) + self.remove_image('rbd', 'edit_img') + self.assertStatus(204) + + def test_image_change_features(self): + self.create_image('rbd', 'edit_img', 2**30, features=["layering"]) + self.assertStatus(201) + img = self._get('/api/block/image/rbd/edit_img') + self.assertStatus(200) + self._validate_image(img, features_name=["layering"]) + self.edit_image('rbd', 'edit_img', + features=["fast-diff", "object-map", "exclusive-lock"]) + self.assertStatus(200) + img = self._get('/api/block/image/rbd/edit_img') + self.assertStatus(200) + self._validate_image(img, features_name=['exclusive-lock', + 'fast-diff', 'layering', + 'object-map']) + self.edit_image('rbd', 'edit_img', + features=["journaling", "exclusive-lock"]) + self.assertStatus(200) + img = self._get('/api/block/image/rbd/edit_img') + self.assertStatus(200) + self._validate_image(img, features_name=['exclusive-lock', + 'journaling', 'layering']) + self.remove_image('rbd', 'edit_img') + self.assertStatus(204) + + def test_image_change_config(self): + pool = 'rbd' + image = 'image_with_config' + initial_conf = { + 'rbd_qos_bps_limit': 10240, + 'rbd_qos_write_iops_limit': None + } + initial_expect = [{ + 'name': 'rbd_qos_bps_limit', + 'source': 2, + 'value': '10240', + }, { + 'name': 'rbd_qos_write_iops_limit', + 'source': 0, + 'value': '0', + }] + new_conf = { + 'rbd_qos_bps_limit': 0, + 'rbd_qos_bps_burst': 20480, + 'rbd_qos_write_iops_limit': None + } + new_expect = [{ + 'name': 'rbd_qos_bps_limit', + 'source': 2, + 'value': '0', + }, { + 'name': 'rbd_qos_bps_burst', + 'source': 2, + 'value': 
'20480', + }, { + 'name': 'rbd_qos_write_iops_limit', + 'source': 0, + 'value': '0', + }] + + self.create_image(pool, image, 2**30, configuration=initial_conf) + self.assertStatus(201) + img = self._get('/api/block/image/{}/{}'.format(pool, image)) + self.assertStatus(200) + for conf in initial_expect: + self.assertIn(conf, img['configuration']) + + self.edit_image(pool, image, configuration=new_conf) + img = self._get('/api/block/image/{}/{}'.format(pool, image)) + self.assertStatus(200) + for conf in new_expect: + self.assertIn(conf, img['configuration']) + + self.remove_image(pool, image) + self.assertStatus(204) + + def test_update_snapshot(self): + self.create_snapshot('rbd', 'img1', 'snap5') + self.assertStatus(201) + img = self._get('/api/block/image/rbd/img1') + self._validate_snapshot_list(img['snapshots'], 'snap5', is_protected=False) + + self.update_snapshot('rbd', 'img1', 'snap5', 'snap6', None) + self.assertStatus(200) + img = self._get('/api/block/image/rbd/img1') + self._validate_snapshot_list(img['snapshots'], 'snap6', is_protected=False) + + self.update_snapshot('rbd', 'img1', 'snap6', None, True) + self.assertStatus(200) + img = self._get('/api/block/image/rbd/img1') + self._validate_snapshot_list(img['snapshots'], 'snap6', is_protected=True) + + self.update_snapshot('rbd', 'img1', 'snap6', 'snap5', False) + self.assertStatus(200) + img = self._get('/api/block/image/rbd/img1') + self._validate_snapshot_list(img['snapshots'], 'snap5', is_protected=False) + + self.remove_snapshot('rbd', 'img1', 'snap5') + self.assertStatus(204) + + def test_snapshot_rollback(self): + self.create_image('rbd', 'rollback_img', 2**30, + features=["layering", "exclusive-lock", "fast-diff", + "object-map"]) + self.assertStatus(201) + self.create_snapshot('rbd', 'rollback_img', 'snap1') + self.assertStatus(201) + + img = self._get('/api/block/image/rbd/rollback_img') + self.assertStatus(200) + self.assertEqual(img['disk_usage'], 0) + + self._rbd_cmd(['bench', '--io-type', 'write', '--io-total', '5M', + 'rbd/rollback_img']) + + img = self._get('/api/block/image/rbd/rollback_img') + self.assertStatus(200) + self.assertGreater(img['disk_usage'], 0) + + self._task_post('/api/block/image/rbd/rollback_img/snap/snap1/rollback') + self.assertStatus([201, 200]) + + img = self._get('/api/block/image/rbd/rollback_img') + self.assertStatus(200) + self.assertEqual(img['disk_usage'], 0) + + self.remove_snapshot('rbd', 'rollback_img', 'snap1') + self.assertStatus(204) + self.remove_image('rbd', 'rollback_img') + self.assertStatus(204) + + def test_clone(self): + self.create_image('rbd', 'cimg', 2**30, features=["layering"]) + self.assertStatus(201) + self.create_snapshot('rbd', 'cimg', 'snap1') + self.assertStatus(201) + self.update_snapshot('rbd', 'cimg', 'snap1', None, True) + self.assertStatus(200) + self.clone_image('rbd', 'cimg', 'snap1', 'rbd', 'cimg-clone', + features=["layering", "exclusive-lock", "fast-diff", + "object-map"]) + self.assertStatus([200, 201]) + + img = self._get('/api/block/image/rbd/cimg-clone') + self.assertStatus(200) + self._validate_image(img, features_name=['exclusive-lock', + 'fast-diff', 'layering', + 'object-map'], + parent={'pool_name': 'rbd', 'image_name': 'cimg', + 'snap_name': 'snap1'}) + + res = self.remove_image('rbd', 'cimg') + self.assertStatus(400) + self.assertIn('code', res) + self.assertEqual(res['code'], '39') + + self.remove_image('rbd', 'cimg-clone') + self.assertStatus(204) + self.update_snapshot('rbd', 'cimg', 'snap1', None, False) + self.assertStatus(200) + 
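The ordering enforced in this test — protect the parent snapshot before cloning, remove or flatten the clone before the parent snapshot and image can go away — mirrors what librbd itself requires. A rough sketch of the equivalent calls through the rbd Python binding, assuming a reachable cluster and an existing 'rbd' pool (neither of which the REST test needs):

    import rados
    import rbd

    cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
    cluster.connect()
    try:
        ioctx = cluster.open_ioctx('rbd')
        try:
            rbd_api = rbd.RBD()
            rbd_api.create(ioctx, 'cimg', 2**30)
            parent = rbd.Image(ioctx, 'cimg')
            try:
                parent.create_snap('snap1')
                parent.protect_snap('snap1')   # clones require a protected snapshot
                rbd_api.clone(ioctx, 'cimg', 'snap1', ioctx, 'cimg-clone')
                # While the clone and the protected snapshot exist, removing the
                # parent fails (the REST test above observes error code '39').
                rbd_api.remove(ioctx, 'cimg-clone')
                parent.unprotect_snap('snap1')
                parent.remove_snap('snap1')
            finally:
                parent.close()
            rbd_api.remove(ioctx, 'cimg')
        finally:
            ioctx.close()
    finally:
        cluster.shutdown()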
self.remove_snapshot('rbd', 'cimg', 'snap1') + self.assertStatus(204) + self.remove_image('rbd', 'cimg') + self.assertStatus(204) + + def test_copy(self): + self.create_image('rbd', 'coimg', 2**30, + features=["layering", "exclusive-lock", "fast-diff", + "object-map"]) + self.assertStatus(201) + + self._rbd_cmd(['bench', '--io-type', 'write', '--io-total', '5M', + 'rbd/coimg']) + + self.copy_image('rbd', 'coimg', 'rbd_iscsi', 'coimg-copy', + features=["layering", "fast-diff", "exclusive-lock", + "object-map"]) + self.assertStatus([200, 201]) + + img = self._get('/api/block/image/rbd/coimg') + self.assertStatus(200) + self._validate_image(img, features_name=['layering', 'exclusive-lock', + 'fast-diff', 'object-map']) + + img_copy = self._get('/api/block/image/rbd_iscsi/coimg-copy') + self._validate_image(img_copy, features_name=['exclusive-lock', + 'fast-diff', 'layering', + 'object-map'], + disk_usage=img['disk_usage']) + + self.remove_image('rbd', 'coimg') + self.assertStatus(204) + self.remove_image('rbd_iscsi', 'coimg-copy') + self.assertStatus(204) + + def test_flatten(self): + self.create_snapshot('rbd', 'img1', 'snapf') + self.update_snapshot('rbd', 'img1', 'snapf', None, True) + self.clone_image('rbd', 'img1', 'snapf', 'rbd_iscsi', 'img1_snapf_clone') + + img = self._get('/api/block/image/rbd_iscsi/img1_snapf_clone') + self.assertStatus(200) + self.assertIsNotNone(img['parent']) + + self.flatten_image('rbd_iscsi', 'img1_snapf_clone') + self.assertStatus([200, 201]) + + img = self._get('/api/block/image/rbd_iscsi/img1_snapf_clone') + self.assertStatus(200) + self.assertIsNone(img['parent']) + + self.update_snapshot('rbd', 'img1', 'snapf', None, False) + self.remove_snapshot('rbd', 'img1', 'snapf') + self.assertStatus(204) + + self.remove_image('rbd_iscsi', 'img1_snapf_clone') + self.assertStatus(204) + + def test_default_features(self): + default_features = self._get('/api/block/image/default_features') + self.assertEqual(default_features, ['deep-flatten', 'exclusive-lock', + 'fast-diff', 'layering', + 'object-map']) + + def test_image_with_special_name(self): + rbd_name = 'test/rbd' + rbd_name_encoded = 'test%2Frbd' + + self.create_image('rbd', rbd_name, 10240) + self.assertStatus(201) + + img = self._get("/api/block/image/rbd/" + rbd_name_encoded) + self.assertStatus(200) + + self._validate_image(img, name=rbd_name, size=10240, + num_objs=1, obj_size=4194304, + features_name=['deep-flatten', + 'exclusive-lock', + 'fast-diff', 'layering', + 'object-map']) + + self.remove_image('rbd', rbd_name_encoded) + + def test_move_image_to_trash(self): + id = self.create_image_in_trash('rbd', 'test_rbd') + self.assertStatus(200) + + self._get('/api/block/image/rbd/test_rbd') + self.assertStatus(404) + + time.sleep(1) + + image = self.get_trash('rbd', id) + self.assertIsNotNone(image) + + self.remove_trash('rbd', id, 'test_rbd') + + def test_list_trash(self): + id = self.create_image_in_trash('rbd', 'test_rbd', 0) + data = self._get('/api/block/image/trash/?pool_name={}'.format('rbd')) + self.assertStatus(200) + self.assertIsInstance(data, list) + self.assertIsNotNone(data) + + self.remove_trash('rbd', id, 'test_rbd') + self.assertStatus(204) + + def test_restore_trash(self): + id = self.create_image_in_trash('rbd', 'test_rbd') + + self._task_post('/api/block/image/trash/{}/{}/restore'.format('rbd', id), {'new_image_name': 'test_rbd'}) + + self._get('/api/block/image/rbd/test_rbd') + self.assertStatus(200) + + image = self.get_trash('rbd', id) + self.assertIsNone(image) + + 
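The trash tests in this file all follow the same move / list / restore / purge cycle. For orientation, the corresponding calls in the rbd Python binding look roughly like the sketch below; it assumes a reachable cluster and an existing 'rbd' pool and is only an illustration, not part of the test suite.

    import rados
    import rbd

    cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
    cluster.connect()
    try:
        ioctx = cluster.open_ioctx('rbd')
        try:
            rbd_api = rbd.RBD()
            rbd_api.create(ioctx, 'test_rbd', 2**30)

            # delay=0 makes the image purgeable at once; a long delay shields it
            # from 'trash purge' and from non-forced removal until it expires.
            rbd_api.trash_move(ioctx, 'test_rbd', delay=0)

            # The trash listing carries the image id needed to restore or remove.
            entry = next(e for e in rbd_api.trash_list(ioctx)
                         if e['name'] == 'test_rbd')

            # Either bring the image back under the original (or a new) name ...
            rbd_api.trash_restore(ioctx, entry['id'], 'test_rbd')
            # ... or delete it for good: rbd_api.trash_remove(ioctx, entry['id'])
            rbd_api.remove(ioctx, 'test_rbd')
        finally:
            ioctx.close()
    finally:
        cluster.shutdown()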
self.remove_image('rbd', 'test_rbd') + + def test_remove_expired_trash(self): + id = self.create_image_in_trash('rbd', 'test_rbd', 0) + self.remove_trash('rbd', id, 'test_rbd', False) + self.assertStatus(204) + + image = self.get_trash('rbd', id) + self.assertIsNone(image) + + def test_remove_not_expired_trash(self): + id = self.create_image_in_trash('rbd', 'test_rbd', 9999) + self.remove_trash('rbd', id, 'test_rbd', False) + self.assertStatus(400) + + time.sleep(1) + + image = self.get_trash('rbd', id) + self.assertIsNotNone(image) + + self.remove_trash('rbd', id, 'test_rbd', True) + + def test_remove_not_expired_trash_with_force(self): + id = self.create_image_in_trash('rbd', 'test_rbd', 9999) + self.remove_trash('rbd', id, 'test_rbd', True) + self.assertStatus(204) + + image = self.get_trash('rbd', id) + self.assertIsNone(image) + + def test_purge_trash(self): + id_expired = self.create_image_in_trash('rbd', 'test_rbd_expired', 0) + id_not_expired = self.create_image_in_trash('rbd', 'test_rbd', 9999) + + time.sleep(1) + + self._task_post('/api/block/image/trash/purge?pool_name={}'.format('rbd')) + self.assertStatus([200, 201]) + + time.sleep(1) + + trash_not_expired = self.get_trash('rbd', id_not_expired) + self.assertIsNotNone(trash_not_expired) + + trash_expired = self.get_trash('rbd', id_expired) + self.wait_until_equal(lambda: self.get_trash('rbd', id_expired), None, 60) diff --git a/qa/tasks/mgr/dashboard/test_rbd_mirroring.py b/qa/tasks/mgr/dashboard/test_rbd_mirroring.py new file mode 100644 index 00000000..8480cb87 --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_rbd_mirroring.py @@ -0,0 +1,177 @@ +# -*- coding: utf-8 -*- +# pylint: disable=too-many-public-methods + +from __future__ import absolute_import + +from .helper import DashboardTestCase + + +class RbdMirroringTest(DashboardTestCase): + AUTH_ROLES = ['pool-manager', 'block-manager'] + + @classmethod + def create_pool(cls, name, application='rbd'): + data = { + 'pool': name, + 'pg_num': 2**3, + 'pool_type': 'replicated', + 'application_metadata': [application] + } + cls._task_post("/api/pool", data) + + @classmethod + def get_pool(cls, pool): + data = cls._get('/api/block/mirroring/pool/{}'.format(pool)) + if isinstance(data, dict): + return data + return {} + + @classmethod + def update_pool(cls, pool, mirror_mode): + data = {'mirror_mode': mirror_mode} + return cls._task_put('/api/block/mirroring/pool/{}'.format(pool), + data) + + @classmethod + def list_peers(cls, pool): + data = cls._get('/api/block/mirroring/pool/{}/peer'.format(pool)) + if isinstance(data, list): + return data + return [] + + @classmethod + def get_peer(cls, pool, peer_uuid): + data = cls._get('/api/block/mirroring/pool/{}/peer/{}'.format(pool, peer_uuid)) + if isinstance(data, dict): + return data + return {} + + @classmethod + def create_peer(cls, pool, cluster_name, client_id, **kwargs): + data = {'cluster_name': cluster_name, 'client_id': client_id} + data.update(kwargs) + return cls._task_post('/api/block/mirroring/pool/{}/peer'.format(pool), + data) + + @classmethod + def update_peer(cls, pool, peer_uuid, **kwargs): + return cls._task_put('/api/block/mirroring/pool/{}/peer/{}'.format(pool, peer_uuid), + kwargs) + + @classmethod + def delete_peer(cls, pool, peer_uuid): + return cls._task_delete('/api/block/mirroring/pool/{}/peer/{}'.format(pool, peer_uuid)) + + @classmethod + def setUpClass(cls): + super(RbdMirroringTest, cls).setUpClass() + cls.create_pool('rbd') + + @classmethod + def tearDownClass(cls): + super(RbdMirroringTest, 
cls).tearDownClass() + cls._ceph_cmd(['osd', 'pool', 'delete', 'rbd', 'rbd', '--yes-i-really-really-mean-it']) + + @DashboardTestCase.RunAs('test', 'test', [{'rbd-mirroring': ['create', 'update', 'delete']}]) + def test_read_access_permissions(self): + self.get_pool('rbd') + self.assertStatus(403) + self.list_peers('rbd') + self.assertStatus(403) + self.get_peer('rbd', '123') + self.assertStatus(403) + + @DashboardTestCase.RunAs('test', 'test', [{'rbd-mirroring': ['read', 'update', 'delete']}]) + def test_create_access_permissions(self): + self.create_peer('rbd', 'remote', 'id') + self.assertStatus(403) + + @DashboardTestCase.RunAs('test', 'test', [{'rbd-mirroring': ['read', 'create', 'delete']}]) + def test_update_access_permissions(self): + self.update_peer('rbd', '123') + self.assertStatus(403) + + @DashboardTestCase.RunAs('test', 'test', [{'rbd-mirroring': ['read', 'create', 'update']}]) + def test_delete_access_permissions(self): + self.delete_peer('rbd', '123') + self.assertStatus(403) + + def test_mirror_mode(self): + self.update_pool('rbd', 'disabled') + mode = self.get_pool('rbd').get('mirror_mode') + self.assertEqual(mode, 'disabled') + + self.update_pool('rbd', 'image') + mode = self.get_pool('rbd').get('mirror_mode') + self.assertEqual(mode, 'image') + + self.update_pool('rbd', 'pool') + mode = self.get_pool('rbd').get('mirror_mode') + self.assertEqual(mode, 'pool') + + self.update_pool('rbd', 'disabled') + mode = self.get_pool('rbd').get('mirror_mode') + self.assertEqual(mode, 'disabled') + + def test_set_invalid_mirror_mode(self): + self.update_pool('rbd', 'invalid') + self.assertStatus(400) + + def test_set_same_mirror_mode(self): + self.update_pool('rbd', 'disabled') + self.update_pool('rbd', 'disabled') + self.assertStatus(200) + + def test_peer(self): + self.update_pool('rbd', 'image') + self.assertStatus(200) + + peers = self.list_peers('rbd') + self.assertStatus(200) + self.assertEqual([], peers) + + uuid = self.create_peer('rbd', 'remote', 'admin')['uuid'] + self.assertStatus(201) + + peers = self.list_peers('rbd') + self.assertStatus(200) + self.assertEqual([uuid], peers) + + expected_peer = { + 'uuid': uuid, + 'cluster_name': 'remote', + 'client_id': 'admin', + 'mon_host': '', + 'key': '' + } + peer = self.get_peer('rbd', uuid) + self.assertEqual(expected_peer, peer) + + self.update_peer('rbd', uuid, mon_host='1.2.3.4') + self.assertStatus(200) + + expected_peer['mon_host'] = '1.2.3.4' + peer = self.get_peer('rbd', uuid) + self.assertEqual(expected_peer, peer) + + self.delete_peer('rbd', uuid) + self.assertStatus(204) + + self.update_pool('rbd', 'disabled') + self.assertStatus(200) + + def test_disable_mirror_with_peers(self): + self.update_pool('rbd', 'image') + self.assertStatus(200) + + uuid = self.create_peer('rbd', 'remote', 'admin')['uuid'] + self.assertStatus(201) + + self.update_pool('rbd', 'disabled') + self.assertStatus(400) + + self.delete_peer('rbd', uuid) + self.assertStatus(204) + + self.update_pool('rbd', 'disabled') + self.assertStatus(200) diff --git a/qa/tasks/mgr/dashboard/test_requests.py b/qa/tasks/mgr/dashboard/test_requests.py new file mode 100644 index 00000000..cd917dae --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_requests.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +from .helper import DashboardTestCase + + +class RequestsTest(DashboardTestCase): + def test_gzip(self): + self._get('/api/summary') + self.assertHeaders({ + 'Content-Encoding': 'gzip', + 'Content-Type': 'application/json', + }) + + 
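The two gzip checks here (test_gzip above, test_force_no_gzip below) exercise plain HTTP content negotiation: the dashboard gzips its JSON replies unless the client asks for the identity encoding. A minimal sketch of the same exchange with the requests library, assuming a dashboard reachable at a placeholder base URL and ignoring authentication:

    import requests

    BASE_URL = 'https://localhost:8443'   # placeholder for a running dashboard

    # requests advertises gzip by default, so the reply arrives compressed
    # (and is transparently decompressed before r.text / r.json() are used).
    r = requests.get(BASE_URL + '/api/summary', verify=False)
    print(r.headers.get('Content-Encoding'))    # expected: 'gzip'

    # Asking for the identity encoding switches compression off server-side.
    r = requests.get(BASE_URL + '/api/summary',
                     headers={'Accept-Encoding': 'identity'}, verify=False)
    print(r.headers.get('Content-Encoding'))    # expected: header absent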
def test_force_no_gzip(self): + self._get('/api/summary', params=dict( + headers={'Accept-Encoding': 'identity'} + )) + self.assertNotIn('Content-Encoding', self._resp.headers) + self.assertHeaders({ + 'Content-Type': 'application/json', + }) + + def test_server(self): + self._get('/api/summary') + self.assertHeaders({ + 'server': 'Ceph-Dashboard', + 'Content-Security-Policy': "frame-ancestors 'self';", + 'X-Content-Type-Options': 'nosniff', + 'Strict-Transport-Security': 'max-age=63072000; includeSubDomains; preload' + }) diff --git a/qa/tasks/mgr/dashboard/test_rgw.py b/qa/tasks/mgr/dashboard/test_rgw.py new file mode 100644 index 00000000..9e781142 --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_rgw.py @@ -0,0 +1,710 @@ +# -*- coding: utf-8 -*- +from __future__ import absolute_import + +import logging +import six +from six.moves.urllib import parse + +from .helper import DashboardTestCase, JObj, JList, JLeaf + +logger = logging.getLogger(__name__) + + +class RgwTestCase(DashboardTestCase): + + maxDiff = None + create_test_user = False + + AUTH_ROLES = ['rgw-manager'] + + @classmethod + def setUpClass(cls): + super(RgwTestCase, cls).setUpClass() + # Create the administrator account. + cls._radosgw_admin_cmd([ + 'user', 'create', '--uid', 'admin', '--display-name', 'admin', + '--system', '--access-key', 'admin', '--secret', 'admin' + ]) + # Update the dashboard configuration. + cls._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-secret-key'], 'admin') + cls._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-access-key'], 'admin') + # Create a test user? + if cls.create_test_user: + cls._radosgw_admin_cmd([ + 'user', 'create', '--uid', 'teuth-test-user', '--display-name', + 'teuth-test-user' + ]) + cls._radosgw_admin_cmd([ + 'caps', 'add', '--uid', 'teuth-test-user', '--caps', + 'metadata=write' + ]) + cls._radosgw_admin_cmd([ + 'subuser', 'create', '--uid', 'teuth-test-user', '--subuser', + 'teuth-test-subuser', '--access', 'full', '--key-type', 's3', + '--access-key', 'xyz123' + ]) + cls._radosgw_admin_cmd([ + 'subuser', 'create', '--uid', 'teuth-test-user', '--subuser', + 'teuth-test-subuser2', '--access', 'full', '--key-type', + 'swift' + ]) + + @classmethod + def tearDownClass(cls): + if cls.create_test_user: + cls._radosgw_admin_cmd(['user', 'rm', '--uid=teuth-test-user']) + super(RgwTestCase, cls).tearDownClass() + + def setUp(self): + super(RgwTestCase, self).setUp() + + def get_rgw_user(self, uid): + return self._get('/api/rgw/user/{}'.format(uid)) + + +class RgwApiCredentialsTest(RgwTestCase): + + AUTH_ROLES = ['rgw-manager'] + + def setUp(self): + super(RgwApiCredentialsTest, self).setUp() + # Restart the Dashboard module to ensure that the connection to the + # RGW Admin Ops API is re-established with the new credentials. + self.logout() + self._ceph_cmd(['mgr', 'module', 'disable', 'dashboard']) + self._ceph_cmd(['mgr', 'module', 'enable', 'dashboard', '--force']) + # Set the default credentials. 
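+ # (The 'admin' access/secret key pair below belongs to the system user created in RgwTestCase.setUpClass.)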
+ self._ceph_cmd(['dashboard', 'set-rgw-api-user-id', '']) + self._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-secret-key'], 'admin') + self._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-access-key'], 'admin') + super(RgwApiCredentialsTest, self).setUp() + + def test_no_access_secret_key(self): + self._ceph_cmd(['dashboard', 'reset-rgw-api-secret-key']) + self._ceph_cmd(['dashboard', 'reset-rgw-api-access-key']) + resp = self._get('/api/rgw/user') + self.assertStatus(500) + self.assertIn('detail', resp) + self.assertIn('component', resp) + self.assertIn('No RGW credentials found', resp['detail']) + self.assertEqual(resp['component'], 'rgw') + + def test_success(self): + data = self._get('/api/rgw/status') + self.assertStatus(200) + self.assertIn('available', data) + self.assertIn('message', data) + self.assertTrue(data['available']) + + def test_invalid_user_id(self): + self._ceph_cmd(['dashboard', 'set-rgw-api-user-id', 'xyz']) + data = self._get('/api/rgw/status') + self.assertStatus(200) + self.assertIn('available', data) + self.assertIn('message', data) + self.assertFalse(data['available']) + self.assertIn('The user "xyz" is unknown to the Object Gateway.', + data['message']) + + +class RgwBucketTest(RgwTestCase): + + AUTH_ROLES = ['rgw-manager'] + + @classmethod + def setUpClass(cls): + cls.create_test_user = True + super(RgwBucketTest, cls).setUpClass() + # Create tenanted users. + cls._radosgw_admin_cmd([ + 'user', 'create', '--tenant', 'testx', '--uid', 'teuth-test-user', + '--display-name', 'tenanted teuth-test-user' + ]) + cls._radosgw_admin_cmd([ + 'user', 'create', '--tenant', 'testx', '--uid', 'teuth-test-user2', + '--display-name', 'tenanted teuth-test-user 2' + ]) + + @classmethod + def tearDownClass(cls): + cls._radosgw_admin_cmd( + ['user', 'rm', '--tenant', 'testx', '--uid=teuth-test-user']) + cls._radosgw_admin_cmd( + ['user', 'rm', '--tenant', 'testx', '--uid=teuth-test-user2']) + super(RgwBucketTest, cls).tearDownClass() + + def test_all(self): + # Create a new bucket. + self._post( + '/api/rgw/bucket', + params={ + 'bucket': 'teuth-test-bucket', + 'uid': 'admin' + }) + self.assertStatus(201) + data = self.jsonBody() + self.assertSchema(data, JObj(sub_elems={ + 'bucket_info': JObj(sub_elems={ + 'bucket': JObj(allow_unknown=True, sub_elems={ + 'name': JLeaf(str), + 'bucket_id': JLeaf(str), + 'tenant': JLeaf(str) + }), + 'quota': JObj(sub_elems={}, allow_unknown=True), + 'creation_time': JLeaf(str) + }, allow_unknown=True) + }, allow_unknown=True)) + data = data['bucket_info']['bucket'] + self.assertEqual(data['name'], 'teuth-test-bucket') + self.assertEqual(data['tenant'], '') + + # List all buckets. + data = self._get('/api/rgw/bucket') + self.assertStatus(200) + self.assertEqual(len(data), 1) + self.assertIn('teuth-test-bucket', data) + + # List all buckets with stats. + data = self._get('/api/rgw/bucket?stats=true') + self.assertStatus(200) + self.assertEqual(len(data), 1) + self.assertSchema(data[0], JObj(sub_elems={ + 'bid': JLeaf(str), + 'bucket': JLeaf(str), + 'bucket_quota': JObj(sub_elems={}, allow_unknown=True), + 'id': JLeaf(str), + 'owner': JLeaf(str), + 'usage': JObj(sub_elems={}, allow_unknown=True), + 'tenant': JLeaf(str), + }, allow_unknown=True)) + + # Get the bucket. 
+ data = self._get('/api/rgw/bucket/teuth-test-bucket') + self.assertStatus(200) + self.assertSchema(data, JObj(sub_elems={ + 'id': JLeaf(str), + 'bid': JLeaf(str), + 'tenant': JLeaf(str), + 'bucket': JLeaf(str), + 'bucket_quota': JObj(sub_elems={}, allow_unknown=True), + 'owner': JLeaf(str), + 'usage': JObj(sub_elems={}, allow_unknown=True), + }, allow_unknown=True)) + self.assertEqual(data['bucket'], 'teuth-test-bucket') + self.assertEqual(data['owner'], 'admin') + + # Update the bucket. + self._put( + '/api/rgw/bucket/teuth-test-bucket', + params={ + 'bucket_id': data['id'], + 'uid': 'teuth-test-user' + }) + self.assertStatus(200) + data = self._get('/api/rgw/bucket/teuth-test-bucket') + self.assertStatus(200) + self.assertSchema(data, JObj(sub_elems={ + 'owner': JLeaf(str), + 'bid': JLeaf(str), + 'tenant': JLeaf(str) + }, allow_unknown=True)) + self.assertEqual(data['owner'], 'teuth-test-user') + + # Delete the bucket. + self._delete('/api/rgw/bucket/teuth-test-bucket') + self.assertStatus(204) + data = self._get('/api/rgw/bucket') + self.assertStatus(200) + self.assertEqual(len(data), 0) + + def test_create_get_update_delete_w_tenant(self): + # Create a new bucket. The tenant of the user is used when + # the bucket is created. + self._post( + '/api/rgw/bucket', + params={ + 'bucket': 'teuth-test-bucket', + 'uid': 'testx$teuth-test-user' + }) + self.assertStatus(201) + # It's not possible to validate the result because there + # IS NO result object returned by the RGW Admin OPS API + # when a tenanted bucket is created. + data = self.jsonBody() + self.assertIsNone(data) + + # List all buckets. + data = self._get('/api/rgw/bucket') + self.assertStatus(200) + self.assertEqual(len(data), 1) + self.assertIn('testx/teuth-test-bucket', data) + + # Get the bucket. + data = self._get('/api/rgw/bucket/{}'.format( + parse.quote_plus('testx/teuth-test-bucket'))) + self.assertStatus(200) + self.assertSchema(data, JObj(sub_elems={ + 'owner': JLeaf(str), + 'bucket': JLeaf(str), + 'tenant': JLeaf(str), + 'bid': JLeaf(str) + }, allow_unknown=True)) + self.assertEqual(data['owner'], 'testx$teuth-test-user') + self.assertEqual(data['bucket'], 'teuth-test-bucket') + self.assertEqual(data['tenant'], 'testx') + self.assertEqual(data['bid'], 'testx/teuth-test-bucket') + + # Update bucket: different user from same tenant. + self._put( + '/api/rgw/bucket/{}'.format( + parse.quote_plus('testx/teuth-test-bucket')), + params={ + 'bucket_id': data['id'], + 'uid': 'testx$teuth-test-user2' + }) + self.assertStatus(200) + data = self._get('/api/rgw/bucket/{}'.format( + parse.quote_plus('testx/teuth-test-bucket'))) + self.assertStatus(200) + self.assertIn('owner', data) + self.assertEqual(data['owner'], 'testx$teuth-test-user2') + + # Update bucket: different user from empty tenant. + self._put( + '/api/rgw/bucket/{}'.format( + parse.quote_plus('testx/teuth-test-bucket')), + params={ + 'bucket_id': data['id'], + 'uid': 'admin' + }) + self.assertStatus(200) + data = self._get('/api/rgw/bucket/{}'.format( + parse.quote_plus('testx/teuth-test-bucket'))) + self.assertStatus(200) + self.assertIn('owner', data) + self.assertEqual(data['owner'], 'admin') + + # Delete the bucket. 
+ self._delete('/api/rgw/bucket/{}'.format( + parse.quote_plus('testx/teuth-test-bucket'))) + self.assertStatus(204) + data = self._get('/api/rgw/bucket') + self.assertStatus(200) + self.assertEqual(len(data), 0) + + +class RgwDaemonTest(DashboardTestCase): + + AUTH_ROLES = ['rgw-manager'] + + @DashboardTestCase.RunAs('test', 'test', [{ + 'rgw': ['create', 'update', 'delete'] + }]) + def test_read_access_permissions(self): + self._get('/api/rgw/daemon') + self.assertStatus(403) + self._get('/api/rgw/daemon/id') + self.assertStatus(403) + + def test_list(self): + data = self._get('/api/rgw/daemon') + self.assertStatus(200) + self.assertEqual(len(data), 1) + data = data[0] + self.assertIn('id', data) + self.assertIn('version', data) + self.assertIn('server_hostname', data) + + def test_get(self): + data = self._get('/api/rgw/daemon') + self.assertStatus(200) + + data = self._get('/api/rgw/daemon/{}'.format(data[0]['id'])) + self.assertStatus(200) + self.assertIn('rgw_metadata', data) + self.assertIn('rgw_id', data) + self.assertIn('rgw_status', data) + self.assertTrue(data['rgw_metadata']) + + def test_status(self): + self._radosgw_admin_cmd([ + 'user', 'create', '--uid=admin', '--display-name=admin', + '--system', '--access-key=admin', '--secret=admin' + ]) + self._ceph_cmd(['dashboard', 'set-rgw-api-user-id', 'admin']) + self._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-secret-key'], 'admin') + self._ceph_cmd_with_secret(['dashboard', 'set-rgw-api-access-key'], 'admin') + + data = self._get('/api/rgw/status') + self.assertStatus(200) + self.assertIn('available', data) + self.assertIn('message', data) + self.assertTrue(data['available']) + + +class RgwUserTest(RgwTestCase): + + AUTH_ROLES = ['rgw-manager'] + + @classmethod + def setUpClass(cls): + super(RgwUserTest, cls).setUpClass() + + def _assert_user_data(self, data): + self.assertSchema(data, JObj(sub_elems={ + 'caps': JList(JObj(sub_elems={}, allow_unknown=True)), + 'display_name': JLeaf(str), + 'email': JLeaf(str), + 'keys': JList(JObj(sub_elems={}, allow_unknown=True)), + 'max_buckets': JLeaf(int), + 'subusers': JList(JLeaf(str)), + 'suspended': JLeaf(int), + 'swift_keys': JList(JObj(sub_elems={}, allow_unknown=True)), + 'tenant': JLeaf(str), + 'user_id': JLeaf(str), + 'uid': JLeaf(str) + }, allow_unknown=True)) + self.assertGreaterEqual(len(data['keys']), 1) + + def test_get(self): + data = self.get_rgw_user('admin') + self.assertStatus(200) + self._assert_user_data(data) + self.assertEqual(data['user_id'], 'admin') + + def test_list(self): + data = self._get('/api/rgw/user') + self.assertStatus(200) + self.assertGreaterEqual(len(data), 1) + self.assertIn('admin', data) + + def test_create_get_update_delete(self): + # Create a new user. + self._post('/api/rgw/user', params={ + 'uid': 'teuth-test-user', + 'display_name': 'display name' + }) + self.assertStatus(201) + data = self.jsonBody() + self._assert_user_data(data) + self.assertEqual(data['user_id'], 'teuth-test-user') + self.assertEqual(data['display_name'], 'display name') + + # Get the user. + data = self.get_rgw_user('teuth-test-user') + self.assertStatus(200) + self._assert_user_data(data) + self.assertEqual(data['tenant'], '') + self.assertEqual(data['user_id'], 'teuth-test-user') + self.assertEqual(data['uid'], 'teuth-test-user') + + # Update the user. 
+ self._put( + '/api/rgw/user/teuth-test-user', + params={'display_name': 'new name'}) + self.assertStatus(200) + data = self.jsonBody() + self._assert_user_data(data) + self.assertEqual(data['display_name'], 'new name') + + # Delete the user. + self._delete('/api/rgw/user/teuth-test-user') + self.assertStatus(204) + self.get_rgw_user('teuth-test-user') + self.assertStatus(500) + resp = self.jsonBody() + self.assertIn('detail', resp) + self.assertIn('failed request with status code 404', resp['detail']) + self.assertIn('"Code":"NoSuchUser"', resp['detail']) + self.assertIn('"HostId"', resp['detail']) + self.assertIn('"RequestId"', resp['detail']) + + def test_create_get_update_delete_w_tenant(self): + # Create a new user. + self._post( + '/api/rgw/user', + params={ + 'uid': 'test01$teuth-test-user', + 'display_name': 'display name' + }) + self.assertStatus(201) + data = self.jsonBody() + self._assert_user_data(data) + self.assertEqual(data['user_id'], 'teuth-test-user') + self.assertEqual(data['display_name'], 'display name') + + # Get the user. + data = self.get_rgw_user('test01$teuth-test-user') + self.assertStatus(200) + self._assert_user_data(data) + self.assertEqual(data['tenant'], 'test01') + self.assertEqual(data['user_id'], 'teuth-test-user') + self.assertEqual(data['uid'], 'test01$teuth-test-user') + + # Update the user. + self._put( + '/api/rgw/user/test01$teuth-test-user', + params={'display_name': 'new name'}) + self.assertStatus(200) + data = self.jsonBody() + self._assert_user_data(data) + self.assertEqual(data['display_name'], 'new name') + + # Delete the user. + self._delete('/api/rgw/user/test01$teuth-test-user') + self.assertStatus(204) + self.get_rgw_user('test01$teuth-test-user') + self.assertStatus(500) + resp = self.jsonBody() + self.assertIn('detail', resp) + self.assertIn('failed request with status code 404', resp['detail']) + self.assertIn('"Code":"NoSuchUser"', resp['detail']) + self.assertIn('"HostId"', resp['detail']) + self.assertIn('"RequestId"', resp['detail']) + + +class RgwUserCapabilityTest(RgwTestCase): + + AUTH_ROLES = ['rgw-manager'] + + @classmethod + def setUpClass(cls): + cls.create_test_user = True + super(RgwUserCapabilityTest, cls).setUpClass() + + def test_set(self): + self._post( + '/api/rgw/user/teuth-test-user/capability', + params={ + 'type': 'usage', + 'perm': 'read' + }) + self.assertStatus(201) + data = self.jsonBody() + self.assertEqual(len(data), 1) + data = data[0] + self.assertEqual(data['type'], 'usage') + self.assertEqual(data['perm'], 'read') + + # Get the user data to validate the capabilities. + data = self.get_rgw_user('teuth-test-user') + self.assertStatus(200) + self.assertGreaterEqual(len(data['caps']), 1) + self.assertEqual(data['caps'][0]['type'], 'usage') + self.assertEqual(data['caps'][0]['perm'], 'read') + + def test_delete(self): + self._delete( + '/api/rgw/user/teuth-test-user/capability', + params={ + 'type': 'metadata', + 'perm': 'write' + }) + self.assertStatus(204) + + # Get the user data to validate the capabilities. 
+ data = self.get_rgw_user('teuth-test-user') + self.assertStatus(200) + self.assertEqual(len(data['caps']), 0) + + +class RgwUserKeyTest(RgwTestCase): + + AUTH_ROLES = ['rgw-manager'] + + @classmethod + def setUpClass(cls): + cls.create_test_user = True + super(RgwUserKeyTest, cls).setUpClass() + + def test_create_s3(self): + self._post( + '/api/rgw/user/teuth-test-user/key', + params={ + 'key_type': 's3', + 'generate_key': 'false', + 'access_key': 'abc987', + 'secret_key': 'aaabbbccc' + }) + data = self.jsonBody() + self.assertStatus(201) + self.assertGreaterEqual(len(data), 3) + key = self.find_object_in_list('access_key', 'abc987', data) + self.assertIsInstance(key, object) + self.assertEqual(key['secret_key'], 'aaabbbccc') + + def test_create_swift(self): + self._post( + '/api/rgw/user/teuth-test-user/key', + params={ + 'key_type': 'swift', + 'subuser': 'teuth-test-subuser', + 'generate_key': 'false', + 'secret_key': 'xxxyyyzzz' + }) + data = self.jsonBody() + self.assertStatus(201) + self.assertGreaterEqual(len(data), 2) + key = self.find_object_in_list('secret_key', 'xxxyyyzzz', data) + self.assertIsInstance(key, object) + + def test_delete_s3(self): + self._delete( + '/api/rgw/user/teuth-test-user/key', + params={ + 'key_type': 's3', + 'access_key': 'xyz123' + }) + self.assertStatus(204) + + def test_delete_swift(self): + self._delete( + '/api/rgw/user/teuth-test-user/key', + params={ + 'key_type': 'swift', + 'subuser': 'teuth-test-user:teuth-test-subuser2' + }) + self.assertStatus(204) + + +class RgwUserQuotaTest(RgwTestCase): + + AUTH_ROLES = ['rgw-manager'] + + @classmethod + def setUpClass(cls): + cls.create_test_user = True + super(RgwUserQuotaTest, cls).setUpClass() + + def _assert_quota(self, data): + self.assertIn('user_quota', data) + self.assertIn('max_objects', data['user_quota']) + self.assertIn('enabled', data['user_quota']) + self.assertIn('max_size_kb', data['user_quota']) + self.assertIn('max_size', data['user_quota']) + self.assertIn('bucket_quota', data) + self.assertIn('max_objects', data['bucket_quota']) + self.assertIn('enabled', data['bucket_quota']) + self.assertIn('max_size_kb', data['bucket_quota']) + self.assertIn('max_size', data['bucket_quota']) + + def test_get_quota(self): + data = self._get('/api/rgw/user/teuth-test-user/quota') + self.assertStatus(200) + self._assert_quota(data) + + def test_set_user_quota(self): + self._put( + '/api/rgw/user/teuth-test-user/quota', + params={ + 'quota_type': 'user', + 'enabled': 'true', + 'max_size_kb': 2048, + 'max_objects': 101 + }) + self.assertStatus(200) + + data = self._get('/api/rgw/user/teuth-test-user/quota') + self.assertStatus(200) + self._assert_quota(data) + self.assertEqual(data['user_quota']['max_objects'], 101) + self.assertTrue(data['user_quota']['enabled']) + self.assertEqual(data['user_quota']['max_size_kb'], 2048) + + def test_set_bucket_quota(self): + self._put( + '/api/rgw/user/teuth-test-user/quota', + params={ + 'quota_type': 'bucket', + 'enabled': 'false', + 'max_size_kb': 4096, + 'max_objects': 2000 + }) + self.assertStatus(200) + + data = self._get('/api/rgw/user/teuth-test-user/quota') + self.assertStatus(200) + self._assert_quota(data) + self.assertEqual(data['bucket_quota']['max_objects'], 2000) + self.assertFalse(data['bucket_quota']['enabled']) + self.assertEqual(data['bucket_quota']['max_size_kb'], 4096) + + +class RgwUserSubuserTest(RgwTestCase): + + AUTH_ROLES = ['rgw-manager'] + + @classmethod + def setUpClass(cls): + cls.create_test_user = True + super(RgwUserSubuserTest, 
cls).setUpClass() + + def test_create_swift(self): + self._post( + '/api/rgw/user/teuth-test-user/subuser', + params={ + 'subuser': 'tux', + 'access': 'readwrite', + 'key_type': 'swift' + }) + self.assertStatus(201) + data = self.jsonBody() + subuser = self.find_object_in_list('id', 'teuth-test-user:tux', data) + self.assertIsInstance(subuser, object) + self.assertEqual(subuser['permissions'], 'read-write') + + # Get the user data to validate the keys. + data = self.get_rgw_user('teuth-test-user') + self.assertStatus(200) + key = self.find_object_in_list('user', 'teuth-test-user:tux', + data['swift_keys']) + self.assertIsInstance(key, object) + + def test_create_s3(self): + self._post( + '/api/rgw/user/teuth-test-user/subuser', + params={ + 'subuser': 'hugo', + 'access': 'write', + 'generate_secret': 'false', + 'access_key': 'yyy', + 'secret_key': 'xxx' + }) + self.assertStatus(201) + data = self.jsonBody() + subuser = self.find_object_in_list('id', 'teuth-test-user:hugo', data) + self.assertIsInstance(subuser, object) + self.assertEqual(subuser['permissions'], 'write') + + # Get the user data to validate the keys. + data = self.get_rgw_user('teuth-test-user') + self.assertStatus(200) + key = self.find_object_in_list('user', 'teuth-test-user:hugo', + data['keys']) + self.assertIsInstance(key, object) + self.assertEqual(key['secret_key'], 'xxx') + + def test_delete_w_purge(self): + self._delete( + '/api/rgw/user/teuth-test-user/subuser/teuth-test-subuser2') + self.assertStatus(204) + + # Get the user data to check that the keys don't exist anymore. + data = self.get_rgw_user('teuth-test-user') + self.assertStatus(200) + key = self.find_object_in_list( + 'user', 'teuth-test-user:teuth-test-subuser2', data['swift_keys']) + self.assertIsNone(key) + + def test_delete_wo_purge(self): + self._delete( + '/api/rgw/user/teuth-test-user/subuser/teuth-test-subuser', + params={'purge_keys': 'false'}) + self.assertStatus(204) + + # Get the user data to check whether they keys still exist. 
+ data = self.get_rgw_user('teuth-test-user') + self.assertStatus(200) + key = self.find_object_in_list( + 'user', 'teuth-test-user:teuth-test-subuser', data['keys']) + self.assertIsInstance(key, object) diff --git a/qa/tasks/mgr/dashboard/test_role.py b/qa/tasks/mgr/dashboard/test_role.py new file mode 100644 index 00000000..6b0e35b2 --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_role.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +from .helper import DashboardTestCase + + +class RoleTest(DashboardTestCase): + @classmethod + def _create_role(cls, name=None, description=None, scopes_permissions=None): + data = {} + if name: + data['name'] = name + if description: + data['description'] = description + if scopes_permissions: + data['scopes_permissions'] = scopes_permissions + cls._post('/api/role', data) + + def test_crud_role(self): + self._create_role(name='role1', + description='Description 1', + scopes_permissions={'osd': ['read']}) + self.assertStatus(201) + self.assertJsonBody({ + 'name': 'role1', + 'description': 'Description 1', + 'scopes_permissions': {'osd': ['read']}, + 'system': False + }) + + self._get('/api/role/role1') + self.assertStatus(200) + self.assertJsonBody({ + 'name': 'role1', + 'description': 'Description 1', + 'scopes_permissions': {'osd': ['read']}, + 'system': False + }) + + self._put('/api/role/role1', { + 'description': 'Description 2', + 'scopes_permissions': {'osd': ['read', 'update']}, + }) + self.assertStatus(200) + self.assertJsonBody({ + 'name': 'role1', + 'description': 'Description 2', + 'scopes_permissions': {'osd': ['read', 'update']}, + 'system': False + }) + + self._delete('/api/role/role1') + self.assertStatus(204) + + def test_list_roles(self): + roles = self._get('/api/role') + self.assertStatus(200) + + self.assertGreaterEqual(len(roles), 1) + for role in roles: + self.assertIn('name', role) + self.assertIn('description', role) + self.assertIn('scopes_permissions', role) + self.assertIn('system', role) + + def test_get_role_does_not_exist(self): + self._get('/api/role/role2') + self.assertStatus(404) + + def test_create_role_already_exists(self): + self._create_role(name='read-only', + description='Description 1', + scopes_permissions={'osd': ['read']}) + self.assertStatus(400) + self.assertError(code='role_already_exists', + component='role') + + def test_create_role_no_name(self): + self._create_role(description='Description 1', + scopes_permissions={'osd': ['read']}) + self.assertStatus(400) + self.assertError(code='name_required', + component='role') + + def test_create_role_invalid_scope(self): + self._create_role(name='role1', + description='Description 1', + scopes_permissions={'invalid-scope': ['read']}) + self.assertStatus(400) + self.assertError(code='invalid_scope', + component='role') + + def test_create_role_invalid_permission(self): + self._create_role(name='role1', + description='Description 1', + scopes_permissions={'osd': ['invalid-permission']}) + self.assertStatus(400) + self.assertError(code='invalid_permission', + component='role') + + def test_delete_role_does_not_exist(self): + self._delete('/api/role/role2') + self.assertStatus(404) + + def test_delete_system_role(self): + self._delete('/api/role/read-only') + self.assertStatus(400) + self.assertError(code='cannot_delete_system_role', + component='role') + + def test_delete_role_associated_with_user(self): + self.create_user("user", "user", ['read-only']) + self._create_role(name='role1', + description='Description 1', + 
scopes_permissions={'user': ['create', 'read', 'update', 'delete']}) + self.assertStatus(201) + self._put('/api/user/user', {'roles': ['role1']}) + self.assertStatus(200) + + self._delete('/api/role/role1') + self.assertStatus(400) + self.assertError(code='role_is_associated_with_user', + component='role') + + self._put('/api/user/user', {'roles': ['administrator']}) + self.assertStatus(200) + self._delete('/api/role/role1') + self.assertStatus(204) + self.delete_user("user") + + def test_update_role_does_not_exist(self): + self._put('/api/role/role2', {}) + self.assertStatus(404) + + def test_update_system_role(self): + self._put('/api/role/read-only', {}) + self.assertStatus(400) + self.assertError(code='cannot_update_system_role', + component='role') diff --git a/qa/tasks/mgr/dashboard/test_settings.py b/qa/tasks/mgr/dashboard/test_settings.py new file mode 100644 index 00000000..2d890484 --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_settings.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +from .helper import DashboardTestCase, JList, JObj, JAny + + +class SettingsTest(DashboardTestCase): + def setUp(self): + super(SettingsTest, self).setUp() + self.settings = self._get('/api/settings') + + def tearDown(self): + self._put( + '/api/settings', + {setting['name']: setting['value'] + for setting in self.settings}) + + def test_list_settings(self): + settings = self._get('/api/settings') + self.assertGreater(len(settings), 10) + self.assertSchema( + settings, + JList( + JObj({ + 'default': JAny(none=False), + 'name': str, + 'type': str, + 'value': JAny(none=False) + }))) + self.assertStatus(200) + + def test_get_setting(self): + setting = self._get('/api/settings/rgw-api-access-key') + self.assertSchema( + setting, + JObj({ + 'default': JAny(none=False), + 'name': str, + 'type': str, + 'value': JAny(none=False) + })) + self.assertStatus(200) + + def test_set_setting(self): + self._put('/api/settings/rgw-api-access-key', {'value': 'foo'}) + self.assertStatus(200) + + value = self._get('/api/settings/rgw-api-access-key')['value'] + self.assertEqual('foo', value) + + def test_bulk_set(self): + self._put('/api/settings', { + 'RGW_API_HOST': 'somehost', + 'RGW_API_PORT': 7777, + }) + self.assertStatus(200) + + host = self._get('/api/settings/rgw-api-host')['value'] + self.assertStatus(200) + self.assertEqual('somehost', host) + + port = self._get('/api/settings/rgw-api-port')['value'] + self.assertStatus(200) + self.assertEqual(7777, port) diff --git a/qa/tasks/mgr/dashboard/test_summary.py b/qa/tasks/mgr/dashboard/test_summary.py new file mode 100644 index 00000000..1a5d1e99 --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_summary.py @@ -0,0 +1,40 @@ +from __future__ import absolute_import + +from .helper import DashboardTestCase + + +class SummaryTest(DashboardTestCase): + CEPHFS = True + + def test_summary(self): + data = self._get("/api/summary") + self.assertStatus(200) + + self.assertIn('health_status', data) + self.assertIn('mgr_id', data) + self.assertIn('have_mon_connection', data) + self.assertIn('rbd_mirroring', data) + self.assertIn('executing_tasks', data) + self.assertIn('finished_tasks', data) + self.assertIn('version', data) + self.assertIsNotNone(data['health_status']) + self.assertIsNotNone(data['mgr_id']) + self.assertIsNotNone(data['have_mon_connection']) + self.assertEqual(data['rbd_mirroring'], {'errors': 0, 'warnings': 0}) + + @DashboardTestCase.RunAs('test', 'test', ['pool-manager']) + def test_summary_permissions(self): + data = 
self._get("/api/summary") + self.assertStatus(200) + + self.assertIn('health_status', data) + self.assertIn('mgr_id', data) + self.assertIn('have_mon_connection', data) + self.assertNotIn('rbd_mirroring', data) + self.assertIn('executing_tasks', data) + self.assertIn('finished_tasks', data) + self.assertIn('version', data) + self.assertIsNotNone(data['health_status']) + self.assertIsNotNone(data['mgr_id']) + self.assertIsNotNone(data['have_mon_connection']) + diff --git a/qa/tasks/mgr/dashboard/test_user.py b/qa/tasks/mgr/dashboard/test_user.py new file mode 100644 index 00000000..7af3442d --- /dev/null +++ b/qa/tasks/mgr/dashboard/test_user.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import + +from .helper import DashboardTestCase + + +class UserTest(DashboardTestCase): + + @classmethod + def _create_user(cls, username=None, password=None, name=None, email=None, roles=None): + data = {} + if username: + data['username'] = username + if password: + data['password'] = password + if name: + data['name'] = name + if email: + data['email'] = email + if roles: + data['roles'] = roles + cls._post("/api/user", data) + + def test_crud_user(self): + self._create_user(username='user1', + password='mypassword', + name='My Name', + email='my@email.com', + roles=['administrator']) + self.assertStatus(201) + user = self.jsonBody() + + self._get('/api/user/user1') + self.assertStatus(200) + self.assertJsonBody({ + 'username': 'user1', + 'name': 'My Name', + 'email': 'my@email.com', + 'roles': ['administrator'], + 'lastUpdate': user['lastUpdate'] + }) + + self._put('/api/user/user1', { + 'name': 'My New Name', + 'email': 'mynew@email.com', + 'roles': ['block-manager'], + }) + self.assertStatus(200) + user = self.jsonBody() + self.assertJsonBody({ + 'username': 'user1', + 'name': 'My New Name', + 'email': 'mynew@email.com', + 'roles': ['block-manager'], + 'lastUpdate': user['lastUpdate'] + }) + + self._delete('/api/user/user1') + self.assertStatus(204) + + def test_list_users(self): + self._get('/api/user') + self.assertStatus(200) + user = self.jsonBody() + self.assertEqual(len(user), 1) + user = user[0] + self.assertJsonBody([{ + 'username': 'admin', + 'name': None, + 'email': None, + 'roles': ['administrator'], + 'lastUpdate': user['lastUpdate'] + }]) + + def test_create_user_already_exists(self): + self._create_user(username='admin', + password='mypassword', + name='administrator', + email='my@email.com', + roles=['administrator']) + self.assertStatus(400) + self.assertError(code='username_already_exists', + component='user') + + def test_create_user_invalid_role(self): + self._create_user(username='user1', + password='mypassword', + name='My Name', + email='my@email.com', + roles=['invalid-role']) + self.assertStatus(400) + self.assertError(code='role_does_not_exist', + component='user') + + def test_delete_user_does_not_exist(self): + self._delete('/api/user/user2') + self.assertStatus(404) + + @DashboardTestCase.RunAs('test', 'test', [{'user': ['create', 'read', 'update', 'delete']}]) + def test_delete_current_user(self): + self._delete('/api/user/test') + self.assertStatus(400) + self.assertError(code='cannot_delete_current_user', + component='user') + + def test_update_user_does_not_exist(self): + self._put('/api/user/user2', {'name': 'My New Name'}) + self.assertStatus(404) + + def test_update_user_invalid_role(self): + self._put('/api/user/admin', {'roles': ['invalid-role']}) + self.assertStatus(400) + self.assertError(code='role_does_not_exist', + 
component='user') diff --git a/qa/tasks/mgr/mgr_test_case.py b/qa/tasks/mgr/mgr_test_case.py new file mode 100644 index 00000000..66f87486 --- /dev/null +++ b/qa/tasks/mgr/mgr_test_case.py @@ -0,0 +1,204 @@ + +from unittest import case +import json +import logging + +from teuthology import misc +from tasks.ceph_test_case import CephTestCase + +# TODO move definition of CephCluster away from the CephFS stuff +from tasks.cephfs.filesystem import CephCluster + + +log = logging.getLogger(__name__) + + +class MgrCluster(CephCluster): + def __init__(self, ctx): + super(MgrCluster, self).__init__(ctx) + self.mgr_ids = list(misc.all_roles_of_type(ctx.cluster, 'mgr')) + + if len(self.mgr_ids) == 0: + raise RuntimeError( + "This task requires at least one manager daemon") + + self.mgr_daemons = dict( + [(mgr_id, self._ctx.daemons.get_daemon('mgr', mgr_id)) for mgr_id + in self.mgr_ids]) + + def mgr_stop(self, mgr_id): + self.mgr_daemons[mgr_id].stop() + + def mgr_fail(self, mgr_id): + self.mon_manager.raw_cluster_cmd("mgr", "fail", mgr_id) + + def mgr_restart(self, mgr_id): + self.mgr_daemons[mgr_id].restart() + + def get_mgr_map(self): + status = json.loads( + self.mon_manager.raw_cluster_cmd("status", "--format=json-pretty")) + + return status["mgrmap"] + + def get_active_id(self): + return self.get_mgr_map()["active_name"] + + def get_standby_ids(self): + return [s['name'] for s in self.get_mgr_map()["standbys"]] + + def set_module_conf(self, module, key, val): + self.mon_manager.raw_cluster_cmd("config", "set", "mgr", + "mgr/{0}/{1}".format( + module, key + ), val) + + def set_module_localized_conf(self, module, mgr_id, key, val): + self.mon_manager.raw_cluster_cmd("config", "set", "mgr", + "mgr/{0}/{1}/{2}".format( + module, mgr_id, key + ), val) + + +class MgrTestCase(CephTestCase): + MGRS_REQUIRED = 1 + + @classmethod + def setup_mgrs(cls): + # Stop all the daemons + for daemon in cls.mgr_cluster.mgr_daemons.values(): + daemon.stop() + + for mgr_id in cls.mgr_cluster.mgr_ids: + cls.mgr_cluster.mgr_fail(mgr_id) + + # Unload all non-default plugins + loaded = json.loads(cls.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "module", "ls"))['enabled_modules'] + unload_modules = set(loaded) - {"restful"} + + for m in unload_modules: + cls.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "module", "disable", m) + + # Start all the daemons + for daemon in cls.mgr_cluster.mgr_daemons.values(): + daemon.restart() + + # Wait for an active to come up + cls.wait_until_true(lambda: cls.mgr_cluster.get_active_id() != "", + timeout=20) + + expect_standbys = set(cls.mgr_cluster.mgr_ids) \ + - {cls.mgr_cluster.get_active_id()} + cls.wait_until_true( + lambda: set(cls.mgr_cluster.get_standby_ids()) == expect_standbys, + timeout=20) + + @classmethod + def setUpClass(cls): + # The test runner should have populated this + assert cls.mgr_cluster is not None + + if len(cls.mgr_cluster.mgr_ids) < cls.MGRS_REQUIRED: + cls.skipTest( + "Only have {0} manager daemons, {1} are required".format( + len(cls.mgr_cluster.mgr_ids), cls.MGRS_REQUIRED)) + + cls.setup_mgrs() + + @classmethod + def _load_module(cls, module_name): + loaded = json.loads(cls.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "module", "ls"))['enabled_modules'] + if module_name in loaded: + # The enable command is idempotent, but our wait for a restart + # isn't, so let's return now if it's already loaded + return + + initial_mgr_map = cls.mgr_cluster.get_mgr_map() + + # check if the the module is configured as an always on module + mgr_daemons = 
json.loads(cls.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "metadata")) + + for daemon in mgr_daemons: + if daemon["name"] == initial_mgr_map["active_name"]: + ceph_version = daemon["ceph_release"] + always_on = initial_mgr_map["always_on_modules"].get(ceph_version, []) + if module_name in always_on: + return + + initial_gid = initial_mgr_map['active_gid'] + cls.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "module", "enable", + module_name, "--force") + + # Wait for the module to load + def has_restarted(): + mgr_map = cls.mgr_cluster.get_mgr_map() + done = mgr_map['active_gid'] != initial_gid and mgr_map['available'] + if done: + log.debug("Restarted after module load (new active {0}/{1})".format( + mgr_map['active_name'] , mgr_map['active_gid'])) + return done + cls.wait_until_true(has_restarted, timeout=30) + + + @classmethod + def _get_uri(cls, service_name): + # Little dict hack so that I can assign into this from + # the get_or_none function + mgr_map = {'x': None} + + def _get_or_none(): + mgr_map['x'] = cls.mgr_cluster.get_mgr_map() + result = mgr_map['x']['services'].get(service_name, None) + return result + + cls.wait_until_true(lambda: _get_or_none() is not None, 30) + + uri = mgr_map['x']['services'][service_name] + + log.debug("Found {0} at {1} (daemon {2}/{3})".format( + service_name, uri, mgr_map['x']['active_name'], + mgr_map['x']['active_gid'])) + + return uri + + @classmethod + def _assign_ports(cls, module_name, config_name, min_port=7789): + """ + To avoid the need to run lots of hosts in teuthology tests to + get different URLs per mgr, we will hand out different ports + to each mgr here. + + This is already taken care of for us when running in a vstart + environment. + """ + # Start handing out ports well above Ceph's range. 
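+ # Every mgr id gets its own consecutive port through a per-daemon (localized) + # config option, so several daemons on one test node can serve the module + # without colliding.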
+ assign_port = min_port + + for mgr_id in cls.mgr_cluster.mgr_ids: + cls.mgr_cluster.mgr_stop(mgr_id) + cls.mgr_cluster.mgr_fail(mgr_id) + + for mgr_id in cls.mgr_cluster.mgr_ids: + log.debug("Using port {0} for {1} on mgr.{2}".format( + assign_port, module_name, mgr_id + )) + cls.mgr_cluster.set_module_localized_conf(module_name, mgr_id, + config_name, + str(assign_port)) + assign_port += 1 + + for mgr_id in cls.mgr_cluster.mgr_ids: + cls.mgr_cluster.mgr_restart(mgr_id) + + def is_available(): + mgr_map = cls.mgr_cluster.get_mgr_map() + done = mgr_map['available'] + if done: + log.debug("Available after assign ports (new active {0}/{1})".format( + mgr_map['active_name'], mgr_map['active_gid'])) + return done + cls.wait_until_true(is_available, timeout=30) diff --git a/qa/tasks/mgr/test_crash.py b/qa/tasks/mgr/test_crash.py new file mode 100644 index 00000000..49191127 --- /dev/null +++ b/qa/tasks/mgr/test_crash.py @@ -0,0 +1,108 @@ +import json +import logging +import datetime + +from .mgr_test_case import MgrTestCase + + +log = logging.getLogger(__name__) +UUID = 'd5775432-0742-44a3-a435-45095e32e6b1' +DATEFMT = '%Y-%m-%d %H:%M:%S.%f' + + +class TestCrash(MgrTestCase): + + def setUp(self): + super(TestCrash, self).setUp() + self.setup_mgrs() + self._load_module('crash') + + # Whip up some crash data + self.crashes = dict() + now = datetime.datetime.utcnow() + + for i in (0, 1, 3, 4, 8): + timestamp = now - datetime.timedelta(days=i) + timestamp = timestamp.strftime(DATEFMT) + 'Z' + crash_id = '_'.join((timestamp, UUID)).replace(' ', '_') + self.crashes[crash_id] = { + 'crash_id': crash_id, 'timestamp': timestamp, + } + + self.assertEqual( + 0, + self.mgr_cluster.mon_manager.raw_cluster_cmd_result( + 'crash', 'post', '-i', '-', + stdin=json.dumps(self.crashes[crash_id]), + ) + ) + + retstr = self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'crash', 'ls', + ) + log.warning("setUp: crash ls returns %s" % retstr) + + self.oldest_crashid = crash_id + + def tearDown(self): + for crash in self.crashes.values(): + self.mgr_cluster.mon_manager.raw_cluster_cmd_result( + 'crash', 'rm', crash['crash_id'] + ) + + def test_info(self): + for crash in self.crashes.values(): + log.warning('test_info: crash %s' % crash) + retstr = self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'crash', 'ls' + ) + log.warning('ls output: %s' % retstr) + retstr = self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'crash', 'info', crash['crash_id'], + ) + log.warning('crash info output: %s' % retstr) + crashinfo = json.loads(retstr) + self.assertIn('crash_id', crashinfo) + self.assertIn('timestamp', crashinfo) + + def test_ls(self): + retstr = self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'crash', 'ls', + ) + for crash in self.crashes.values(): + self.assertIn(crash['crash_id'], retstr) + + def test_rm(self): + crashid = next(iter(self.crashes.keys())) + self.assertEqual( + 0, + self.mgr_cluster.mon_manager.raw_cluster_cmd_result( + 'crash', 'rm', crashid, + ) + ) + + retstr = self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'crash', 'ls', + ) + self.assertNotIn(crashid, retstr) + + def test_stat(self): + retstr = self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'crash', 'stat', + ) + self.assertIn('5 crashes recorded', retstr) + self.assertIn('4 older than 1 days old:', retstr) + self.assertIn('3 older than 3 days old:', retstr) + self.assertIn('1 older than 7 days old:', retstr) + + def test_prune(self): + self.assertEqual( + 0, + self.mgr_cluster.mon_manager.raw_cluster_cmd_result( + 'crash', 'prune', '5' + ) + ) + retstr = 
self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'crash', 'ls', + ) + self.assertNotIn(self.oldest_crashid, retstr) diff --git a/qa/tasks/mgr/test_dashboard.py b/qa/tasks/mgr/test_dashboard.py new file mode 100644 index 00000000..41b26ad8 --- /dev/null +++ b/qa/tasks/mgr/test_dashboard.py @@ -0,0 +1,140 @@ +import logging +import requests + +from .mgr_test_case import MgrTestCase + + +log = logging.getLogger(__name__) + + +class TestDashboard(MgrTestCase): + MGRS_REQUIRED = 3 + + def setUp(self): + super(TestDashboard, self).setUp() + + self._assign_ports("dashboard", "ssl_server_port") + self._load_module("dashboard") + self.mgr_cluster.mon_manager.raw_cluster_cmd("dashboard", + "create-self-signed-cert") + + def tearDown(self): + self.mgr_cluster.mon_manager.raw_cluster_cmd("config", "set", "mgr", + "mgr/dashboard/standby_behaviour", + "redirect") + self.mgr_cluster.mon_manager.raw_cluster_cmd("config", "set", "mgr", + "mgr/dashboard/standby_error_status_code", + "500") + + def wait_until_webserver_available(self, url): + def _check_connection(): + try: + requests.get(url, allow_redirects=False, verify=False) + return True + except requests.ConnectionError: + pass + return False + self.wait_until_true(_check_connection, timeout=30) + + def test_standby(self): + original_active_id = self.mgr_cluster.get_active_id() + original_uri = self._get_uri("dashboard") + log.info("Originally running manager '{}' at {}".format( + original_active_id, original_uri)) + + # Force a failover and wait until the previously active manager + # is listed as standby. + self.mgr_cluster.mgr_fail(original_active_id) + self.wait_until_true( + lambda: original_active_id in self.mgr_cluster.get_standby_ids(), + timeout=30) + + failed_active_id = self.mgr_cluster.get_active_id() + failed_over_uri = self._get_uri("dashboard") + log.info("After failover running manager '{}' at {}".format( + failed_active_id, failed_over_uri)) + + self.assertNotEqual(original_uri, failed_over_uri) + + # Wait until web server of the standby node is settled. + self.wait_until_webserver_available(original_uri) + + # The original active daemon should have come back up as a standby + # and be doing redirects to the new active daemon. + r = requests.get(original_uri, allow_redirects=False, verify=False) + self.assertEqual(r.status_code, 303) + self.assertEqual(r.headers['Location'], failed_over_uri) + + # Ensure that every URL redirects to the active daemon. + r = requests.get("{}/runtime.js".format(original_uri.strip('/')), + allow_redirects=False, + verify=False) + self.assertEqual(r.status_code, 303) + self.assertEqual(r.headers['Location'], failed_over_uri) + + def test_standby_disable_redirect(self): + self.mgr_cluster.mon_manager.raw_cluster_cmd("config", "set", "mgr", + "mgr/dashboard/standby_behaviour", + "error") + + original_active_id = self.mgr_cluster.get_active_id() + original_uri = self._get_uri("dashboard") + log.info("Originally running manager '{}' at {}".format( + original_active_id, original_uri)) + + # Force a failover and wait until the previously active manager + # is listed as standby. 
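+ # ('mgr fail' only marks the daemon failed in the mgrmap; it comes back and + # re-registers as a standby, which is what the wait below checks for.)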
+ self.mgr_cluster.mgr_fail(original_active_id) + self.wait_until_true( + lambda: original_active_id in self.mgr_cluster.get_standby_ids(), + timeout=30) + + failed_active_id = self.mgr_cluster.get_active_id() + failed_over_uri = self._get_uri("dashboard") + log.info("After failover running manager '{}' at {}".format( + failed_active_id, failed_over_uri)) + + self.assertNotEqual(original_uri, failed_over_uri) + + # Wait until web server of the standby node is settled. + self.wait_until_webserver_available(original_uri) + + # Redirection should be disabled now, instead a 500 must be returned. + r = requests.get(original_uri, allow_redirects=False, verify=False) + self.assertEqual(r.status_code, 500) + + self.mgr_cluster.mon_manager.raw_cluster_cmd("config", "set", "mgr", + "mgr/dashboard/standby_error_status_code", + "503") + + # The customized HTTP status code (503) must be returned. + r = requests.get(original_uri, allow_redirects=False, verify=False) + self.assertEqual(r.status_code, 503) + + def test_urls(self): + base_uri = self._get_uri("dashboard") + + # This is a very simple smoke test to check that the dashboard can + # give us a 200 response to requests. We're not testing that + # the content is correct or even renders! + + urls = [ + "/", + ] + + failures = [] + + for url in urls: + r = requests.get(base_uri + url, allow_redirects=False, + verify=False) + if r.status_code >= 300 and r.status_code < 400: + log.error("Unexpected redirect to: {0} (from {1})".format( + r.headers['Location'], base_uri)) + if r.status_code != 200: + failures.append(url) + + log.info("{0}: {1} ({2} bytes)".format( + url, r.status_code, len(r.content) + )) + + self.assertListEqual(failures, []) diff --git a/qa/tasks/mgr/test_failover.py b/qa/tasks/mgr/test_failover.py new file mode 100644 index 00000000..a4e84088 --- /dev/null +++ b/qa/tasks/mgr/test_failover.py @@ -0,0 +1,148 @@ + +import logging +import json + +from .mgr_test_case import MgrTestCase + + +log = logging.getLogger(__name__) + + +class TestFailover(MgrTestCase): + MGRS_REQUIRED = 2 + + def setUp(self): + super(TestFailover, self).setUp() + self.setup_mgrs() + + def test_timeout(self): + """ + That when an active mgr stops responding, a standby is promoted + after mon_mgr_beacon_grace. + """ + + # Query which mgr is active + original_active = self.mgr_cluster.get_active_id() + original_standbys = self.mgr_cluster.get_standby_ids() + + # Stop that daemon + self.mgr_cluster.mgr_stop(original_active) + + # Assert that the other mgr becomes active + self.wait_until_true( + lambda: self.mgr_cluster.get_active_id() in original_standbys, + timeout=60 + ) + + self.mgr_cluster.mgr_restart(original_active) + self.wait_until_true( + lambda: original_active in self.mgr_cluster.get_standby_ids(), + timeout=10 + ) + + def test_timeout_nostandby(self): + """ + That when an active mgr stop responding, and no standby is + available, the active mgr is removed from the map anyway. 
+ """ + # Query which mgr is active + original_active = self.mgr_cluster.get_active_id() + original_standbys = self.mgr_cluster.get_standby_ids() + + for s in original_standbys: + self.mgr_cluster.mgr_stop(s) + self.mgr_cluster.mgr_fail(s) + + self.assertListEqual(self.mgr_cluster.get_standby_ids(), []) + self.assertEqual(self.mgr_cluster.get_active_id(), original_active) + + grace = int(self.mgr_cluster.get_config("mon_mgr_beacon_grace")) + log.info("Should time out in about {0} seconds".format(grace)) + + self.mgr_cluster.mgr_stop(original_active) + + # Now wait for the mon to notice the mgr is gone and remove it + # from the map. + self.wait_until_equal( + lambda: self.mgr_cluster.get_active_id(), + "", + timeout=grace * 2 + ) + + self.assertListEqual(self.mgr_cluster.get_standby_ids(), []) + self.assertEqual(self.mgr_cluster.get_active_id(), "") + + def test_explicit_fail(self): + """ + That when a user explicitly fails a daemon, a standby immediately + replaces it. + :return: + """ + # Query which mgr is active + original_active = self.mgr_cluster.get_active_id() + original_standbys = self.mgr_cluster.get_standby_ids() + + self.mgr_cluster.mgr_fail(original_active) + + # A standby should take over + self.wait_until_true( + lambda: self.mgr_cluster.get_active_id() in original_standbys, + timeout=60 + ) + + # The one we failed should come back as a standby (he isn't + # really dead) + self.wait_until_true( + lambda: original_active in self.mgr_cluster.get_standby_ids(), + timeout=10 + ) + + # Both daemons should have fully populated metadata + # (regression test for http://tracker.ceph.com/issues/21260) + meta = json.loads(self.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "metadata")) + id_to_meta = dict([(i['name'], i) for i in meta]) + for i in [original_active] + original_standbys: + self.assertIn(i, id_to_meta) + self.assertIn('ceph_version', id_to_meta[i]) + + # We should be able to fail back over again: the exercises + # our re-initialization of the python runtime within + # a single process lifetime. + + # Get rid of any bystander standbys so that the original_active + # will be selected as next active. 
+ new_active = self.mgr_cluster.get_active_id() + for daemon in original_standbys: + if daemon != new_active: + self.mgr_cluster.mgr_stop(daemon) + self.mgr_cluster.mgr_fail(daemon) + + self.assertListEqual(self.mgr_cluster.get_standby_ids(), + [original_active]) + + self.mgr_cluster.mgr_stop(new_active) + self.mgr_cluster.mgr_fail(new_active) + + self.assertEqual(self.mgr_cluster.get_active_id(), original_active) + self.assertEqual(self.mgr_cluster.get_standby_ids(), []) + + def test_standby_timeout(self): + """ + That when a standby daemon stops sending beacons, it is + removed from the list of standbys + :return: + """ + original_active = self.mgr_cluster.get_active_id() + original_standbys = self.mgr_cluster.get_standby_ids() + + victim = original_standbys[0] + self.mgr_cluster.mgr_stop(victim) + + expect_standbys = set(original_standbys) - {victim} + + self.wait_until_true( + lambda: set(self.mgr_cluster.get_standby_ids()) == expect_standbys, + timeout=60 + ) + self.assertEqual(self.mgr_cluster.get_active_id(), original_active) diff --git a/qa/tasks/mgr/test_insights.py b/qa/tasks/mgr/test_insights.py new file mode 100644 index 00000000..53a98b9c --- /dev/null +++ b/qa/tasks/mgr/test_insights.py @@ -0,0 +1,203 @@ +import logging +import json +import datetime +import time + +from .mgr_test_case import MgrTestCase + + +log = logging.getLogger(__name__) +UUID = 'd5775432-0742-44a3-a435-45095e32e6b2' +DATEFMT = '%Y-%m-%d %H:%M:%S.%f' + +class TestInsights(MgrTestCase): + def setUp(self): + super(TestInsights, self).setUp() + self.setup_mgrs() + self._load_module("insights") + self._load_module("selftest") + self.crash_ids = [] + + def tearDown(self): + self._clear_crashes() + + def _insights(self): + retstr = self.mgr_cluster.mon_manager.raw_cluster_cmd("insights") + return json.loads(retstr) + + def _add_crash(self, hours, make_invalid = False): + now = datetime.datetime.utcnow() + timestamp = now - datetime.timedelta(hours = hours) + timestamp = timestamp.strftime(DATEFMT) + 'Z' + crash_id = '_'.join((timestamp, UUID)).replace(' ', '_') + crash = { + 'crash_id': crash_id, + 'timestamp': timestamp, + } + if make_invalid: + crash["timestamp"] = "not a timestamp" + + ret = self.mgr_cluster.mon_manager.raw_cluster_cmd_result( + 'crash', 'post', '-i', '-', + stdin=json.dumps(crash) + ) + self.crash_ids.append(crash_id) + self.assertEqual(0, ret) + + def _clear_crashes(self): + for crash_id in self.crash_ids: + self.mgr_cluster.mon_manager.raw_cluster_cmd_result( + 'crash', 'rm', crash_id + ) + + def _wait_for_health_history_checks(self, *args): + """Wait for a set of health checks to appear in the health history""" + timeout = datetime.datetime.utcnow() + \ + datetime.timedelta(seconds = 15) + while True: + report = self._insights() + missing = False + for check in args: + if check not in report["health"]["history"]["checks"]: + missing = True + break + if not missing: + return + self.assertGreater(timeout, + datetime.datetime.utcnow()) + time.sleep(0.25) + + def _wait_for_curr_health_cleared(self, check): + timeout = datetime.datetime.utcnow() + \ + datetime.timedelta(seconds = 15) + while True: + report = self._insights() + if check not in report["health"]["current"]["checks"]: + return + self.assertGreater(timeout, + datetime.datetime.utcnow()) + time.sleep(0.25) + + def test_health_history(self): + # use empty health history as starting point + self.mgr_cluster.mon_manager.raw_cluster_cmd_result( + "insights", "prune-health", "0") + report = self._insights() + 
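The assertions that follow only touch a few fields of the insights report; a minimal sketch of the assumed shape (just the keys this file exercises, not the full schema; see test_schema below):

    report = {
        "health": {
            "current": {"checks": {"SOME_CHECK": {"severity": "warning"}}},
            "history": {"checks": {"SOME_CHECK": {"severity": "warning"}}},
        },
        "crashes": {"summary": {}},   # used by test_crash_history
        "errors": [],
    }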
self.assertFalse(report["health"]["history"]["checks"]) + + # generate health check history entries. we want to avoid the edge case + # of running these tests at _exactly_ the top of the hour so we can + # explicitly control when hourly work occurs. for this we use the + # current time offset to a half hour. + now = datetime.datetime.utcnow() + now = datetime.datetime( + year = now.year, + month = now.month, + day = now.day, + hour = now.hour, + minute = 30) + + check_names = set() + for hours in [-18, -11, -5, -1, 0]: + # change the insight module's perception of "now" ... + self.mgr_cluster.mon_manager.raw_cluster_cmd_result( + "mgr", "self-test", "insights_set_now_offset", str(hours)) + + # ... to simulate health check arrivals in the past + unique_check_name = "insights_health_check_{}".format(hours) + health_check = { + unique_check_name: { + "severity": "warning", + "summary": "summary", + "detail": ["detail"] + } + } + self.mgr_cluster.mon_manager.raw_cluster_cmd_result( + "mgr", "self-test", "health", "set", + json.dumps(health_check)) + + check_names.add(unique_check_name) + + # and also set the same health check to test deduplication + dupe_check_name = "insights_health_check".format(hours) + health_check = { + dupe_check_name: { + "severity": "warning", + "summary": "summary", + "detail": ["detail"] + } + } + self.mgr_cluster.mon_manager.raw_cluster_cmd_result( + "mgr", "self-test", "health", "set", + json.dumps(health_check)) + + check_names.add(dupe_check_name) + + # wait for the health check to show up in the history report + self._wait_for_health_history_checks(unique_check_name, dupe_check_name) + + # clear out the current health checks before moving on + self.mgr_cluster.mon_manager.raw_cluster_cmd_result( + "mgr", "self-test", "health", "clear") + self._wait_for_curr_health_cleared(unique_check_name) + + report = self._insights() + for check in check_names: + self.assertIn(check, report["health"]["history"]["checks"]) + + # restart the manager + active_id = self.mgr_cluster.get_active_id() + self.mgr_cluster.mgr_restart(active_id) + + # ensure that at least one of the checks is present after the restart. + # we don't for them all to be present because "earlier" checks may not + # have sat in memory long enough to be flushed. 
+ all_missing = True + report = self._insights() + for check in check_names: + if check in report["health"]["history"]["checks"]: + all_missing = False + break + self.assertFalse(all_missing) + + # pruning really removes history + self.mgr_cluster.mon_manager.raw_cluster_cmd_result( + "insights", "prune-health", "0") + report = self._insights() + self.assertFalse(report["health"]["history"]["checks"]) + + def test_schema(self): + """TODO: assert conformance to a full schema specification?""" + report = self._insights() + for key in ["osd_metadata", + "pg_summary", + "mon_status", + "manager_map", + "service_map", + "mon_map", + "crush_map", + "fs_map", + "osd_tree", + "df", + "osd_dump", + "config", + "health", + "crashes", + "version", + "errors"]: + self.assertIn(key, report) + + def test_crash_history(self): + self._clear_crashes() + report = self._insights() + self.assertFalse(report["crashes"]["summary"]) + self.assertFalse(report["errors"]) + + # crashes show up in the report + self._add_crash(1) + report = self._insights() + self.assertTrue(report["crashes"]["summary"]) + self.assertFalse(report["errors"]) + log.warning("{}".format(json.dumps(report["crashes"], indent=2))) + + self._clear_crashes() diff --git a/qa/tasks/mgr/test_module_selftest.py b/qa/tasks/mgr/test_module_selftest.py new file mode 100644 index 00000000..3c36a6eb --- /dev/null +++ b/qa/tasks/mgr/test_module_selftest.py @@ -0,0 +1,335 @@ + +import time +import requests +import errno +import logging +from teuthology.exceptions import CommandFailedError + +from .mgr_test_case import MgrTestCase + + +log = logging.getLogger(__name__) + + +class TestModuleSelftest(MgrTestCase): + """ + That modules with a self-test command can be loaded and execute it + without errors. + + This is not a substitute for really testing the modules, but it + is quick and is designed to catch regressions that could occur + if data structures change in a way that breaks how the modules + touch them. + """ + MGRS_REQUIRED = 1 + + def setUp(self): + super(TestModuleSelftest, self).setUp() + self.setup_mgrs() + + def _selftest_plugin(self, module_name): + self._load_module("selftest") + self._load_module(module_name) + + # Execute the module's self_test() method + self.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "self-test", "module", module_name) + + def test_zabbix(self): + # Set these mandatory config fields so that the zabbix module + # won't trigger health/log errors on load/serve. + self.mgr_cluster.set_module_conf("zabbix", "zabbix_host", "localhost") + self.mgr_cluster.set_module_conf("zabbix", "identifier", "foo") + self._selftest_plugin("zabbix") + + def test_prometheus(self): + self._assign_ports("prometheus", "server_port", min_port=8100) + self._selftest_plugin("prometheus") + + def test_influx(self): + self._selftest_plugin("influx") + + def test_diskprediction_local(self): + self._selftest_plugin("diskprediction_local") + + # Not included in qa/packages/packages.yaml + #def test_diskprediction_cloud(self): + # self._selftest_plugin("diskprediction_cloud") + + def test_telegraf(self): + self._selftest_plugin("telegraf") + + def test_iostat(self): + self._selftest_plugin("iostat") + + def test_devicehealth(self): + self._selftest_plugin("devicehealth") + # Clean up the pool that the module creates, because otherwise + # it's low PG count causes test failures. 
+ pool_name = "device_health_metrics" + self.mgr_cluster.mon_manager.raw_cluster_cmd( + "osd", "pool", "delete", pool_name, pool_name, + "--yes-i-really-really-mean-it") + + def test_selftest_run(self): + self._load_module("selftest") + self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "self-test", "run") + + def test_telemetry(self): + self._selftest_plugin("telemetry") + + def test_crash(self): + self._selftest_plugin("crash") + + def test_orchestrator_cli(self): + self._selftest_plugin("orchestrator_cli") + + + def test_selftest_config_update(self): + """ + That configuration updates are seen by running mgr modules + """ + self._load_module("selftest") + + def get_value(): + return self.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "self-test", "config", "get", "testkey").strip() + + self.assertEqual(get_value(), "None") + self.mgr_cluster.mon_manager.raw_cluster_cmd( + "config", "set", "mgr", "mgr/selftest/testkey", "foo") + self.wait_until_equal(get_value, "foo", timeout=10) + + def get_localized_value(): + return self.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "self-test", "config", "get_localized", "testkey").strip() + + self.assertEqual(get_localized_value(), "foo") + self.mgr_cluster.mon_manager.raw_cluster_cmd( + "config", "set", "mgr", "mgr/selftest/{}/testkey".format( + self.mgr_cluster.get_active_id()), + "bar") + self.wait_until_equal(get_localized_value, "bar", timeout=10) + + def test_selftest_config_upgrade(self): + """ + That pre-mimic config-key config settings are migrated into + mimic-style config settings and visible from mgr modules. + """ + self._load_module("selftest") + + def get_value(): + return self.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "self-test", "config", "get", "testkey").strip() + + def get_config(): + lines = self.mgr_cluster.mon_manager.raw_cluster_cmd( + "config", "dump")\ + .strip().split("\n") + result = [] + for line in lines[1:]: + tokens = line.strip().split() + log.info("tokens: {0}".format(tokens)) + subsys, key, value = tokens[0], tokens[2], tokens[3] + result.append((subsys, key, value)) + + return result + + # Stop ceph-mgr while we synthetically create a pre-mimic + # configuration scenario + for mgr_id in self.mgr_cluster.mgr_daemons.keys(): + self.mgr_cluster.mgr_stop(mgr_id) + self.mgr_cluster.mgr_fail(mgr_id) + + # Blow away any modern-style mgr module config options + # (the ceph-mgr implementation may only do the upgrade if + # it doesn't see new style options) + stash = [] + for subsys, key, value in get_config(): + if subsys == "mgr" and key.startswith("mgr/"): + log.info("Removing config key {0} ahead of upgrade".format( + key)) + self.mgr_cluster.mon_manager.raw_cluster_cmd( + "config", "rm", subsys, key) + stash.append((subsys, key, value)) + + # Inject an old-style configuration setting in config-key + self.mgr_cluster.mon_manager.raw_cluster_cmd( + "config-key", "set", "mgr/selftest/testkey", "testvalue") + + # Inject configuration settings that looks data-ish and should + # not be migrated to a config key + self.mgr_cluster.mon_manager.raw_cluster_cmd( + "config-key", "set", "mgr/selftest/testnewline", "foo\nbar") + + # Inject configuration setting that does not appear in the + # module's config schema + self.mgr_cluster.mon_manager.raw_cluster_cmd( + "config-key", "set", "mgr/selftest/kvitem", "foo\nbar") + + # Bring mgr daemons back online, the one that goes active + # should be doing the upgrade. 
+ for mgr_id in self.mgr_cluster.mgr_daemons.keys(): + self.mgr_cluster.mgr_restart(mgr_id) + + # Wait for a new active + self.wait_until_true( + lambda: self.mgr_cluster.get_active_id() != "", timeout=30) + + # Check that the selftest module sees the upgraded value + self.assertEqual(get_value(), "testvalue") + + # Check that the upgraded value is visible in the configuration + seen_keys = [k for s,k,v in get_config()] + self.assertIn("mgr/selftest/testkey", seen_keys) + + # ...and that the non-config-looking one isn't + self.assertNotIn("mgr/selftest/testnewline", seen_keys) + + # ...and that the not-in-schema one isn't + self.assertNotIn("mgr/selftest/kvitem", seen_keys) + + # Restore previous configuration + for subsys, key, value in stash: + self.mgr_cluster.mon_manager.raw_cluster_cmd( + "config", "set", subsys, key, value) + + def test_selftest_command_spam(self): + # Use the selftest module to stress the mgr daemon + self._load_module("selftest") + + # Use the dashboard to test that the mgr is still able to do its job + self._assign_ports("dashboard", "ssl_server_port") + self._load_module("dashboard") + self.mgr_cluster.mon_manager.raw_cluster_cmd("dashboard", + "create-self-signed-cert") + + original_active = self.mgr_cluster.get_active_id() + original_standbys = self.mgr_cluster.get_standby_ids() + + self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "self-test", + "background", "start", + "command_spam") + + dashboard_uri = self._get_uri("dashboard") + + delay = 10 + periods = 10 + for i in range(0, periods): + t1 = time.time() + # Check that an HTTP module remains responsive + r = requests.get(dashboard_uri, verify=False) + self.assertEqual(r.status_code, 200) + + # Check that a native non-module command remains responsive + self.mgr_cluster.mon_manager.raw_cluster_cmd("osd", "df") + + time.sleep(delay - (time.time() - t1)) + + self.mgr_cluster.mon_manager.raw_cluster_cmd("mgr", "self-test", + "background", "stop") + + # Check that all mgr daemons are still running + self.assertEqual(original_active, self.mgr_cluster.get_active_id()) + self.assertEqual(original_standbys, self.mgr_cluster.get_standby_ids()) + + def test_module_commands(self): + """ + That module-handled commands have appropriate behavior on + disabled/failed/recently-enabled modules. + """ + + # Calling a command on a disabled module should return the proper + # error code. + self._load_module("selftest") + self.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "module", "disable", "selftest") + with self.assertRaises(CommandFailedError) as exc_raised: + self.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "self-test", "run") + + self.assertEqual(exc_raised.exception.exitstatus, errno.EOPNOTSUPP) + + # Calling a command that really doesn't exist should give me EINVAL. + with self.assertRaises(CommandFailedError) as exc_raised: + self.mgr_cluster.mon_manager.raw_cluster_cmd( + "osd", "albatross") + + self.assertEqual(exc_raised.exception.exitstatus, errno.EINVAL) + + # Enabling a module and then immediately using ones of its commands + # should work (#21683) + self._load_module("selftest") + self.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "self-test", "config", "get", "testkey") + + # Calling a command for a failed module should return the proper + # error code. 
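To summarise the exit statuses this test expects from module-handled commands (matching the assertions in this method):

    # module disabled            -> errno.EOPNOTSUPP
    # command does not exist     -> errno.EINVAL
    # module loaded but failed   -> errno.EIO, plus a "Module ... has failed"
    #                               health warning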
+ self.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "self-test", "background", "start", "throw_exception") + with self.assertRaises(CommandFailedError) as exc_raised: + self.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "self-test", "run" + ) + self.assertEqual(exc_raised.exception.exitstatus, errno.EIO) + + # A health alert should be raised for a module that has thrown + # an exception from its serve() method + self.wait_for_health( + "Module 'selftest' has failed: Synthetic exception in serve", + timeout=30) + + self.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "module", "disable", "selftest") + + self.wait_for_health_clear(timeout=30) + + def test_module_remote(self): + """ + Use the selftest module to exercise inter-module communication + """ + self._load_module("selftest") + # The "self-test remote" operation just happens to call into + # influx. + self._load_module("influx") + + self.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "self-test", "remote") + + def test_selftest_cluster_log(self): + """ + Use the selftest module to test the cluster/audit log interface. + """ + priority_map = { + "info": "INF", + "security": "SEC", + "warning": "WRN", + "error": "ERR" + } + self._load_module("selftest") + for priority in priority_map.keys(): + message = "foo bar {}".format(priority) + log_message = "[{}] {}".format(priority_map[priority], message) + # Check for cluster/audit logs: + # 2018-09-24 09:37:10.977858 mgr.x [INF] foo bar info + # 2018-09-24 09:37:10.977860 mgr.x [SEC] foo bar security + # 2018-09-24 09:37:10.977863 mgr.x [WRN] foo bar warning + # 2018-09-24 09:37:10.977866 mgr.x [ERR] foo bar error + with self.assert_cluster_log(log_message): + self.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "self-test", "cluster-log", "cluster", + priority, message) + with self.assert_cluster_log(log_message, watch_channel="audit"): + self.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "self-test", "cluster-log", "audit", + priority, message) + + def test_selftest_cluster_log_unknown_channel(self): + """ + Use the selftest module to test the cluster/audit log interface. + """ + with self.assertRaises(CommandFailedError) as exc_raised: + self.mgr_cluster.mon_manager.raw_cluster_cmd( + "mgr", "self-test", "cluster-log", "xyz", + "ERR", "The channel does not exist") + self.assertEqual(exc_raised.exception.exitstatus, errno.EOPNOTSUPP) diff --git a/qa/tasks/mgr/test_orchestrator_cli.py b/qa/tasks/mgr/test_orchestrator_cli.py new file mode 100644 index 00000000..50416f1d --- /dev/null +++ b/qa/tasks/mgr/test_orchestrator_cli.py @@ -0,0 +1,154 @@ +import errno +import json +import logging + +from teuthology.exceptions import CommandFailedError + +from .mgr_test_case import MgrTestCase + + +log = logging.getLogger(__name__) + + +class TestOrchestratorCli(MgrTestCase): + MGRS_REQUIRED = 1 + + def _orch_cmd(self, *args): + return self.mgr_cluster.mon_manager.raw_cluster_cmd("orchestrator", *args) + + def _progress_cmd(self, *args): + return self.mgr_cluster.mon_manager.raw_cluster_cmd("progress", *args) + + def _orch_cmd_result(self, *args, **kwargs): + """ + raw_cluster_cmd doesn't support kwargs. 
+ """ + return self.mgr_cluster.mon_manager.raw_cluster_cmd_result("orchestrator", *args, **kwargs) + + def setUp(self): + super(TestOrchestratorCli, self).setUp() + + self._load_module("orchestrator_cli") + self._load_module("test_orchestrator") + self._orch_cmd("set", "backend", "test_orchestrator") + + def test_status(self): + ret = self._orch_cmd("status") + self.assertIn("test_orchestrator", ret) + + def test_device_ls(self): + ret = self._orch_cmd("device", "ls") + self.assertIn("localhost:", ret) + + def test_device_ls_refresh(self): + ret = self._orch_cmd("device", "ls", "--refresh") + self.assertIn("localhost:", ret) + + def test_device_ls_hoshs(self): + ret = self._orch_cmd("device", "ls", "localhost", "host1") + self.assertIn("localhost:", ret) + + + def test_device_ls_json(self): + ret = self._orch_cmd("device", "ls", "--format", "json") + self.assertIn("localhost", ret) + self.assertIsInstance(json.loads(ret), list) + + def test_service_ls(self): + ret = self._orch_cmd("service", "ls") + self.assertIn("ceph-mgr", ret) + + def test_service_ls_json(self): + ret = self._orch_cmd("service", "ls", "--format", "json") + self.assertIsInstance(json.loads(ret), list) + self.assertIn("ceph-mgr", ret) + + + def test_service_action(self): + self._orch_cmd("service", "reload", "mds", "cephfs") + self._orch_cmd("service", "stop", "mds", "cephfs") + self._orch_cmd("service", "start", "mds", "cephfs") + + def test_service_instance_action(self): + self._orch_cmd("service-instance", "reload", "mds", "a") + self._orch_cmd("service-instance", "stop", "mds", "a") + self._orch_cmd("service-instance", "start", "mds", "a") + + def test_osd_create(self): + self._orch_cmd("osd", "create", "*:device") + self._orch_cmd("osd", "create", "*:device,device2") + + drive_group = { + "host_pattern": "*", + "data_devices": {"paths": ["/dev/sda"]} + } + + res = self._orch_cmd_result("osd", "create", "-i", "-", stdin=json.dumps(drive_group)) + self.assertEqual(res, 0) + + with self.assertRaises(CommandFailedError): + self._orch_cmd("osd", "create", "notfound:device") + + def test_mds_add(self): + self._orch_cmd("mds", "add", "service_name") + + def test_rgw_add(self): + self._orch_cmd("rgw", "add", "service_name") + + def test_nfs_add(self): + self._orch_cmd("nfs", "add", "service_name", "pool", "--namespace", "ns") + self._orch_cmd("nfs", "add", "service_name", "pool") + + def test_osd_rm(self): + self._orch_cmd("osd", "rm", "osd.0") + + def test_mds_rm(self): + self._orch_cmd("mds", "rm", "foo") + + def test_rgw_rm(self): + self._orch_cmd("rgw", "rm", "foo") + + def test_nfs_rm(self): + self._orch_cmd("nfs", "rm", "service_name") + + def test_host_ls(self): + out = self._orch_cmd("host", "ls") + self.assertEqual(out, "localhost\n") + + def test_host_add(self): + self._orch_cmd("host", "add", "hostname") + + def test_host_rm(self): + self._orch_cmd("host", "rm", "hostname") + + def test_mon_update(self): + self._orch_cmd("mon", "update", "3") + self._orch_cmd("mon", "update", "3", "host1", "host2", "host3") + self._orch_cmd("mon", "update", "3", "host1:network", "host2:network", "host3:network") + + def test_mgr_update(self): + self._orch_cmd("mgr", "update", "3") + + def test_nfs_update(self): + self._orch_cmd("nfs", "update", "service_name", "2") + + def test_error(self): + ret = self._orch_cmd_result("host", "add", "raise_no_support") + self.assertEqual(ret, errno.ENOENT) + ret = self._orch_cmd_result("host", "add", "raise_bug") + self.assertEqual(ret, errno.EINVAL) + ret = self._orch_cmd_result("host", "add", 
"raise_not_implemented") + self.assertEqual(ret, errno.ENOENT) + ret = self._orch_cmd_result("host", "add", "raise_no_orchestrator") + self.assertEqual(ret, errno.ENOENT) + ret = self._orch_cmd_result("host", "add", "raise_import_error") + self.assertEqual(ret, errno.ENOENT) + + def test_progress(self): + self._progress_cmd('clear') + evs = json.loads(self._progress_cmd('json'))['completed'] + self.assertEqual(len(evs), 0) + self._orch_cmd("mgr", "update", "4") + evs = json.loads(self._progress_cmd('json'))['completed'] + self.assertEqual(len(evs), 1) + self.assertIn('update_mgrs', evs[0]['message']) diff --git a/qa/tasks/mgr/test_progress.py b/qa/tasks/mgr/test_progress.py new file mode 100644 index 00000000..8c06dd0e --- /dev/null +++ b/qa/tasks/mgr/test_progress.py @@ -0,0 +1,376 @@ + +import json +import logging +import time +from unittest import SkipTest + +from .mgr_test_case import MgrTestCase + + +log = logging.getLogger(__name__) + + +class TestProgress(MgrTestCase): + POOL = "progress_data" + + # How long we expect to wait at most between taking an OSD out + # and seeing the progress event pop up. + EVENT_CREATION_PERIOD = 5 + + WRITE_PERIOD = 30 + + # Generous period for OSD recovery, should be same order of magnitude + # to how long it took to write the data to begin with + RECOVERY_PERIOD = WRITE_PERIOD * 4 + + def _get_progress(self): + out = self.mgr_cluster.mon_manager.raw_cluster_cmd("progress", "json") + return json.loads(out) + + def _all_events(self): + """ + To avoid racing on completion, we almost always want to look + for events in the total list of active and complete, so + munge them into a single list. + """ + p = self._get_progress() + log.info(json.dumps(p, indent=2)) + return p['events'] + p['completed'] + + def _events_in_progress(self): + """ + this function returns all events that are in progress + """ + p = self._get_progress() + log.info(json.dumps(p, indent=2)) + return p['events'] + + def _completed_events(self): + """ + This function returns all events that are completed + """ + p = self._get_progress() + log.info(json.dumps(p, indent=2)) + return p['completed'] + + def is_osd_marked_out(self, ev): + return ev['message'].endswith('marked out') + + def is_osd_marked_in(self, ev): + return ev['message'].endswith('marked in') + + def _get_osd_in_out_events(self, marked='both'): + """ + Return the event that deals with OSDs being + marked in, out or both + """ + + marked_in_events = [] + marked_out_events = [] + + events_in_progress = self._events_in_progress() + for ev in events_in_progress: + if self.is_osd_marked_out(ev): + marked_out_events.append(ev) + elif self.is_osd_marked_in(ev): + marked_in_events.append(ev) + + if marked == 'both': + return [marked_in_events] + [marked_out_events] + elif marked == 'in': + return marked_in_events + else: + return marked_out_events + + def _osd_in_out_events_count(self, marked='both'): + """ + Count the number of on going recovery events that deals with + OSDs being marked in, out or both. 
+ """ + events_in_progress = self._events_in_progress() + marked_in_count = 0 + marked_out_count = 0 + + for ev in events_in_progress: + if self.is_osd_marked_out(ev): + marked_out_count += 1 + elif self.is_osd_marked_in(ev): + marked_in_count += 1 + + if marked == 'both': + return marked_in_count + marked_out_count + elif marked == 'in': + return marked_in_count + else: + return marked_out_count + + def _setup_pool(self, size=None): + self.mgr_cluster.mon_manager.create_pool(self.POOL) + if size is not None: + self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'osd', 'pool', 'set', self.POOL, 'size', str(size)) + + def _write_some_data(self, t): + """ + To adapt to test systems of varying performance, we write + data for a defined time period, rather than to a defined + capacity. This will hopefully result in a similar timescale + for PG recovery after an OSD failure. + """ + + args = [ + "rados", "-p", self.POOL, "bench", str(t), "write", "-t", "16"] + + self.mgr_cluster.admin_remote.run(args=args, wait=True) + + def _osd_count(self): + osd_map = self.mgr_cluster.mon_manager.get_osd_dump_json() + return len(osd_map['osds']) + + def setUp(self): + super(TestProgress, self).setUp() + # Ensure we have at least four OSDs + if self._osd_count() < 4: + raise SkipTest("Not enough OSDS!") + + # Remove any filesystems so that we can remove their pools + if self.mds_cluster: + self.mds_cluster.mds_stop() + self.mds_cluster.mds_fail() + self.mds_cluster.delete_all_filesystems() + + # Remove all other pools + for pool in self.mgr_cluster.mon_manager.get_osd_dump_json()['pools']: + self.mgr_cluster.mon_manager.remove_pool(pool['pool_name']) + + self._load_module("progress") + self.mgr_cluster.mon_manager.raw_cluster_cmd('progress', 'clear') + + def _simulate_failure(self, osd_ids=None): + """ + Common lead-in to several tests: get some data in the cluster, + then mark an OSD out to trigger the start of a progress event. + + Return the JSON representation of the failure event. + """ + + if osd_ids is None: + osd_ids = [0] + + self._setup_pool() + self._write_some_data(self.WRITE_PERIOD) + + for osd_id in osd_ids: + self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'osd', 'out', str(osd_id)) + + # Wait for a progress event to pop up + self.wait_until_equal(lambda: self._osd_in_out_events_count('out'), 1, + timeout=self.EVENT_CREATION_PERIOD*2, + period=1) + ev = self._get_osd_in_out_events('out')[0] + log.info(json.dumps(ev, indent=1)) + self.assertIn("Rebalancing after osd.0 marked out", ev['message']) + + return ev + + def _simulate_back_in(self, osd_ids, initial_event): + + for osd_id in osd_ids: + self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'osd', 'in', str(osd_id)) + + # First Event should complete promptly + self.wait_until_true(lambda: self._is_complete(initial_event['id']), + timeout=self.EVENT_CREATION_PERIOD) + + try: + # Wait for progress event marked in to pop up + self.wait_until_equal(lambda: self._osd_in_out_events_count('in'), 1, + timeout=self.EVENT_CREATION_PERIOD*2, + period=1) + except RuntimeError as ex: + if not "Timed out after" in str(ex): + raise ex + + log.info("There was no PGs affected by osd being marked in") + return None + + new_event = self._get_osd_in_out_events('in')[0] + return new_event + + def _no_events_anywhere(self): + """ + Whether there are any live or completed events + """ + p = self._get_progress() + total_events = len(p['events']) + len(p['completed']) + return total_events == 0 + + def _is_quiet(self): + """ + Whether any progress events are live. 
+ """ + return len(self._get_progress()['events']) == 0 + + def _is_complete(self, ev_id): + progress = self._get_progress() + live_ids = [ev['id'] for ev in progress['events']] + complete_ids = [ev['id'] for ev in progress['completed']] + if ev_id in complete_ids: + assert ev_id not in live_ids + return True + else: + assert ev_id in live_ids + return False + + def tearDown(self): + if self.POOL in self.mgr_cluster.mon_manager.pools: + self.mgr_cluster.mon_manager.remove_pool(self.POOL) + + osd_map = self.mgr_cluster.mon_manager.get_osd_dump_json() + for osd in osd_map['osds']: + if osd['weight'] == 0.0: + self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'osd', 'in', str(osd['osd'])) + + super(TestProgress, self).tearDown() + + def test_osd_healthy_recovery(self): + """ + The simple recovery case: an OSD goes down, its PGs get a new + placement, and we wait for the PG to get healthy in its new + locations. + """ + ev = self._simulate_failure() + + # Wait for progress event to ultimately reach completion + self.wait_until_true(lambda: self._is_complete(ev['id']), + timeout=self.RECOVERY_PERIOD) + self.assertTrue(self._is_quiet()) + + def test_pool_removal(self): + """ + That a pool removed during OSD recovery causes the + progress event to be correctly marked complete once there + is no more data to move. + """ + ev = self._simulate_failure() + + self.mgr_cluster.mon_manager.remove_pool(self.POOL) + + # Event should complete promptly + self.wait_until_true(lambda: self._is_complete(ev['id']), + timeout=self.EVENT_CREATION_PERIOD) + self.assertTrue(self._is_quiet()) + + def test_osd_came_back(self): + """ + When a recovery is underway, but then the out OSD + comes back in, such that recovery is no longer necessary. + It should create another event for when osd is marked in + and cancel the one that is still ongoing. + """ + ev1 = self._simulate_failure() + + ev2 = self._simulate_back_in([0], ev1) + + # Wait for progress event to ultimately complete + self.wait_until_true(lambda: self._is_complete(ev2['id']), + timeout=self.RECOVERY_PERIOD) + + self.assertTrue(self._is_quiet()) + + def test_osd_cannot_recover(self): + """ + When the cluster cannot recover from a lost OSD, e.g. + because there is no suitable new placement for it. + (a size=3 pool when there are only 2 OSDs left) + (a size=3 pool when the remaining osds are only on 2 hosts) + + Progress event should not be created. + """ + + pool_size = 3 + + self._setup_pool(size=pool_size) + self._write_some_data(self.WRITE_PERIOD) + + # Fail enough OSDs so there are less than N_replicas OSDs + # available. 
+ osd_count = self._osd_count() + + # First do some failures that will result in a normal rebalance + # (Assumption: we're in a test environment that is configured + # not to require replicas be on different hosts, like teuthology) + for osd_id in range(0, osd_count - pool_size): + self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'osd', 'out', str(osd_id)) + + # We should see an event for each of the OSDs we took out + self.wait_until_equal( + lambda: len(self._all_events()), + osd_count - pool_size, + timeout=self.EVENT_CREATION_PERIOD) + + # Those should complete cleanly + self.wait_until_true( + lambda: self._is_quiet(), + timeout=self.RECOVERY_PERIOD + ) + + # Fail one last OSD, at the point the PGs have nowhere to go + victim_osd = osd_count - pool_size + self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'osd', 'out', str(victim_osd)) + + # Check that no event is created + time.sleep(self.EVENT_CREATION_PERIOD) + + self.assertEqual(len(self._all_events()), osd_count - pool_size) + + def test_turn_off_module(self): + """ + When the the module is turned off, there should not + be any on going events or completed events. + Also module should not accept any kind of Remote Event + coming in from other module, however, once it is turned + back, on creating an event should be working as it is. + """ + + pool_size = 3 + self._setup_pool(size=pool_size) + self._write_some_data(self.WRITE_PERIOD) + + self.mgr_cluster.mon_manager.raw_cluster_cmd("progress", "off") + + self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'osd', 'out', '0') + + time.sleep(self.EVENT_CREATION_PERIOD) + + self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'osd', 'in', '0') + + time.sleep(self.EVENT_CREATION_PERIOD) + + self.assertTrue(self._is_quiet()) + + self.mgr_cluster.mon_manager.raw_cluster_cmd("progress", "on") + + self._write_some_data(self.WRITE_PERIOD) + + self.mgr_cluster.mon_manager.raw_cluster_cmd( + 'osd', 'out', '0') + + # Wait for a progress event to pop up + self.wait_until_equal(lambda: len(self._all_events()), 1, + timeout=self.EVENT_CREATION_PERIOD*2) + + ev = self._all_events()[0] + + log.info(json.dumps(ev, indent=1)) + + self.wait_until_true(lambda: self._is_complete(ev['id']), + timeout=self.RECOVERY_PERIOD) + self.assertTrue(self._is_quiet()) diff --git a/qa/tasks/mgr/test_prometheus.py b/qa/tasks/mgr/test_prometheus.py new file mode 100644 index 00000000..376556ab --- /dev/null +++ b/qa/tasks/mgr/test_prometheus.py @@ -0,0 +1,79 @@ +import json +import logging +import requests + +from .mgr_test_case import MgrTestCase + +log = logging.getLogger(__name__) + + +class TestPrometheus(MgrTestCase): + MGRS_REQUIRED = 3 + + def setUp(self): + super(TestPrometheus, self).setUp() + self.setup_mgrs() + + def test_file_sd_command(self): + self._assign_ports("prometheus", "server_port") + self._load_module("prometheus") + + result = json.loads(self.mgr_cluster.mon_manager.raw_cluster_cmd( + "prometheus", "file_sd_config")) + mgr_map = self.mgr_cluster.get_mgr_map() + self.assertEqual(len(result[0]['targets']), len(mgr_map['standbys']) + 1) + + + + def test_standby(self): + self._assign_ports("prometheus", "server_port") + self._load_module("prometheus") + + original_active = self.mgr_cluster.get_active_id() + + original_uri = self._get_uri("prometheus") + log.info("Originally running at {0}".format(original_uri)) + + self.mgr_cluster.mgr_fail(original_active) + + failed_over_uri = self._get_uri("prometheus") + log.info("After failover running at {0}".format(failed_over_uri)) + + self.assertNotEqual(original_uri, 
failed_over_uri) + + # The original active daemon should have come back up as a standby + # and serve some html under "/" and an empty answer under /metrics + r = requests.get(original_uri, allow_redirects=False) + self.assertEqual(r.status_code, 200) + r = requests.get(original_uri + "metrics", allow_redirects=False) + self.assertEqual(r.status_code, 200) + self.assertEqual(r.headers["content-type"], "text/plain;charset=utf-8") + self.assertEqual(r.headers["server"], "Ceph-Prometheus") + + def test_urls(self): + self._assign_ports("prometheus", "server_port") + self._load_module("prometheus") + + base_uri = self._get_uri("prometheus") + + # This is a very simple smoke test to check that the module can + # give us a 200 response to requests. We're not testing that + # the content is correct or even renders! + + urls = [ + "/", + "/metrics" + ] + + failures = [] + + for url in urls: + r = requests.get(base_uri + url, allow_redirects=False) + if r.status_code != 200: + failures.append(url) + + log.info("{0}: {1} ({2} bytes)".format( + url, r.status_code, len(r.content) + )) + + self.assertListEqual(failures, []) diff --git a/qa/tasks/mgr/test_ssh_orchestrator.py b/qa/tasks/mgr/test_ssh_orchestrator.py new file mode 100644 index 00000000..f7c1c0ed --- /dev/null +++ b/qa/tasks/mgr/test_ssh_orchestrator.py @@ -0,0 +1,23 @@ +import logging +from tasks.mgr.mgr_test_case import MgrTestCase + +log = logging.getLogger(__name__) + +class TestOrchestratorCli(MgrTestCase): + MGRS_REQUIRED = 1 + + def _orch_cmd(self, *args): + return self.mgr_cluster.mon_manager.raw_cluster_cmd("orchestrator", *args) + + def setUp(self): + super(TestOrchestratorCli, self).setUp() + self._load_module("orchestrator_cli") + self._load_module("ssh") + self._orch_cmd("set", "backend", "ssh") + + def test_host_ls(self): + self._orch_cmd("host", "add", "osd0") + self._orch_cmd("host", "add", "mon0") + ret = self._orch_cmd("host", "ls") + self.assertIn("osd0", ret) + self.assertIn("mon0", ret) diff --git a/qa/tasks/mon_clock_skew_check.py b/qa/tasks/mon_clock_skew_check.py new file mode 100644 index 00000000..59d4169d --- /dev/null +++ b/qa/tasks/mon_clock_skew_check.py @@ -0,0 +1,73 @@ +""" +Handle clock skews in monitors. +""" +import logging +import time +from tasks import ceph_manager +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +class ClockSkewCheck: + """ + Check if there are any clock skews among the monitors in the + quorum. + + This task accepts the following options: + + interval amount of seconds to wait before check. (default: 30.0) + expect-skew 'true' or 'false', to indicate whether to expect a skew during + the run or not. If 'true', the test will fail if no skew is + found, and succeed if a skew is indeed found; if 'false', it's + the other way around. 
(default: false) + + - mon_clock_skew_check: + expect-skew: true + """ + + def __init__(self, ctx, manager, config, logger): + self.ctx = ctx + self.manager = manager + + self.stopping = False + self.logger = logger + self.config = config + + if self.config is None: + self.config = dict() + + +def task(ctx, config): + if config is None: + config = {} + assert isinstance(config, dict), \ + 'mon_clock_skew_check task only accepts a dict for configuration' + interval = float(config.get('interval', 30.0)) + expect_skew = config.get('expect-skew', False) + + log.info('Beginning mon_clock_skew_check...') + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + quorum_size = len(teuthology.get_mon_names(ctx)) + manager.wait_for_mon_quorum_size(quorum_size) + + # wait a bit + log.info('sleeping for {s} seconds'.format( + s=interval)) + time.sleep(interval) + + health = manager.get_mon_health(True) + log.info('got health %s' % health) + if expect_skew: + if 'MON_CLOCK_SKEW' not in health['checks']: + raise RuntimeError('expected MON_CLOCK_SKEW but got none') + else: + if 'MON_CLOCK_SKEW' in health['checks']: + raise RuntimeError('got MON_CLOCK_SKEW but expected none') + diff --git a/qa/tasks/mon_recovery.py b/qa/tasks/mon_recovery.py new file mode 100644 index 00000000..fa7aa1a8 --- /dev/null +++ b/qa/tasks/mon_recovery.py @@ -0,0 +1,80 @@ +""" +Monitor recovery +""" +import logging +from tasks import ceph_manager +from teuthology import misc as teuthology + + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Test monitor recovery. + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + mons = [f.split('.')[1] for f in teuthology.get_mon_names(ctx)] + log.info("mon ids = %s" % mons) + + manager.wait_for_mon_quorum_size(len(mons)) + + log.info('verifying all monitors are in the quorum') + for m in mons: + s = manager.get_mon_status(m) + assert s['state'] == 'leader' or s['state'] == 'peon' + assert len(s['quorum']) == len(mons) + + log.info('restarting each monitor in turn') + for m in mons: + # stop a monitor + manager.kill_mon(m) + manager.wait_for_mon_quorum_size(len(mons) - 1) + + # restart + manager.revive_mon(m) + manager.wait_for_mon_quorum_size(len(mons)) + + # in forward and reverse order, + rmons = mons + rmons.reverse() + for mons in mons, rmons: + log.info('stopping all monitors') + for m in mons: + manager.kill_mon(m) + + log.info('forming a minimal quorum for %s, then adding monitors' % mons) + qnum = (len(mons) // 2) + 1 + num = 0 + for m in mons: + manager.revive_mon(m) + num += 1 + if num >= qnum: + manager.wait_for_mon_quorum_size(num) + + # on both leader and non-leader ranks... 
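(Here 'rank' is simply an index into the mons list assembled above, not a monmap rank as such; taking out entries 0 and 1 in turn is what covers both a leader and a peon, on the usual assumption that the first monitor in the list holds the leadership when the full quorum is up.)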
+ for rank in [0, 1]: + # take one out + log.info('removing mon %s' % mons[rank]) + manager.kill_mon(mons[rank]) + manager.wait_for_mon_quorum_size(len(mons) - 1) + + log.info('causing some monitor log activity') + m = 30 + for n in range(1, m): + manager.raw_cluster_cmd('log', '%d of %d' % (n, m)) + + log.info('adding mon %s back in' % mons[rank]) + manager.revive_mon(mons[rank]) + manager.wait_for_mon_quorum_size(len(mons)) diff --git a/qa/tasks/mon_thrash.py b/qa/tasks/mon_thrash.py new file mode 100644 index 00000000..d45e8a88 --- /dev/null +++ b/qa/tasks/mon_thrash.py @@ -0,0 +1,343 @@ +""" +Monitor thrash +""" +import logging +import contextlib +import random +import time +import gevent +import json +import math +from teuthology import misc as teuthology +from tasks import ceph_manager + +log = logging.getLogger(__name__) + +def _get_mons(ctx): + """ + Get monitor names from the context value. + """ + mons = [f[len('mon.'):] for f in teuthology.get_mon_names(ctx)] + return mons + +class MonitorThrasher: + """ + How it works:: + + - pick a monitor + - kill it + - wait for quorum to be formed + - sleep for 'revive_delay' seconds + - revive monitor + - wait for quorum to be formed + - sleep for 'thrash_delay' seconds + + Options:: + + seed Seed to use on the RNG to reproduce a previous + behaviour (default: None; i.e., not set) + revive_delay Number of seconds to wait before reviving + the monitor (default: 10) + thrash_delay Number of seconds to wait in-between + test iterations (default: 0) + thrash_store Thrash monitor store before killing the monitor being thrashed (default: False) + thrash_store_probability Probability of thrashing a monitor's store + (default: 50) + thrash_many Thrash multiple monitors instead of just one. If + 'maintain-quorum' is set to False, then we will + thrash up to as many monitors as there are + available. (default: False) + maintain_quorum Always maintain quorum, taking care on how many + monitors we kill during the thrashing. If we + happen to only have one or two monitors configured, + if this option is set to True, then we won't run + this task as we cannot guarantee maintenance of + quorum. Setting it to false however would allow the + task to run with as many as just one single monitor. + (default: True) + freeze_mon_probability: how often to freeze the mon instead of killing it, + in % (default: 0) + freeze_mon_duration: how many seconds to freeze the mon (default: 15) + scrub Scrub after each iteration (default: True) + + Note: if 'store-thrash' is set to True, then 'maintain-quorum' must also + be set to True. 
+ + For example:: + + tasks: + - ceph: + - mon_thrash: + revive_delay: 20 + thrash_delay: 1 + thrash_store: true + thrash_store_probability: 40 + seed: 31337 + maintain_quorum: true + thrash_many: true + - ceph-fuse: + - workunit: + clients: + all: + - mon/workloadgen.sh + """ + def __init__(self, ctx, manager, config, logger): + self.ctx = ctx + self.manager = manager + self.manager.wait_for_clean() + + self.stopping = False + self.logger = logger + self.config = config + + if self.config is None: + self.config = dict() + + """ Test reproducibility """ + self.random_seed = self.config.get('seed', None) + + if self.random_seed is None: + self.random_seed = int(time.time()) + + self.rng = random.Random() + self.rng.seed(int(self.random_seed)) + + """ Monitor thrashing """ + self.revive_delay = float(self.config.get('revive_delay', 10.0)) + self.thrash_delay = float(self.config.get('thrash_delay', 0.0)) + + self.thrash_many = self.config.get('thrash_many', False) + self.maintain_quorum = self.config.get('maintain_quorum', True) + + self.scrub = self.config.get('scrub', True) + + self.freeze_mon_probability = float(self.config.get('freeze_mon_probability', 10)) + self.freeze_mon_duration = float(self.config.get('freeze_mon_duration', 15.0)) + + assert self.max_killable() > 0, \ + 'Unable to kill at least one monitor with the current config.' + + """ Store thrashing """ + self.store_thrash = self.config.get('store_thrash', False) + self.store_thrash_probability = int( + self.config.get('store_thrash_probability', 50)) + if self.store_thrash: + assert self.store_thrash_probability > 0, \ + 'store_thrash is set, probability must be > 0' + assert self.maintain_quorum, \ + 'store_thrash = true must imply maintain_quorum = true' + + self.thread = gevent.spawn(self.do_thrash) + + def log(self, x): + """ + locally log info messages + """ + self.logger.info(x) + + def do_join(self): + """ + Break out of this processes thrashing loop. + """ + self.stopping = True + self.thread.get() + + def should_thrash_store(self): + """ + If allowed, indicate that we should thrash a certain percentage of + the time as determined by the store_thrash_probability value. + """ + if not self.store_thrash: + return False + return self.rng.randrange(0, 101) < self.store_thrash_probability + + def thrash_store(self, mon): + """ + Thrash the monitor specified. + :param mon: monitor to thrash + """ + addr = self.ctx.ceph['ceph'].mons['mon.%s' % mon] + self.log('thrashing mon.{id}@{addr} store'.format(id=mon, addr=addr)) + out = self.manager.raw_cluster_cmd('-m', addr, 'sync', 'force') + j = json.loads(out) + assert j['ret'] == 0, \ + 'error forcing store sync on mon.{id}:\n{ret}'.format( + id=mon,ret=out) + + def should_freeze_mon(self): + """ + Indicate that we should freeze a certain percentago of the time + as determined by the freeze_mon_probability value. + """ + return self.rng.randrange(0, 101) < self.freeze_mon_probability + + def freeze_mon(self, mon): + """ + Send STOP signal to freeze the monitor. + """ + log.info('Sending STOP to mon %s', mon) + self.manager.signal_mon(mon, 19) # STOP + + def unfreeze_mon(self, mon): + """ + Send CONT signal to unfreeze the monitor. 
+ """ + log.info('Sending CONT to mon %s', mon) + self.manager.signal_mon(mon, 18) # CONT + + def kill_mon(self, mon): + """ + Kill the monitor specified + """ + self.log('killing mon.{id}'.format(id=mon)) + self.manager.kill_mon(mon) + + def revive_mon(self, mon): + """ + Revive the monitor specified + """ + self.log('killing mon.{id}'.format(id=mon)) + self.log('reviving mon.{id}'.format(id=mon)) + self.manager.revive_mon(mon) + + def max_killable(self): + """ + Return the maximum number of monitors we can kill. + """ + m = len(_get_mons(self.ctx)) + if self.maintain_quorum: + return max(math.ceil(m/2.0)-1, 0) + else: + return m + + def do_thrash(self): + """ + Cotinuously loop and thrash the monitors. + """ + self.log('start thrashing') + self.log('seed: {s}, revive delay: {r}, thrash delay: {t} '\ + 'thrash many: {tm}, maintain quorum: {mq} '\ + 'store thrash: {st}, probability: {stp} '\ + 'freeze mon: prob {fp} duration {fd}'.format( + s=self.random_seed,r=self.revive_delay,t=self.thrash_delay, + tm=self.thrash_many, mq=self.maintain_quorum, + st=self.store_thrash,stp=self.store_thrash_probability, + fp=self.freeze_mon_probability,fd=self.freeze_mon_duration, + )) + + while not self.stopping: + mons = _get_mons(self.ctx) + self.manager.wait_for_mon_quorum_size(len(mons)) + self.log('making sure all monitors are in the quorum') + for m in mons: + s = self.manager.get_mon_status(m) + assert s['state'] == 'leader' or s['state'] == 'peon' + assert len(s['quorum']) == len(mons) + + kill_up_to = self.rng.randrange(1, self.max_killable()+1) + mons_to_kill = self.rng.sample(mons, kill_up_to) + self.log('monitors to thrash: {m}'.format(m=mons_to_kill)) + + mons_to_freeze = [] + for mon in mons: + if mon in mons_to_kill: + continue + if self.should_freeze_mon(): + mons_to_freeze.append(mon) + self.log('monitors to freeze: {m}'.format(m=mons_to_freeze)) + + for mon in mons_to_kill: + self.log('thrashing mon.{m}'.format(m=mon)) + + """ we only thrash stores if we are maintaining quorum """ + if self.should_thrash_store() and self.maintain_quorum: + self.thrash_store(mon) + + self.kill_mon(mon) + + if mons_to_freeze: + for mon in mons_to_freeze: + self.freeze_mon(mon) + self.log('waiting for {delay} secs to unfreeze mons'.format( + delay=self.freeze_mon_duration)) + time.sleep(self.freeze_mon_duration) + for mon in mons_to_freeze: + self.unfreeze_mon(mon) + + if self.maintain_quorum: + self.manager.wait_for_mon_quorum_size(len(mons)-len(mons_to_kill)) + for m in mons: + if m in mons_to_kill: + continue + s = self.manager.get_mon_status(m) + assert s['state'] == 'leader' or s['state'] == 'peon' + assert len(s['quorum']) == len(mons)-len(mons_to_kill) + + self.log('waiting for {delay} secs before reviving monitors'.format( + delay=self.revive_delay)) + time.sleep(self.revive_delay) + + for mon in mons_to_kill: + self.revive_mon(mon) + # do more freezes + if mons_to_freeze: + for mon in mons_to_freeze: + self.freeze_mon(mon) + self.log('waiting for {delay} secs to unfreeze mons'.format( + delay=self.freeze_mon_duration)) + time.sleep(self.freeze_mon_duration) + for mon in mons_to_freeze: + self.unfreeze_mon(mon) + + self.manager.wait_for_mon_quorum_size(len(mons)) + for m in mons: + s = self.manager.get_mon_status(m) + assert s['state'] == 'leader' or s['state'] == 'peon' + assert len(s['quorum']) == len(mons) + + if self.scrub: + self.log('triggering scrub') + try: + self.manager.raw_cluster_cmd('scrub') + except Exception as e: + log.warning("Ignoring exception while triggering scrub: %s", e) + + 
if self.thrash_delay > 0.0: + self.log('waiting for {delay} secs before continuing thrashing'.format( + delay=self.thrash_delay)) + time.sleep(self.thrash_delay) + +@contextlib.contextmanager +def task(ctx, config): + """ + Stress test the monitor by thrashing them while another task/workunit + is running. + + Please refer to MonitorThrasher class for further information on the + available options. + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'mon_thrash task only accepts a dict for configuration' + assert len(_get_mons(ctx)) > 2, \ + 'mon_thrash task requires at least 3 monitors' + log.info('Beginning mon_thrash...') + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + thrash_proc = MonitorThrasher(ctx, + manager, config, + logger=log.getChild('mon_thrasher')) + try: + log.debug('Yielding') + yield + finally: + log.info('joining mon_thrasher') + thrash_proc.do_join() + mons = _get_mons(ctx) + manager.wait_for_mon_quorum_size(len(mons)) diff --git a/qa/tasks/multibench.py b/qa/tasks/multibench.py new file mode 100644 index 00000000..c2a7299f --- /dev/null +++ b/qa/tasks/multibench.py @@ -0,0 +1,61 @@ +""" +Multibench testing +""" +import contextlib +import logging +import time +import copy +import gevent + +from tasks import radosbench + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Run multibench + + The config should be as follows: + + multibench: + time: + segments: + radosbench: + + example: + + tasks: + - ceph: + - multibench: + clients: [client.0] + time: 360 + - interactive: + """ + log.info('Beginning multibench...') + assert isinstance(config, dict), \ + "please list clients to run on" + + def run_one(num): + """Run test spawn from gevent""" + start = time.time() + if not config.get('radosbench'): + benchcontext = {} + else: + benchcontext = copy.copy(config.get('radosbench')) + iterations = 0 + while time.time() - start < int(config.get('time', 600)): + log.info("Starting iteration %s of segment %s"%(iterations, num)) + benchcontext['pool'] = str(num) + "-" + str(iterations) + with radosbench.task(ctx, benchcontext): + time.sleep() + iterations += 1 + log.info("Starting %s threads"%(str(config.get('segments', 3)),)) + segments = [ + gevent.spawn(run_one, i) + for i in range(0, int(config.get('segments', 3)))] + + try: + yield + finally: + [i.get() for i in segments] diff --git a/qa/tasks/netem.py b/qa/tasks/netem.py new file mode 100644 index 00000000..1d9fd98f --- /dev/null +++ b/qa/tasks/netem.py @@ -0,0 +1,268 @@ +""" +Task to run tests with network delay between two remotes using tc and netem. +Reference:https://wiki.linuxfoundation.org/networking/netem. 
+ +""" + +import logging +import contextlib +from paramiko import SSHException +import socket +import time +import gevent +import argparse + +log = logging.getLogger(__name__) + + +def set_priority(interface): + + # create a priority queueing discipline + return ['sudo', 'tc', 'qdisc', 'add', 'dev', interface, 'root', 'handle', '1:', 'prio'] + + +def show_tc(interface): + + # shows tc device present + return ['sudo', 'tc', 'qdisc', 'show', 'dev', interface] + + +def del_tc(interface): + + return ['sudo', 'tc', 'qdisc', 'del', 'dev', interface, 'root'] + + +def cmd_prefix(interface): + + # prepare command to set delay + cmd1 = ['sudo', 'tc', 'qdisc', 'add', 'dev', interface, 'parent', + '1:1', 'handle', '2:', 'netem', 'delay'] + + # prepare command to change delay + cmd2 = ['sudo', 'tc', 'qdisc', 'replace', 'dev', interface, 'root', 'netem', 'delay'] + + # prepare command to apply filter to the matched ip/host + + cmd3 = ['sudo', 'tc', 'filter', 'add', 'dev', interface, + 'parent', '1:0', 'protocol', 'ip', 'pref', '55', + 'handle', '::55', 'u32', 'match', 'ip', 'dst'] + + return cmd1, cmd2, cmd3 + + +def static_delay(remote, host, interface, delay): + + """ Sets a constant delay between two hosts to emulate network delays using tc qdisc and netem""" + + set_delay, change_delay, set_ip = cmd_prefix(interface) + + ip = socket.gethostbyname(host.hostname) + + tc = remote.sh(show_tc(interface)) + if tc.strip().find('refcnt') == -1: + # call set_priority() func to create priority queue + # if not already created(indicated by -1) + log.info('Create priority queue') + remote.run(args=set_priority(interface)) + + # set static delay, with +/- 5ms jitter with normal distribution as default + log.info('Setting delay to %s' % delay) + set_delay.extend(['%s' % delay, '5ms', 'distribution', 'normal']) + remote.run(args=set_delay) + + # set delay to a particular remote node via ip + log.info('Delay set on %s' % remote) + set_ip.extend(['%s' % ip, 'flowid', '2:1']) + remote.run(args=set_ip) + else: + # if the device is already created, only change the delay + log.info('Setting delay to %s' % delay) + change_delay.extend(['%s' % delay, '5ms', 'distribution', 'normal']) + remote.run(args=change_delay) + + +def variable_delay(remote, host, interface, delay_range=[]): + + """ Vary delay between two values""" + + set_delay, change_delay, set_ip = cmd_prefix(interface) + + ip = socket.gethostbyname(host.hostname) + + # delay1 has to be lower than delay2 + delay1 = delay_range[0] + delay2 = delay_range[1] + + tc = remote.sh(show_tc(interface)) + if tc.strip().find('refcnt') == -1: + # call set_priority() func to create priority queue + # if not already created(indicated by -1) + remote.run(args=set_priority(interface)) + + # set variable delay + log.info('Setting varying delay') + set_delay.extend(['%s' % delay1, '%s' % delay2]) + remote.run(args=set_delay) + + # set delay to a particular remote node via ip + log.info('Delay set on %s' % remote) + set_ip.extend(['%s' % ip, 'flowid', '2:1']) + remote.run(args=set_ip) + else: + # if the device is already created, only change the delay + log.info('Setting varying delay') + change_delay.extend(['%s' % delay1, '%s' % delay2]) + remote.run(args=change_delay) + + +def delete_dev(remote, interface): + + """ Delete the qdisc if present""" + + log.info('Delete tc') + tc = remote.sh(show_tc(interface)) + if tc.strip().find('refcnt') != -1: + remote.run(args=del_tc(interface)) + + +class Toggle: + + stop_event = gevent.event.Event() + + def __init__(self, ctx, remote, host, 
interface, interval): + self.ctx = ctx + self.remote = remote + self.host = host + self.interval = interval + self.interface = interface + self.ip = socket.gethostbyname(self.host.hostname) + + def packet_drop(self): + + """ Drop packets to the remote ip specified""" + + _, _, set_ip = cmd_prefix(self.interface) + + tc = self.remote.sh(show_tc(self.interface)) + if tc.strip().find('refcnt') == -1: + self.remote.run(args=set_priority(self.interface)) + # packet drop to specific ip + log.info('Drop all packets to %s' % self.host) + set_ip.extend(['%s' % self.ip, 'action', 'drop']) + self.remote.run(args=set_ip) + + def link_toggle(self): + + """ + For toggling packet drop and recovery in regular interval. + If interval is 5s, link is up for 5s and link is down for 5s + """ + + while not self.stop_event.is_set(): + self.stop_event.wait(timeout=self.interval) + # simulate link down + try: + self.packet_drop() + log.info('link down') + except SSHException: + log.debug('Failed to run command') + + self.stop_event.wait(timeout=self.interval) + # if qdisc exist,delete it. + try: + delete_dev(self.remote, self.interface) + log.info('link up') + except SSHException: + log.debug('Failed to run command') + + def begin(self, gname): + self.thread = gevent.spawn(self.link_toggle) + self.ctx.netem.names[gname] = self.thread + + def end(self, gname): + self.stop_event.set() + log.info('gname is {}'.format(self.ctx.netem.names[gname])) + self.ctx.netem.names[gname].get() + + def cleanup(self): + """ + Invoked during unwinding if the test fails or exits before executing task 'link_recover' + """ + log.info('Clean up') + self.stop_event.set() + self.thread.get() + + +@contextlib.contextmanager +def task(ctx, config): + + """ + - netem: + clients: [c1.rgw.0] + iface: eno1 + dst_client: [c2.rgw.1] + delay: 10ms + + - netem: + clients: [c1.rgw.0] + iface: eno1 + dst_client: [c2.rgw.1] + delay_range: [10ms, 20ms] # (min, max) + + - netem: + clients: [rgw.1, mon.0] + iface: eno1 + gname: t1 + dst_client: [c2.rgw.1] + link_toggle_interval: 10 # no unit mentioned. By default takes seconds. 
+ + - netem: + clients: [rgw.1, mon.0] + iface: eno1 + link_recover: [t1, t2] + + + """ + + log.info('config %s' % config) + + assert isinstance(config, dict), \ + "please list clients to run on" + if not hasattr(ctx, 'netem'): + ctx.netem = argparse.Namespace() + ctx.netem.names = {} + + if config.get('dst_client') is not None: + dst = config.get('dst_client') + (host,) = ctx.cluster.only(dst).remotes.keys() + + for role in config.get('clients', None): + (remote,) = ctx.cluster.only(role).remotes.keys() + ctx.netem.remote = remote + if config.get('delay', False): + static_delay(remote, host, config.get('iface'), config.get('delay')) + if config.get('delay_range', False): + variable_delay(remote, host, config.get('iface'), config.get('delay_range')) + if config.get('link_toggle_interval', False): + log.info('Toggling link for %s' % config.get('link_toggle_interval')) + global toggle + toggle = Toggle(ctx, remote, host, config.get('iface'), config.get('link_toggle_interval')) + toggle.begin(config.get('gname')) + if config.get('link_recover', False): + log.info('Recovering link') + for gname in config.get('link_recover'): + toggle.end(gname) + log.info('sleeping') + time.sleep(config.get('link_toggle_interval')) + delete_dev(ctx.netem.remote, config.get('iface')) + del ctx.netem.names[gname] + + try: + yield + finally: + if ctx.netem.names: + toggle.cleanup() + for role in config.get('clients'): + (remote,) = ctx.cluster.only(role).remotes.keys() + delete_dev(remote, config.get('iface')) + diff --git a/qa/tasks/object_source_down.py b/qa/tasks/object_source_down.py new file mode 100644 index 00000000..e4519bb6 --- /dev/null +++ b/qa/tasks/object_source_down.py @@ -0,0 +1,101 @@ +""" +Test Object locations going down +""" +import logging +import time +from teuthology import misc as teuthology +from tasks import ceph_manager +from tasks.util.rados import rados + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Test handling of object location going down + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'lost_unfound task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + while len(manager.get_osd_status()['up']) < 3: + time.sleep(10) + manager.wait_for_clean() + + # something that is always there + dummyfile = '/etc/fstab' + + # take 0, 1 out + manager.mark_out_osd(0) + manager.mark_out_osd(1) + manager.wait_for_clean() + + # delay recovery, and make the pg log very long (to prevent backfill) + manager.raw_cluster_cmd( + 'tell', 'osd.0', + 'injectargs', + '--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000' + ) + # delay recovery, and make the pg log very long (to prevent backfill) + manager.raw_cluster_cmd( + 'tell', 'osd.1', + 'injectargs', + '--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000' + ) + # delay recovery, and make the pg log very long (to prevent backfill) + manager.raw_cluster_cmd( + 'tell', 'osd.2', + 'injectargs', + '--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000' + ) + # delay recovery, and make the pg log very long (to prevent backfill) + manager.raw_cluster_cmd( + 'tell', 'osd.3', + 'injectargs', + '--osd-recovery-delay-start 10000 --osd-min-pg-log-entries 100000000' + ) + + # kludge to make sure they get a map + rados(ctx, mon, ['-p', 'data', 'put', 'dummy', dummyfile]) + 
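+    # the objects written below land on the osds that are still in (osd.0 and
+    # osd.1 were just marked out); the test later kills osd.2 and osd.3, so
+    # these objects should end up unfound, which the final
+    # get_num_unfound_objects() assertion verifies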
+ # create old objects + for f in range(1, 10): + rados(ctx, mon, ['-p', 'data', 'put', 'existing_%d' % f, dummyfile]) + + manager.mark_out_osd(3) + manager.wait_till_active() + + manager.mark_in_osd(0) + manager.wait_till_active() + + manager.flush_pg_stats([2, 0]) + + manager.mark_out_osd(2) + manager.wait_till_active() + + # bring up 1 + manager.mark_in_osd(1) + manager.wait_till_active() + + manager.flush_pg_stats([0, 1]) + log.info("Getting unfound objects") + unfound = manager.get_num_unfound_objects() + assert not unfound + + manager.kill_osd(2) + manager.mark_down_osd(2) + manager.kill_osd(3) + manager.mark_down_osd(3) + + manager.flush_pg_stats([0, 1]) + log.info("Getting unfound objects") + unfound = manager.get_num_unfound_objects() + assert unfound diff --git a/qa/tasks/omapbench.py b/qa/tasks/omapbench.py new file mode 100644 index 00000000..af0793d9 --- /dev/null +++ b/qa/tasks/omapbench.py @@ -0,0 +1,85 @@ +""" +Run omapbench executable within teuthology +""" +import contextlib +import logging + +import six + +from teuthology.orchestra import run +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Run omapbench + + The config should be as follows:: + + omapbench: + clients: [client list] + threads: + objects: + entries: + keysize: + valsize: + increment: + omaptype: + + example:: + + tasks: + - ceph: + - omapbench: + clients: [client.0] + threads: 30 + objects: 1000 + entries: 10 + keysize: 10 + valsize: 100 + increment: 100 + omaptype: uniform + - interactive: + """ + log.info('Beginning omapbench...') + assert isinstance(config, dict), \ + "please list clients to run on" + omapbench = {} + testdir = teuthology.get_testdir(ctx) + print(str(config.get('increment',-1))) + for role in config.get('clients', ['client.0']): + assert isinstance(role, six.string_types) + PREFIX = 'client.' + assert role.startswith(PREFIX) + id_ = role[len(PREFIX):] + (remote,) = ctx.cluster.only(role).remotes.keys() + proc = remote.run( + args=[ + "/bin/sh", "-c", + " ".join(['adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage', + 'omapbench', + '--name', role[len(PREFIX):], + '-t', str(config.get('threads', 30)), + '-o', str(config.get('objects', 1000)), + '--entries', str(config.get('entries',10)), + '--keysize', str(config.get('keysize',10)), + '--valsize', str(config.get('valsize',1000)), + '--inc', str(config.get('increment',10)), + '--omaptype', str(config.get('omaptype','uniform')) + ]).format(tdir=testdir), + ], + logger=log.getChild('omapbench.{id}'.format(id=id_)), + stdin=run.PIPE, + wait=False + ) + omapbench[id_] = proc + + try: + yield + finally: + log.info('joining omapbench') + run.wait(omapbench.values()) diff --git a/qa/tasks/openssl_keys.py b/qa/tasks/openssl_keys.py new file mode 100644 index 00000000..3cc4ed8a --- /dev/null +++ b/qa/tasks/openssl_keys.py @@ -0,0 +1,227 @@ +""" +Generates and installs a signed SSL certificate. +""" +import argparse +import logging +import os + +from teuthology import misc +from teuthology.exceptions import ConfigError +from teuthology.orchestra import run +from teuthology.task import Task + +log = logging.getLogger(__name__) + +class OpenSSLKeys(Task): + name = 'openssl_keys' + """ + Generates and installs a signed SSL certificate. 
+ + To create a self-signed certificate: + + - openssl_keys: + # certificate name + root: # results in root.key and root.crt + + # [required] make the private key and certificate available in this client's test directory + client: client.0 + + # common name, defaults to `hostname`. chained certificates must not share a common name + cn: teuthology + + # private key type for -newkey, defaults to rsa:2048 + key-type: rsa:4096 + + # install the certificate as trusted on these clients: + install: [client.0, client.1] + + + To create a certificate signed by a ca certificate: + + - openssl_keys: + root: (self-signed certificate as above) + ... + + cert-for-client1: + client: client.1 + + # use another ssl certificate (by 'name') as the certificate authority + ca: root # --CAkey=root.key -CA=root.crt + + # embed the private key in the certificate file + embed-key: true + """ + + def __init__(self, ctx, config): + super(OpenSSLKeys, self).__init__(ctx, config) + self.certs = [] + self.installed = [] + + def setup(self): + # global dictionary allows other tasks to look up certificate paths + if not hasattr(self.ctx, 'ssl_certificates'): + self.ctx.ssl_certificates = {} + + # use testdir/ca as a working directory + self.cadir = '/'.join((misc.get_testdir(self.ctx), 'ca')) + # make sure self-signed certs get added first, they don't have 'ca' field + configs = sorted(self.config.items(), key=lambda x: 'ca' in x[1]) + for name, config in configs: + # names must be unique to avoid clobbering each others files + if name in self.ctx.ssl_certificates: + raise ConfigError('ssl: duplicate certificate name {}'.format(name)) + + # create the key and certificate + cert = self.create_cert(name, config) + + self.ctx.ssl_certificates[name] = cert + self.certs.append(cert) + + # install as trusted on the requested clients + for client in config.get('install', []): + installed = self.install_cert(cert, client) + self.installed.append(installed) + + def teardown(self): + """ + Clean up any created/installed certificate files. + """ + for cert in self.certs: + self.remove_cert(cert) + + for installed in self.installed: + self.uninstall_cert(installed) + + def create_cert(self, name, config): + """ + Create a certificate with the given configuration. 
+ """ + cert = argparse.Namespace() + cert.name = name + cert.key_type = config.get('key-type', 'rsa:2048') + + cert.client = config.get('client', None) + if not cert.client: + raise ConfigError('ssl: missing required field "client"') + + (cert.remote,) = self.ctx.cluster.only(cert.client).remotes.keys() + + cert.remote.run(args=['mkdir', '-p', self.cadir]) + + cert.key = '{}/{}.key'.format(self.cadir, cert.name) + cert.certificate = '{}/{}.crt'.format(self.cadir, cert.name) + + # provide the common name in -subj to avoid the openssl command prompts + subject = '/CN={}'.format(config.get('cn', cert.remote.hostname)) + + # if a ca certificate is provided, use it to sign the new certificate + ca = config.get('ca', None) + if ca: + # the ca certificate must have been created by a prior ssl task + ca_cert = self.ctx.ssl_certificates.get(ca, None) + if not ca_cert: + raise ConfigError('ssl: ca {} not found for certificate {}' + .format(ca, cert.name)) + + # these commands are run on the ca certificate's client because + # they need access to its private key and cert + + # generate a private key and signing request + csr = '{}/{}.csr'.format(self.cadir, cert.name) + ca_cert.remote.run(args=['openssl', 'req', '-nodes', + '-newkey', cert.key_type, '-keyout', cert.key, + '-out', csr, '-subj', subject]) + + # create the signed certificate + ca_cert.remote.run(args=['openssl', 'x509', '-req', '-in', csr, + '-CA', ca_cert.certificate, '-CAkey', ca_cert.key, '-CAcreateserial', + '-out', cert.certificate, '-days', '365', '-sha256']) + + srl = '{}/{}.srl'.format(self.cadir, ca_cert.name) + ca_cert.remote.run(args=['rm', csr, srl]) # clean up the signing request and serial + + # verify the new certificate against its ca cert + ca_cert.remote.run(args=['openssl', 'verify', + '-CAfile', ca_cert.certificate, cert.certificate]) + + if cert.remote != ca_cert.remote: + # copy to remote client + self.remote_copy_file(ca_cert.remote, cert.certificate, cert.remote, cert.certificate) + self.remote_copy_file(ca_cert.remote, cert.key, cert.remote, cert.key) + # clean up the local copies + ca_cert.remote.run(args=['rm', cert.certificate, cert.key]) + # verify the remote certificate (requires ca to be in its trusted ca certificate store) + cert.remote.run(args=['openssl', 'verify', cert.certificate]) + else: + # otherwise, generate a private key and use it to self-sign a new certificate + cert.remote.run(args=['openssl', 'req', '-x509', '-nodes', + '-newkey', cert.key_type, '-keyout', cert.key, + '-days', '365', '-out', cert.certificate, '-subj', subject]) + + if config.get('embed-key', False): + # append the private key to the certificate file + cert.remote.run(args=['cat', cert.key, run.Raw('>>'), cert.certificate]) + + return cert + + def remove_cert(self, cert): + """ + Delete all of the files associated with the given certificate. + """ + # remove the private key and certificate + cert.remote.run(args=['rm', '-f', cert.certificate, cert.key]) + + # remove ca subdirectory if it's empty + cert.remote.run(args=['rmdir', '--ignore-fail-on-non-empty', self.cadir]) + + def install_cert(self, cert, client): + """ + Install as a trusted ca certificate on the given client. 
+ """ + (remote,) = self.ctx.cluster.only(client).remotes.keys() + + installed = argparse.Namespace() + installed.remote = remote + + if remote.os.package_type == 'deb': + installed.path = '/usr/local/share/ca-certificates/{}.crt'.format(cert.name) + installed.command = ['sudo', 'update-ca-certificates'] + else: + installed.path = '/usr/share/pki/ca-trust-source/anchors/{}.crt'.format(cert.name) + installed.command = ['sudo', 'update-ca-trust'] + + cp_or_mv = 'cp' + if remote != cert.remote: + # copy into remote cadir (with mkdir if necessary) + remote.run(args=['mkdir', '-p', self.cadir]) + self.remote_copy_file(cert.remote, cert.certificate, remote, cert.certificate) + cp_or_mv = 'mv' # move this remote copy into the certificate store + + # install into certificate store as root + remote.run(args=['sudo', cp_or_mv, cert.certificate, installed.path]) + remote.run(args=installed.command) + + return installed + + def uninstall_cert(self, installed): + """ + Uninstall a certificate from the trusted certificate store. + """ + installed.remote.run(args=['sudo', 'rm', installed.path]) + installed.remote.run(args=installed.command) + + def remote_copy_file(self, from_remote, from_path, to_remote, to_path): + """ + Copies a file from one remote to another. + + The remotes don't have public-key auth for 'scp' or misc.copy_file(), + so this copies through an intermediate local tmp file. + """ + log.info('copying from {}:{} to {}:{}...'.format(from_remote, from_path, to_remote, to_path)) + local_path = from_remote.get_file(from_path) + try: + to_remote.put_file(local_path, to_path) + finally: + os.remove(local_path) + +task = OpenSSLKeys diff --git a/qa/tasks/osd_backfill.py b/qa/tasks/osd_backfill.py new file mode 100644 index 00000000..b33e1c91 --- /dev/null +++ b/qa/tasks/osd_backfill.py @@ -0,0 +1,104 @@ +""" +Osd backfill test +""" +import logging +import time +from tasks import ceph_manager +from teuthology import misc as teuthology + + +log = logging.getLogger(__name__) + + +def rados_start(ctx, remote, cmd): + """ + Run a remote rados command (currently used to only write data) + """ + log.info("rados %s" % ' '.join(cmd)) + testdir = teuthology.get_testdir(ctx) + pre = [ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'rados', + ]; + pre.extend(cmd) + proc = remote.run( + args=pre, + wait=False, + ) + return proc + +def task(ctx, config): + """ + Test backfill + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'thrashosds task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + + num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') + log.info('num_osds is %s' % num_osds) + assert num_osds == 3 + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + while len(manager.get_osd_status()['up']) < 3: + time.sleep(10) + manager.flush_pg_stats([0, 1, 2]) + manager.wait_for_clean() + + # write some data + p = rados_start(ctx, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096', + '--no-cleanup']) + err = p.wait() + log.info('err is %d' % err) + + # mark osd.0 out to trigger a rebalance/backfill + manager.mark_out_osd(0) + + # also mark it down to it won't be included in pg_temps + manager.kill_osd(0) + manager.mark_down_osd(0) + + # wait for everything to peer and be happy... 
+ manager.flush_pg_stats([1, 2]) + manager.wait_for_recovery() + + # write some new data + p = rados_start(ctx, mon, ['-p', 'rbd', 'bench', '30', 'write', '-b', '4096', + '--no-cleanup']) + + time.sleep(15) + + # blackhole + restart osd.1 + # this triggers a divergent backfill target + manager.blackhole_kill_osd(1) + time.sleep(2) + manager.revive_osd(1) + + # wait for our writes to complete + succeed + err = p.wait() + log.info('err is %d' % err) + + # wait for osd.1 and osd.2 to be up + manager.wait_till_osd_is_up(1) + manager.wait_till_osd_is_up(2) + + # cluster must recover + manager.flush_pg_stats([1, 2]) + manager.wait_for_recovery() + + # re-add osd.0 + manager.revive_osd(0) + manager.flush_pg_stats([1, 2]) + manager.wait_for_clean() + + diff --git a/qa/tasks/osd_failsafe_enospc.py b/qa/tasks/osd_failsafe_enospc.py new file mode 100644 index 00000000..4b2cdb98 --- /dev/null +++ b/qa/tasks/osd_failsafe_enospc.py @@ -0,0 +1,219 @@ +""" +Handle osdfailsafe configuration settings (nearfull ratio and full ratio) +""" +from io import BytesIO +import logging +import six +import time + +from teuthology.orchestra import run +from tasks.util.rados import rados +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Test handling of osd_failsafe_nearfull_ratio and osd_failsafe_full_ratio + configuration settings + + In order for test to pass must use log-whitelist as follows + + tasks: + - chef: + - install: + - ceph: + log-whitelist: ['OSD near full', 'OSD full dropping all updates'] + - osd_failsafe_enospc: + + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'osd_failsafe_enospc task only accepts a dict for configuration' + + # Give 2 seconds for injectargs + osd_op_complaint_time (30) + 2 * osd_heartbeat_interval (6) + 6 padding + sleep_time = 50 + + # something that is always there + dummyfile = '/etc/fstab' + dummyfile2 = '/etc/resolv.conf' + + manager = ctx.managers['ceph'] + + # create 1 pg pool with 1 rep which can only be on osd.0 + osds = manager.get_osd_dump() + for osd in osds: + if osd['osd'] != 0: + manager.mark_out_osd(osd['osd']) + + log.info('creating pool foo') + manager.create_pool("foo") + manager.raw_cluster_cmd('osd', 'pool', 'set', 'foo', 'size', '1') + + # State NONE -> NEAR + log.info('1. Verify warning messages when exceeding nearfull_ratio') + + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + + proc = mon.run( + args=[ + 'sudo', + 'daemon-helper', + 'kill', + 'ceph', '-w' + ], + stdin=run.PIPE, + stdout=BytesIO(), + wait=False, + ) + + manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_nearfull_ratio .00001') + + time.sleep(sleep_time) + proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w + proc.wait() + + lines = six.ensure_str(proc.stdout.getvalue()).split('\n') + + count = len(filter(lambda line: '[WRN] OSD near full' in line, lines)) + assert count == 2, 'Incorrect number of warning messages expected 2 got %d' % count + count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines)) + assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count + + # State NEAR -> FULL + log.info('2. 
Verify error messages when exceeding full_ratio') + + proc = mon.run( + args=[ + 'sudo', + 'daemon-helper', + 'kill', + 'ceph', '-w' + ], + stdin=run.PIPE, + stdout=BytesIO(), + wait=False, + ) + + manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .00001') + + time.sleep(sleep_time) + proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w + proc.wait() + + lines = six.ensure_str(proc.stdout.getvalue()).split('\n') + + count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines)) + assert count == 2, 'Incorrect number of error messages expected 2 got %d' % count + + log.info('3. Verify write failure when exceeding full_ratio') + + # Write data should fail + ret = rados(ctx, mon, ['-p', 'foo', 'put', 'newfile1', dummyfile]) + assert ret != 0, 'Expected write failure but it succeeded with exit status 0' + + # Put back default + manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .97') + time.sleep(10) + + # State FULL -> NEAR + log.info('4. Verify write success when NOT exceeding full_ratio') + + # Write should succeed + ret = rados(ctx, mon, ['-p', 'foo', 'put', 'newfile2', dummyfile2]) + assert ret == 0, 'Expected write to succeed, but got exit status %d' % ret + + log.info('5. Verify warning messages again when exceeding nearfull_ratio') + + proc = mon.run( + args=[ + 'sudo', + 'daemon-helper', + 'kill', + 'ceph', '-w' + ], + stdin=run.PIPE, + stdout=BytesIO(), + wait=False, + ) + + time.sleep(sleep_time) + proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w + proc.wait() + + lines = six.ensure_str(proc.stdout.getvalue()).split('\n') + + count = len(filter(lambda line: '[WRN] OSD near full' in line, lines)) + assert count == 1 or count == 2, 'Incorrect number of warning messages expected 1 or 2 got %d' % count + count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines)) + assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count + + manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_nearfull_ratio .90') + time.sleep(10) + + # State NONE -> FULL + log.info('6. Verify error messages again when exceeding full_ratio') + + proc = mon.run( + args=[ + 'sudo', + 'daemon-helper', + 'kill', + 'ceph', '-w' + ], + stdin=run.PIPE, + stdout=BytesIO(), + wait=False, + ) + + manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .00001') + + time.sleep(sleep_time) + proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w + proc.wait() + + lines = six.ensure_str(proc.stdout.getvalue()).split('\n') + + count = len(filter(lambda line: '[WRN] OSD near full' in line, lines)) + assert count == 0, 'Incorrect number of warning messages expected 0 got %d' % count + count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines)) + assert count == 2, 'Incorrect number of error messages expected 2 got %d' % count + + # State FULL -> NONE + log.info('7. 
Verify no messages after settings are back to default')
+
+    manager.raw_cluster_cmd('tell', 'osd.0', 'injectargs', '--osd_failsafe_full_ratio .97')
+    time.sleep(10)
+
+    proc = mon.run(
+        args=[
+            'sudo',
+            'daemon-helper',
+            'kill',
+            'ceph', '-w'
+        ],
+        stdin=run.PIPE,
+        stdout=BytesIO(),
+        wait=False,
+        )
+
+    time.sleep(sleep_time)
+    proc.stdin.close() # causes daemon-helper send SIGKILL to ceph -w
+    proc.wait()
+
+    lines = six.ensure_str(proc.stdout.getvalue()).split('\n')
+
+    count = len(filter(lambda line: '[WRN] OSD near full' in line, lines))
+    assert count == 0, 'Incorrect number of warning messages expected 0 got %d' % count
+    count = len(filter(lambda line: '[ERR] OSD full dropping all updates' in line, lines))
+    assert count == 0, 'Incorrect number of error messages expected 0 got %d' % count
+
+    log.info('Test Passed')
+
+    # Bring all OSDs back in
+    manager.remove_pool("foo")
+    for osd in osds:
+        if osd['osd'] != 0:
+            manager.mark_in_osd(osd['osd'])
diff --git a/qa/tasks/osd_max_pg_per_osd.py b/qa/tasks/osd_max_pg_per_osd.py
new file mode 100644
index 00000000..6680fe6e
--- /dev/null
+++ b/qa/tasks/osd_max_pg_per_osd.py
@@ -0,0 +1,126 @@
+import logging
+import random
+
+
+log = logging.getLogger(__name__)
+
+
+def pg_num_in_all_states(pgs, *states):
+    return sum(1 for state in pgs.values()
+               if all(s in state for s in states))
+
+
+def pg_num_in_any_state(pgs, *states):
+    return sum(1 for state in pgs.values()
+               if any(s in state for s in states))
+
+
+def test_create_from_mon(ctx, config):
+    """
+    osd should stop creating new pools if the number of pgs it serves
+    exceeds the max-pg-per-osd setting, and it should resume the previously
+    suspended pg creations once its pg number drops below the setting
+    How it works::
+    1. set the hard limit of pg-per-osd to "2"
+    2. create pool.a with pg_num=2
+       # all pgs should be active+clean
+    3. create pool.b with pg_num=2
+       # new pgs belonging to this pool should be unknown (the primary osd
+       reaches the limit) or creating (replica osd reaches the limit)
+    4. remove pool.a
+    5. all pgs belonging to pool.b should be active+clean
+    """
+    pg_num = config.get('pg_num', 2)
+    manager = ctx.managers['ceph']
+    log.info('1. creating pool.a')
+    pool_a = manager.create_pool_with_unique_name(pg_num)
+    pg_states = manager.wait_till_pg_convergence(300)
+    pg_created = pg_num_in_all_states(pg_states, 'active', 'clean')
+    assert pg_created == pg_num
+
+    log.info('2. creating pool.b')
+    pool_b = manager.create_pool_with_unique_name(pg_num)
+    pg_states = manager.wait_till_pg_convergence(300)
+    pg_created = pg_num_in_all_states(pg_states, 'active', 'clean')
+    assert pg_created == pg_num
+    pg_pending = pg_num_in_any_state(pg_states, 'unknown', 'creating')
+    assert pg_pending == pg_num
+
+    log.info('3. removing pool.a')
+    manager.remove_pool(pool_a)
+    pg_states = manager.wait_till_pg_convergence(300)
+    assert len(pg_states) == pg_num
+    pg_created = pg_num_in_all_states(pg_states, 'active', 'clean')
+    assert pg_created == pg_num
+
+    # cleanup
+    manager.remove_pool(pool_b)
+
+
+def test_create_from_peer(ctx, config):
+    """
+    osd should stop creating new pools if the number of pgs it serves
+    exceeds the max-pg-per-osd setting, and it should resume the previously
+    suspended pg creations once its pg number drops below the setting
+
+    How it works::
+    0. create 4 OSDs.
+    1. create pool.a with pg_num=1, size=2
+       pg will be mapped to osd.0, and osd.1, and it should be active+clean
+    2. create pool.b with pg_num=1, size=2.
+       if the pgs get stuck in creating, delete the pool and try again;
+       eventually we'll get the pool to land on the other 2 osds that
+       aren't occupied by pool.a. (this will also verify that pgs for deleted
+       pools get cleaned out of the creating wait list.)
+    3. mark an osd out. verify that some pgs get stuck stale or peering.
+    4. delete a pool, verify pgs go active.
+    """
+    pg_num = config.get('pg_num', 1)
+    from_primary = config.get('from_primary', True)
+
+    manager = ctx.managers['ceph']
+    log.info('1. creating pool.a')
+    pool_a = manager.create_pool_with_unique_name(pg_num)
+    pg_states = manager.wait_till_pg_convergence(300)
+    pg_created = pg_num_in_all_states(pg_states, 'active', 'clean')
+    assert pg_created == pg_num
+
+    log.info('2. creating pool.b')
+    while True:
+        pool_b = manager.create_pool_with_unique_name(pg_num)
+        pg_states = manager.wait_till_pg_convergence(300)
+        pg_created = pg_num_in_all_states(pg_states, 'active', 'clean')
+        assert pg_created >= pg_num
+        pg_pending = pg_num_in_any_state(pg_states, 'unknown', 'creating')
+        assert pg_pending == pg_num * 2 - pg_created
+        if pg_created == pg_num * 2:
+            break
+        manager.remove_pool(pool_b)
+
+    log.info('3. mark an osd out')
+    pg_stats = manager.get_pg_stats()
+    pg = random.choice(pg_stats)
+    if from_primary:
+        victim = pg['acting'][-1]
+    else:
+        victim = pg['acting'][0]
+    manager.mark_out_osd(victim)
+    pg_states = manager.wait_till_pg_convergence(300)
+    pg_stuck = pg_num_in_any_state(pg_states, 'activating', 'stale', 'peering')
+    assert pg_stuck > 0
+
+    log.info('4. removing pool.b')
+    manager.remove_pool(pool_b)
+    manager.wait_for_clean(30)
+
+    # cleanup
+    manager.remove_pool(pool_a)
+
+
+def task(ctx, config):
+    assert isinstance(config, dict), \
+        'osd_max_pg_per_osd task only accepts a dict for config'
+    if config.get('test_create_from_mon', True):
+        test_create_from_mon(ctx, config)
+    else:
+        test_create_from_peer(ctx, config)
diff --git a/qa/tasks/osd_recovery.py b/qa/tasks/osd_recovery.py
new file mode 100644
index 00000000..b0623c21
--- /dev/null
+++ b/qa/tasks/osd_recovery.py
@@ -0,0 +1,193 @@
+"""
+osd recovery
+"""
+import logging
+import time
+from tasks import ceph_manager
+from teuthology import misc as teuthology
+
+
+log = logging.getLogger(__name__)
+
+
+def rados_start(testdir, remote, cmd):
+    """
+    Run a remote rados command (currently used to only write data)
+    """
+    log.info("rados %s" % ' '.join(cmd))
+    pre = [
+        'adjust-ulimits',
+        'ceph-coverage',
+        '{tdir}/archive/coverage'.format(tdir=testdir),
+        'rados',
+        ];
+    pre.extend(cmd)
+    proc = remote.run(
+        args=pre,
+        wait=False,
+        )
+    return proc
+
+def task(ctx, config):
+    """
+    Test (non-backfill) recovery
+    """
+    if config is None:
+        config = {}
+    assert isinstance(config, dict), \
+        'task only accepts a dict for configuration'
+    testdir = teuthology.get_testdir(ctx)
+    first_mon = teuthology.get_first_mon(ctx, config)
+    (mon,) = ctx.cluster.only(first_mon).remotes.keys()
+
+    num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd')
+    log.info('num_osds is %s' % num_osds)
+    assert num_osds == 3
+
+    manager = ceph_manager.CephManager(
+        mon,
+        ctx=ctx,
+        logger=log.getChild('ceph_manager'),
+        )
+
+    while len(manager.get_osd_status()['up']) < 3:
+        time.sleep(10)
+    manager.flush_pg_stats([0, 1, 2])
+    manager.wait_for_clean()
+
+    # test some osdmap flags
+    manager.raw_cluster_cmd('osd', 'set', 'noin')
+    manager.raw_cluster_cmd('osd', 'set', 'noout')
+    manager.raw_cluster_cmd('osd', 'set', 'noup')
+    manager.raw_cluster_cmd('osd', 'set',
'nodown') + manager.raw_cluster_cmd('osd', 'unset', 'noin') + manager.raw_cluster_cmd('osd', 'unset', 'noout') + manager.raw_cluster_cmd('osd', 'unset', 'noup') + manager.raw_cluster_cmd('osd', 'unset', 'nodown') + + # write some new data + p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '20', 'write', '-b', '4096', + '--no-cleanup']) + + time.sleep(15) + + # trigger a divergent target: + # blackhole + restart osd.1 (shorter log) + manager.blackhole_kill_osd(1) + # kill osd.2 (longer log... we'll make it divergent below) + manager.kill_osd(2) + time.sleep(2) + manager.revive_osd(1) + + # wait for our writes to complete + succeed + err = p.wait() + log.info('err is %d' % err) + + # cluster must repeer + manager.flush_pg_stats([0, 1]) + manager.wait_for_active_or_down() + + # write some more (make sure osd.2 really is divergent) + p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096']) + p.wait() + + # revive divergent osd + manager.revive_osd(2) + + while len(manager.get_osd_status()['up']) < 3: + log.info('waiting a bit...') + time.sleep(2) + log.info('3 are up!') + + # cluster must recover + manager.flush_pg_stats([0, 1, 2]) + manager.wait_for_clean() + + +def test_incomplete_pgs(ctx, config): + """ + Test handling of incomplete pgs. Requires 4 osds. + """ + testdir = teuthology.get_testdir(ctx) + if config is None: + config = {} + assert isinstance(config, dict), \ + 'task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + + num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') + log.info('num_osds is %s' % num_osds) + assert num_osds == 4 + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + while len(manager.get_osd_status()['up']) < 4: + time.sleep(10) + + manager.flush_pg_stats([0, 1, 2, 3]) + manager.wait_for_clean() + + log.info('Testing incomplete pgs...') + + for i in range(4): + manager.set_config( + i, + osd_recovery_delay_start=1000) + + # move data off of osd.0, osd.1 + manager.raw_cluster_cmd('osd', 'out', '0', '1') + manager.flush_pg_stats([0, 1, 2, 3], [0, 1]) + manager.wait_for_clean() + + # lots of objects in rbd (no pg log, will backfill) + p = rados_start(testdir, mon, + ['-p', 'rbd', 'bench', '20', 'write', '-b', '1', + '--no-cleanup']) + p.wait() + + # few objects in rbd pool (with pg log, normal recovery) + for f in range(1, 20): + p = rados_start(testdir, mon, ['-p', 'rbd', 'put', + 'foo.%d' % f, '/etc/passwd']) + p.wait() + + # move it back + manager.raw_cluster_cmd('osd', 'in', '0', '1') + manager.raw_cluster_cmd('osd', 'out', '2', '3') + time.sleep(10) + manager.flush_pg_stats([0, 1, 2, 3], [2, 3]) + time.sleep(10) + manager.wait_for_active() + + assert not manager.is_clean() + assert not manager.is_recovered() + + # kill 2 + 3 + log.info('stopping 2,3') + manager.kill_osd(2) + manager.kill_osd(3) + log.info('...') + manager.raw_cluster_cmd('osd', 'down', '2', '3') + manager.flush_pg_stats([0, 1]) + manager.wait_for_active_or_down() + + assert manager.get_num_down() > 0 + + # revive 2 + 3 + manager.revive_osd(2) + manager.revive_osd(3) + while len(manager.get_osd_status()['up']) < 4: + log.info('waiting a bit...') + time.sleep(2) + log.info('all are up!') + + for i in range(4): + manager.kick_recovery_wq(i) + + # cluster must recover + manager.wait_for_clean() diff --git a/qa/tasks/peer.py b/qa/tasks/peer.py new file mode 100644 index 00000000..6b19096b --- /dev/null +++ 
b/qa/tasks/peer.py @@ -0,0 +1,90 @@ +""" +Peer test (Single test, not much configurable here) +""" +import logging +import json +import time + +from tasks import ceph_manager +from tasks.util.rados import rados +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Test peering. + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'peer task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + while len(manager.get_osd_status()['up']) < 3: + time.sleep(10) + manager.flush_pg_stats([0, 1, 2]) + manager.wait_for_clean() + + for i in range(3): + manager.set_config( + i, + osd_recovery_delay_start=120) + + # take on osd down + manager.kill_osd(2) + manager.mark_down_osd(2) + + # kludge to make sure they get a map + rados(ctx, mon, ['-p', 'data', 'get', 'dummy', '-']) + + manager.flush_pg_stats([0, 1]) + manager.wait_for_recovery() + + # kill another and revive 2, so that some pgs can't peer. + manager.kill_osd(1) + manager.mark_down_osd(1) + manager.revive_osd(2) + manager.wait_till_osd_is_up(2) + + manager.flush_pg_stats([0, 2]) + + manager.wait_for_active_or_down() + + manager.flush_pg_stats([0, 2]) + + # look for down pgs + num_down_pgs = 0 + pgs = manager.get_pg_stats() + for pg in pgs: + out = manager.raw_cluster_cmd('pg', pg['pgid'], 'query') + log.debug("out string %s",out) + j = json.loads(out) + log.info("pg is %s, query json is %s", pg, j) + + if pg['state'].count('down'): + num_down_pgs += 1 + # verify that it is blocked on osd.1 + rs = j['recovery_state'] + assert len(rs) >= 2 + assert rs[0]['name'] == 'Started/Primary/Peering/Down' + assert rs[1]['name'] == 'Started/Primary/Peering' + assert rs[1]['blocked'] + assert rs[1]['down_osds_we_would_probe'] == [1] + assert len(rs[1]['peering_blocked_by']) == 1 + assert rs[1]['peering_blocked_by'][0]['osd'] == 1 + + assert num_down_pgs > 0 + + # bring it all back + manager.revive_osd(1) + manager.wait_till_osd_is_up(1) + manager.flush_pg_stats([0, 1, 2]) + manager.wait_for_clean() diff --git a/qa/tasks/peering_speed_test.py b/qa/tasks/peering_speed_test.py new file mode 100644 index 00000000..9dc65836 --- /dev/null +++ b/qa/tasks/peering_speed_test.py @@ -0,0 +1,87 @@ +""" +Remotely run peering tests. +""" +import logging +import time + +log = logging.getLogger(__name__) + +from teuthology.task.args import argify + +POOLNAME = "POOLNAME" +ARGS = [ + ('num_pgs', 'number of pgs to create', 256, int), + ('max_time', 'seconds to complete peering', 0, int), + ('runs', 'trials to run', 10, int), + ('num_objects', 'objects to create', 256 * 1024, int), + ('object_size', 'size in bytes for objects', 64, int), + ('creation_time_limit', 'time limit for pool population', 60*60, int), + ('create_threads', 'concurrent writes for create', 256, int) + ] + +def setup(ctx, config): + """ + Setup peering test on remotes. + """ + manager = ctx.managers['ceph'] + manager.clear_pools() + manager.create_pool(POOLNAME, config.num_pgs) + log.info("populating pool") + manager.rados_write_objects( + POOLNAME, + config.num_objects, + config.object_size, + config.creation_time_limit, + config.create_threads) + log.info("done populating pool") + +def do_run(ctx, config): + """ + Perform the test. 
+ """ + start = time.time() + # mark in osd + manager = ctx.managers['ceph'] + manager.mark_in_osd(0) + log.info("writing out objects") + manager.rados_write_objects( + POOLNAME, + config.num_pgs, # write 1 object per pg or so + 1, + config.creation_time_limit, + config.num_pgs, # lots of concurrency + cleanup = True) + peering_end = time.time() + + log.info("peering done, waiting on recovery") + manager.wait_for_clean() + + log.info("recovery done") + recovery_end = time.time() + if config.max_time: + assert(peering_end - start < config.max_time) + manager.mark_out_osd(0) + manager.wait_for_clean() + return { + 'time_to_active': peering_end - start, + 'time_to_clean': recovery_end - start + } + +@argify("peering_speed_test", ARGS) +def task(ctx, config): + """ + Peering speed test + """ + setup(ctx, config) + manager = ctx.managers['ceph'] + manager.mark_out_osd(0) + manager.wait_for_clean() + ret = [] + for i in range(config.runs): + log.info("Run {i}".format(i = i)) + ret.append(do_run(ctx, config)) + + manager.mark_in_osd(0) + ctx.summary['recovery_times'] = { + 'runs': ret + } diff --git a/qa/tasks/populate_rbd_pool.py b/qa/tasks/populate_rbd_pool.py new file mode 100644 index 00000000..76395eb6 --- /dev/null +++ b/qa/tasks/populate_rbd_pool.py @@ -0,0 +1,82 @@ +""" +Populate rbd pools +""" +import contextlib +import logging + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Populate pools with prefix with + rbd images at snaps + + The config could be as follows:: + + populate_rbd_pool: + client: + pool_prefix: foo + num_pools: 5 + num_images: 10 + num_snaps: 3 + image_size: 10737418240 + """ + if config is None: + config = {} + client = config.get("client", "client.0") + pool_prefix = config.get("pool_prefix", "foo") + num_pools = config.get("num_pools", 2) + num_images = config.get("num_images", 20) + num_snaps = config.get("num_snaps", 4) + image_size = config.get("image_size", 100) + write_size = config.get("write_size", 1024*1024) + write_threads = config.get("write_threads", 10) + write_total_per_snap = config.get("write_total_per_snap", 1024*1024*30) + + (remote,) = ctx.cluster.only(client).remotes.keys() + + for poolid in range(num_pools): + poolname = "%s-%s" % (pool_prefix, str(poolid)) + log.info("Creating pool %s" % (poolname,)) + ctx.managers['ceph'].create_pool(poolname) + for imageid in range(num_images): + imagename = "rbd-%s" % (str(imageid),) + log.info("Creating imagename %s" % (imagename,)) + remote.run( + args = [ + "rbd", + "create", + imagename, + "--image-format", "1", + "--size", str(image_size), + "--pool", str(poolname)]) + def bench_run(): + remote.run( + args = [ + "rbd", + "bench-write", + imagename, + "--pool", poolname, + "--io-size", str(write_size), + "--io-threads", str(write_threads), + "--io-total", str(write_total_per_snap), + "--io-pattern", "rand"]) + log.info("imagename %s first bench" % (imagename,)) + bench_run() + for snapid in range(num_snaps): + snapname = "snap-%s" % (str(snapid),) + log.info("imagename %s creating snap %s" % (imagename, snapname)) + remote.run( + args = [ + "rbd", "snap", "create", + "--pool", poolname, + "--snap", snapname, + imagename + ]) + bench_run() + + try: + yield + finally: + log.info('done') diff --git a/qa/tasks/qemu.py b/qa/tasks/qemu.py new file mode 100644 index 00000000..b24ecece --- /dev/null +++ b/qa/tasks/qemu.py @@ -0,0 +1,580 @@ +""" +Qemu task +""" + +import contextlib +import logging +import os +import yaml +import time + +from tasks import rbd +from 
tasks.util.workunit import get_refspec_after_overrides +from teuthology import contextutil +from teuthology import misc as teuthology +from teuthology.config import config as teuth_config +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +DEFAULT_NUM_DISKS = 2 +DEFAULT_IMAGE_URL = 'http://download.ceph.com/qa/ubuntu-12.04.qcow2' +DEFAULT_IMAGE_SIZE = 10240 # in megabytes +DEFAULT_CPUS = 1 +DEFAULT_MEM = 4096 # in megabytes + +def create_images(ctx, config, managers): + for client, client_config in config.items(): + disks = client_config.get('disks', DEFAULT_NUM_DISKS) + if not isinstance(disks, list): + disks = [{} for n in range(int(disks))] + clone = client_config.get('clone', False) + assert disks, 'at least one rbd device must be used' + for i, disk in enumerate(disks[1:]): + create_config = { + client: { + 'image_name': '{client}.{num}'.format(client=client, + num=i + 1), + 'image_format': 2 if clone else 1, + 'image_size': (disk or {}).get('image_size', + DEFAULT_IMAGE_SIZE), + } + } + managers.append( + lambda create_config=create_config: + rbd.create_image(ctx=ctx, config=create_config) + ) + +def create_clones(ctx, config, managers): + for client, client_config in config.items(): + clone = client_config.get('clone', False) + if clone: + num_disks = client_config.get('disks', DEFAULT_NUM_DISKS) + if isinstance(num_disks, list): + num_disks = len(num_disks) + for i in range(num_disks): + create_config = { + client: { + 'image_name': + '{client}.{num}-clone'.format(client=client, num=i), + 'parent_name': + '{client}.{num}'.format(client=client, num=i), + } + } + managers.append( + lambda create_config=create_config: + rbd.clone_image(ctx=ctx, config=create_config) + ) + +@contextlib.contextmanager +def create_dirs(ctx, config): + """ + Handle directory creation and cleanup + """ + testdir = teuthology.get_testdir(ctx) + for client, client_config in config.items(): + assert 'test' in client_config, 'You must specify a test to run' + (remote,) = ctx.cluster.only(client).remotes.keys() + remote.run( + args=[ + 'install', '-d', '-m0755', '--', + '{tdir}/qemu'.format(tdir=testdir), + '{tdir}/archive/qemu'.format(tdir=testdir), + ] + ) + try: + yield + finally: + for client, client_config in config.items(): + assert 'test' in client_config, 'You must specify a test to run' + (remote,) = ctx.cluster.only(client).remotes.keys() + remote.run( + args=[ + 'rmdir', '{tdir}/qemu'.format(tdir=testdir), run.Raw('||'), 'true', + ] + ) + +@contextlib.contextmanager +def generate_iso(ctx, config): + """Execute system commands to generate iso""" + log.info('generating iso...') + testdir = teuthology.get_testdir(ctx) + + # use ctx.config instead of config, because config has been + # through teuthology.replace_all_with_clients() + refspec = get_refspec_after_overrides(ctx.config, {}) + + git_url = teuth_config.get_ceph_qa_suite_git_url() + log.info('Pulling tests from %s ref %s', git_url, refspec) + + for client, client_config in config.items(): + assert 'test' in client_config, 'You must specify a test to run' + test = client_config['test'] + + (remote,) = ctx.cluster.only(client).remotes.keys() + + clone_dir = '{tdir}/qemu_clone.{role}'.format(tdir=testdir, role=client) + remote.run(args=refspec.clone(git_url, clone_dir)) + + src_dir = os.path.dirname(__file__) + userdata_path = os.path.join(testdir, 'qemu', 'userdata.' + client) + metadata_path = os.path.join(testdir, 'qemu', 'metadata.' 
+ client) + + with open(os.path.join(src_dir, 'userdata_setup.yaml')) as f: + test_setup = ''.join(f.readlines()) + # configuring the commands to setup the nfs mount + mnt_dir = "/export/{client}".format(client=client) + test_setup = test_setup.format( + mnt_dir=mnt_dir + ) + + with open(os.path.join(src_dir, 'userdata_teardown.yaml')) as f: + test_teardown = ''.join(f.readlines()) + + user_data = test_setup + if client_config.get('type', 'filesystem') == 'filesystem': + num_disks = client_config.get('disks', DEFAULT_NUM_DISKS) + if isinstance(num_disks, list): + num_disks = len(num_disks) + for i in range(1, num_disks): + dev_letter = chr(ord('a') + i) + user_data += """ +- | + #!/bin/bash + mkdir /mnt/test_{dev_letter} + mkfs -t xfs /dev/vd{dev_letter} + mount -t xfs /dev/vd{dev_letter} /mnt/test_{dev_letter} +""".format(dev_letter=dev_letter) + + user_data += """ +- | + #!/bin/bash + test -d /etc/ceph || mkdir /etc/ceph + cp /mnt/cdrom/ceph.* /etc/ceph/ +""" + + cloud_config_archive = client_config.get('cloud_config_archive', []) + if cloud_config_archive: + user_data += yaml.safe_dump(cloud_config_archive, default_style='|', + default_flow_style=False) + + # this may change later to pass the directories as args to the + # script or something. xfstests needs that. + user_data += """ +- | + #!/bin/bash + test -d /mnt/test_b && cd /mnt/test_b + /mnt/cdrom/test.sh > /mnt/log/test.log 2>&1 && touch /mnt/log/success +""" + test_teardown + + user_data = user_data.format( + ceph_branch=ctx.config.get('branch'), + ceph_sha1=ctx.config.get('sha1')) + teuthology.write_file(remote, userdata_path, user_data) + + with open(os.path.join(src_dir, 'metadata.yaml'), 'rb') as f: + teuthology.write_file(remote, metadata_path, f) + + test_file = '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client) + + log.info('fetching test %s for %s', test, client) + remote.run( + args=[ + 'cp', '--', os.path.join(clone_dir, test), test_file, + run.Raw('&&'), + 'chmod', '755', test_file, + ], + ) + remote.run( + args=[ + 'genisoimage', '-quiet', '-input-charset', 'utf-8', + '-volid', 'cidata', '-joliet', '-rock', + '-o', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client), + '-graft-points', + 'user-data={userdata}'.format(userdata=userdata_path), + 'meta-data={metadata}'.format(metadata=metadata_path), + 'ceph.conf=/etc/ceph/ceph.conf', + 'ceph.keyring=/etc/ceph/ceph.keyring', + 'test.sh={file}'.format(file=test_file), + ], + ) + try: + yield + finally: + for client in config.keys(): + (remote,) = ctx.cluster.only(client).remotes.keys() + remote.run( + args=[ + 'rm', '-rf', + '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client), + os.path.join(testdir, 'qemu', 'userdata.' + client), + os.path.join(testdir, 'qemu', 'metadata.' 
+ client), + '{tdir}/qemu/{client}.test.sh'.format(tdir=testdir, client=client), + '{tdir}/qemu_clone.{client}'.format(tdir=testdir, client=client), + ], + ) + +@contextlib.contextmanager +def download_image(ctx, config): + """Downland base image, remove image file when done""" + log.info('downloading base image') + testdir = teuthology.get_testdir(ctx) + for client, client_config in config.items(): + (remote,) = ctx.cluster.only(client).remotes.keys() + base_file = '{tdir}/qemu/base.{client}.qcow2'.format(tdir=testdir, client=client) + image_url = client_config.get('image_url', DEFAULT_IMAGE_URL) + remote.run( + args=[ + 'wget', '-nv', '-O', base_file, image_url, + ] + ) + + disks = client_config.get('disks', None) + if not isinstance(disks, list): + disks = [{}] + image_name = '{client}.0'.format(client=client) + image_size = (disks[0] or {}).get('image_size', DEFAULT_IMAGE_SIZE) + remote.run( + args=[ + 'qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw', + base_file, 'rbd:rbd/{image_name}'.format(image_name=image_name) + ] + ) + remote.run( + args=[ + 'rbd', 'resize', + '--size={image_size}M'.format(image_size=image_size), + image_name, + ] + ) + try: + yield + finally: + log.debug('cleaning up base image files') + for client in config.keys(): + base_file = '{tdir}/qemu/base.{client}.qcow2'.format( + tdir=testdir, + client=client, + ) + (remote,) = ctx.cluster.only(client).remotes.keys() + remote.run( + args=[ + 'rm', '-f', base_file, + ], + ) + + +def _setup_nfs_mount(remote, client, mount_dir): + """ + Sets up an nfs mount on the remote that the guest can use to + store logs. This nfs mount is also used to touch a file + at the end of the test to indicate if the test was successful + or not. + """ + export_dir = "/export/{client}".format(client=client) + log.info("Creating the nfs export directory...") + remote.run(args=[ + 'sudo', 'mkdir', '-p', export_dir, + ]) + log.info("Mounting the test directory...") + remote.run(args=[ + 'sudo', 'mount', '--bind', mount_dir, export_dir, + ]) + log.info("Adding mount to /etc/exports...") + export = "{dir} *(rw,no_root_squash,no_subtree_check,insecure)".format( + dir=export_dir + ) + remote.run(args=[ + 'sudo', 'sed', '-i', '/^\/export\//d', "/etc/exports", + ]) + remote.run(args=[ + 'echo', export, run.Raw("|"), + 'sudo', 'tee', '-a', "/etc/exports", + ]) + log.info("Restarting NFS...") + if remote.os.package_type == "deb": + remote.run(args=['sudo', 'service', 'nfs-kernel-server', 'restart']) + else: + remote.run(args=['sudo', 'systemctl', 'restart', 'nfs']) + + +def _teardown_nfs_mount(remote, client): + """ + Tears down the nfs mount on the remote used for logging and reporting the + status of the tests being ran in the guest. 
+ """ + log.info("Tearing down the nfs mount for {remote}".format(remote=remote)) + export_dir = "/export/{client}".format(client=client) + log.info("Stopping NFS...") + if remote.os.package_type == "deb": + remote.run(args=[ + 'sudo', 'service', 'nfs-kernel-server', 'stop' + ]) + else: + remote.run(args=[ + 'sudo', 'systemctl', 'stop', 'nfs' + ]) + log.info("Unmounting exported directory...") + remote.run(args=[ + 'sudo', 'umount', export_dir + ]) + log.info("Deleting exported directory...") + remote.run(args=[ + 'sudo', 'rm', '-r', '/export' + ]) + log.info("Deleting export from /etc/exports...") + remote.run(args=[ + 'sudo', 'sed', '-i', '$ d', '/etc/exports' + ]) + log.info("Starting NFS...") + if remote.os.package_type == "deb": + remote.run(args=[ + 'sudo', 'service', 'nfs-kernel-server', 'start' + ]) + else: + remote.run(args=[ + 'sudo', 'systemctl', 'start', 'nfs' + ]) + + +@contextlib.contextmanager +def run_qemu(ctx, config): + """Setup kvm environment and start qemu""" + procs = [] + testdir = teuthology.get_testdir(ctx) + for client, client_config in config.items(): + (remote,) = ctx.cluster.only(client).remotes.keys() + log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir, client=client) + remote.run( + args=[ + 'mkdir', log_dir, run.Raw('&&'), + 'sudo', 'modprobe', 'kvm', + ] + ) + + # make an nfs mount to use for logging and to + # allow to test to tell teuthology the tests outcome + _setup_nfs_mount(remote, client, log_dir) + + # Hack to make sure /dev/kvm permissions are set correctly + # See http://tracker.ceph.com/issues/17977 and + # https://bugzilla.redhat.com/show_bug.cgi?id=1333159 + remote.run(args='sudo udevadm control --reload') + remote.run(args='sudo udevadm trigger /dev/kvm') + remote.run(args='ls -l /dev/kvm') + + qemu_cmd = 'qemu-system-x86_64' + if remote.os.package_type == "rpm": + qemu_cmd = "/usr/libexec/qemu-kvm" + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'daemon-helper', + 'term', + qemu_cmd, '-enable-kvm', '-nographic', '-cpu', 'host', + '-smp', str(client_config.get('cpus', DEFAULT_CPUS)), + '-m', str(client_config.get('memory', DEFAULT_MEM)), + # cd holding metadata for cloud-init + '-cdrom', '{tdir}/qemu/{client}.iso'.format(tdir=testdir, client=client), + ] + + cachemode = 'none' + ceph_config = ctx.ceph['ceph'].conf.get('global', {}) + ceph_config.update(ctx.ceph['ceph'].conf.get('client', {})) + ceph_config.update(ctx.ceph['ceph'].conf.get(client, {})) + if ceph_config.get('rbd cache', True): + if ceph_config.get('rbd cache max dirty', 1) > 0: + cachemode = 'writeback' + else: + cachemode = 'writethrough' + + clone = client_config.get('clone', False) + num_disks = client_config.get('disks', DEFAULT_NUM_DISKS) + if isinstance(num_disks, list): + num_disks = len(num_disks) + for i in range(num_disks): + suffix = '-clone' if clone else '' + args.extend([ + '-drive', + 'file=rbd:rbd/{img}:id={id},format=raw,if=virtio,cache={cachemode}'.format( + img='{client}.{num}{suffix}'.format(client=client, num=i, + suffix=suffix), + id=client[len('client.'):], + cachemode=cachemode, + ), + ]) + time_wait = client_config.get('time_wait', 0) + + log.info('starting qemu...') + procs.append( + remote.run( + args=args, + logger=log.getChild(client), + stdin=run.PIPE, + wait=False, + ) + ) + + try: + yield + finally: + log.info('waiting for qemu tests to finish...') + run.wait(procs) + + if time_wait > 0: + log.debug('waiting {time_wait} sec for workloads detect finish...'.format( + 
time_wait=time_wait)); + time.sleep(time_wait) + + log.debug('checking that qemu tests succeeded...') + for client in config.keys(): + (remote,) = ctx.cluster.only(client).remotes.keys() + + # ensure we have permissions to all the logs + log_dir = '{tdir}/archive/qemu/{client}'.format(tdir=testdir, + client=client) + remote.run( + args=[ + 'sudo', 'chmod', 'a+rw', '-R', log_dir + ] + ) + + # teardown nfs mount + _teardown_nfs_mount(remote, client) + # check for test status + remote.run( + args=[ + 'test', '-f', + '{tdir}/archive/qemu/{client}/success'.format( + tdir=testdir, + client=client + ), + ], + ) + + +@contextlib.contextmanager +def task(ctx, config): + """ + Run a test inside of QEMU on top of rbd. Only one test + is supported per client. + + For example, you can specify which clients to run on:: + + tasks: + - ceph: + - qemu: + client.0: + test: http://download.ceph.com/qa/test.sh + client.1: + test: http://download.ceph.com/qa/test2.sh + + Or use the same settings on all clients: + + tasks: + - ceph: + - qemu: + all: + test: http://download.ceph.com/qa/test.sh + + For tests that don't need a filesystem, set type to block:: + + tasks: + - ceph: + - qemu: + client.0: + test: http://download.ceph.com/qa/test.sh + type: block + + The test should be configured to run on /dev/vdb and later + devices. + + If you want to run a test that uses more than one rbd image, + specify how many images to use:: + + tasks: + - ceph: + - qemu: + client.0: + test: http://download.ceph.com/qa/test.sh + type: block + disks: 2 + + - or - + + tasks: + - ceph: + - qemu: + client.0: + test: http://ceph.com/qa/test.sh + type: block + disks: + - image_size: 1024 + - image_size: 2048 + + You can set the amount of CPUs and memory the VM has (default is 1 CPU and + 4096 MB):: + + tasks: + - ceph: + - qemu: + client.0: + test: http://download.ceph.com/qa/test.sh + cpus: 4 + memory: 512 # megabytes + + If you want to run a test against a cloned rbd image, set clone to true:: + + tasks: + - ceph: + - qemu: + client.0: + test: http://download.ceph.com/qa/test.sh + clone: true + + If you need to configure additional cloud-config options, set cloud_config + to the required data set:: + + tasks: + - ceph + - qemu: + client.0: + test: http://ceph.com/qa/test.sh + cloud_config_archive: + - | + #/bin/bash + touch foo1 + - content: | + test data + type: text/plain + filename: /tmp/data + + If you need to override the default cloud image, set image_url: + + tasks: + - ceph + - qemu: + client.0: + test: http://ceph.com/qa/test.sh + image_url: https://cloud-images.ubuntu.com/releases/16.04/release/ubuntu-16.04-server-cloudimg-amd64-disk1.img + """ + assert isinstance(config, dict), \ + "task qemu only supports a dictionary for configuration" + + config = teuthology.replace_all_with_clients(ctx.cluster, config) + + managers = [] + create_images(ctx=ctx, config=config, managers=managers) + managers.extend([ + lambda: create_dirs(ctx=ctx, config=config), + lambda: generate_iso(ctx=ctx, config=config), + lambda: download_image(ctx=ctx, config=config), + ]) + create_clones(ctx=ctx, config=config, managers=managers) + managers.append( + lambda: run_qemu(ctx=ctx, config=config), + ) + + with contextutil.nested(*managers): + yield diff --git a/qa/tasks/rados.py b/qa/tasks/rados.py new file mode 100644 index 00000000..66b626a1 --- /dev/null +++ b/qa/tasks/rados.py @@ -0,0 +1,272 @@ +""" +Rados modle-based integration tests +""" +import contextlib +import logging +import gevent +from teuthology import misc as teuthology + +import six 
+ +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Run RadosModel-based integration tests. + + The config should be as follows:: + + rados: + clients: [client list] + ops: + objects: + max_in_flight: + object_size: + min_stride_size: + max_stride_size: + op_weights: + runs: - the pool is remade between runs + ec_pool: use an ec pool + erasure_code_profile: profile to use with the erasure coded pool + fast_read: enable ec_pool's fast_read + min_size: set the min_size of created pool + pool_snaps: use pool snapshots instead of selfmanaged snapshots + write_fadvise_dontneed: write behavior like with LIBRADOS_OP_FLAG_FADVISE_DONTNEED. + This mean data don't access in the near future. + Let osd backend don't keep data in cache. + + For example:: + + tasks: + - ceph: + - rados: + clients: [client.0] + ops: 1000 + max_seconds: 0 # 0 for no limit + objects: 25 + max_in_flight: 16 + object_size: 4000000 + min_stride_size: 1024 + max_stride_size: 4096 + op_weights: + read: 20 + write: 10 + delete: 2 + snap_create: 3 + rollback: 2 + snap_remove: 0 + ec_pool: create an ec pool, defaults to False + erasure_code_use_overwrites: test overwrites, default false + erasure_code_profile: + name: teuthologyprofile + k: 2 + m: 1 + crush-failure-domain: osd + pool_snaps: true + write_fadvise_dontneed: true + runs: 10 + - interactive: + + Optionally, you can provide the pool name to run against: + + tasks: + - ceph: + - exec: + client.0: + - ceph osd pool create foo + - rados: + clients: [client.0] + pools: [foo] + ... + + Alternatively, you can provide a pool prefix: + + tasks: + - ceph: + - exec: + client.0: + - ceph osd pool create foo.client.0 + - rados: + clients: [client.0] + pool_prefix: foo + ... + + The tests are run asynchronously, they are not complete when the task + returns. For instance: + + - rados: + clients: [client.0] + pools: [ecbase] + ops: 4000 + objects: 500 + op_weights: + read: 100 + write: 100 + delete: 50 + copy_from: 50 + - print: "**** done rados ec-cache-agent (part 2)" + + will run the print task immediately after the rados tasks begins but + not after it completes. 
To make the rados task a blocking / sequential + task, use: + + - sequential: + - rados: + clients: [client.0] + pools: [ecbase] + ops: 4000 + objects: 500 + op_weights: + read: 100 + write: 100 + delete: 50 + copy_from: 50 + - print: "**** done rados ec-cache-agent (part 2)" + + """ + log.info('Beginning rados...') + assert isinstance(config, dict), \ + "please list clients to run on" + + object_size = int(config.get('object_size', 4000000)) + op_weights = config.get('op_weights', {}) + testdir = teuthology.get_testdir(ctx) + args = [ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'ceph_test_rados'] + if config.get('ec_pool', False): + args.extend(['--no-omap']) + if not config.get('erasure_code_use_overwrites', False): + args.extend(['--ec-pool']) + if config.get('write_fadvise_dontneed', False): + args.extend(['--write-fadvise-dontneed']) + if config.get('set_redirect', False): + args.extend(['--set_redirect']) + if config.get('set_chunk', False): + args.extend(['--set_chunk']) + if config.get('low_tier_pool', None): + args.extend(['--low_tier_pool', config.get('low_tier_pool', None)]) + if config.get('pool_snaps', False): + args.extend(['--pool-snaps']) + args.extend([ + '--max-ops', str(config.get('ops', 10000)), + '--objects', str(config.get('objects', 500)), + '--max-in-flight', str(config.get('max_in_flight', 16)), + '--size', str(object_size), + '--min-stride-size', str(config.get('min_stride_size', object_size // 10)), + '--max-stride-size', str(config.get('max_stride_size', object_size // 5)), + '--max-seconds', str(config.get('max_seconds', 0)) + ]) + + weights = {} + weights['read'] = 100 + weights['write'] = 100 + weights['delete'] = 10 + # Parallel of the op_types in test/osd/TestRados.cc + for field in [ + # read handled above + # write handled above + # delete handled above + "snap_create", + "snap_remove", + "rollback", + "setattr", + "rmattr", + "watch", + "copy_from", + "hit_set_list", + "is_dirty", + "undirty", + "cache_flush", + "cache_try_flush", + "cache_evict", + "append", + "write", + "read", + "delete" + ]: + if field in op_weights: + weights[field] = op_weights[field] + + if config.get('write_append_excl', True): + if 'write' in weights: + weights['write'] = weights['write'] // 2 + weights['write_excl'] = weights['write'] + + if 'append' in weights: + weights['append'] = weights['append'] // 2 + weights['append_excl'] = weights['append'] + + for op, weight in weights.items(): + args.extend([ + '--op', op, str(weight) + ]) + + + def thread(): + """Thread spawned by gevent""" + clients = ['client.{id}'.format(id=id_) for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + log.info('clients are %s' % clients) + manager = ctx.managers['ceph'] + if config.get('ec_pool', False): + profile = config.get('erasure_code_profile', {}) + profile_name = profile.get('name', 'teuthologyprofile') + manager.create_erasure_code_profile(profile_name, profile) + else: + profile_name = None + for i in range(int(config.get('runs', '1'))): + log.info("starting run %s out of %s", str(i), config.get('runs', '1')) + tests = {} + existing_pools = config.get('pools', []) + created_pools = [] + for role in config.get('clients', clients): + assert isinstance(role, six.string_types) + PREFIX = 'client.' 
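+ # client roles look like 'client.<id>'; the bare id is reused below for CEPH_CLIENT_ID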
+ assert role.startswith(PREFIX) + id_ = role[len(PREFIX):] + + pool = config.get('pool', None) + if not pool and existing_pools: + pool = existing_pools.pop() + else: + pool = manager.create_pool_with_unique_name( + erasure_code_profile_name=profile_name, + erasure_code_use_overwrites= + config.get('erasure_code_use_overwrites', False) + ) + created_pools.append(pool) + if config.get('fast_read', False): + manager.raw_cluster_cmd( + 'osd', 'pool', 'set', pool, 'fast_read', 'true') + min_size = config.get('min_size', None); + if min_size is not None: + manager.raw_cluster_cmd( + 'osd', 'pool', 'set', pool, 'min_size', str(min_size)) + + (remote,) = ctx.cluster.only(role).remotes.keys() + proc = remote.run( + args=["CEPH_CLIENT_ID={id_}".format(id_=id_)] + args + + ["--pool", pool], + logger=log.getChild("rados.{id}".format(id=id_)), + stdin=run.PIPE, + wait=False + ) + tests[id_] = proc + run.wait(tests.values()) + + for pool in created_pools: + manager.wait_snap_trimming_complete(pool); + manager.remove_pool(pool) + + running = gevent.spawn(thread) + + try: + yield + finally: + log.info('joining rados') + running.get() diff --git a/qa/tasks/radosbench.py b/qa/tasks/radosbench.py new file mode 100644 index 00000000..32b09576 --- /dev/null +++ b/qa/tasks/radosbench.py @@ -0,0 +1,140 @@ +""" +Rados benchmarking +""" +import contextlib +import logging + +from teuthology.orchestra import run +from teuthology import misc as teuthology + +import six + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Run radosbench + + The config should be as follows: + + radosbench: + clients: [client list] + time: + pool: + size: write size to use + concurrency: max number of outstanding writes (16) + objectsize: object size to use + unique_pool: use a unique pool, defaults to False + ec_pool: create an ec pool, defaults to False + create_pool: create pool, defaults to True + erasure_code_profile: + name: teuthologyprofile + k: 2 + m: 1 + crush-failure-domain: osd + cleanup: false (defaults to true) + type: (defaults to write) + example: + + tasks: + - ceph: + - radosbench: + clients: [client.0] + time: 360 + - interactive: + """ + log.info('Beginning radosbench...') + assert isinstance(config, dict), \ + "please list clients to run on" + radosbench = {} + + testdir = teuthology.get_testdir(ctx) + manager = ctx.managers['ceph'] + runtype = config.get('type', 'write') + + create_pool = config.get('create_pool', True) + for role in config.get('clients', ['client.0']): + assert isinstance(role, six.string_types) + PREFIX = 'client.' 
+ assert role.startswith(PREFIX) + id_ = role[len(PREFIX):] + (remote,) = ctx.cluster.only(role).remotes.keys() + + if config.get('ec_pool', False): + profile = config.get('erasure_code_profile', {}) + profile_name = profile.get('name', 'teuthologyprofile') + manager.create_erasure_code_profile(profile_name, profile) + else: + profile_name = None + + cleanup = [] + if not config.get('cleanup', True): + cleanup = ['--no-cleanup'] + + pool = config.get('pool', 'data') + if create_pool: + if pool != 'data': + manager.create_pool(pool, erasure_code_profile_name=profile_name) + else: + pool = manager.create_pool_with_unique_name(erasure_code_profile_name=profile_name) + + concurrency = config.get('concurrency', 16) + osize = config.get('objectsize', 65536) + if osize == 0: + objectsize = [] + else: + objectsize = ['--object-size', str(osize)] + size = ['-b', str(config.get('size', 65536))] + # If doing a reading run then populate data + if runtype != "write": + proc = remote.run( + args=[ + "/bin/sh", "-c", + " ".join(['adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage', + 'rados', + '--no-log-to-stderr', + '--name', role] + + size + objectsize + + ['-t', str(concurrency)] + + ['-p' , pool, + 'bench', str(60), "write", "--no-cleanup" + ]).format(tdir=testdir), + ], + logger=log.getChild('radosbench.{id}'.format(id=id_)), + wait=True + ) + size = [] + objectsize = [] + + proc = remote.run( + args=[ + "/bin/sh", "-c", + " ".join(['adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage', + 'rados', + '--no-log-to-stderr', + '--name', role] + + size + objectsize + + ['-p' , pool, + 'bench', str(config.get('time', 360)), runtype, + ] + cleanup).format(tdir=testdir), + ], + logger=log.getChild('radosbench.{id}'.format(id=id_)), + stdin=run.PIPE, + wait=False + ) + radosbench[id_] = proc + + try: + yield + finally: + timeout = config.get('time', 360) * 30 + 300 + log.info('joining radosbench (timing out after %ss)', timeout) + run.wait(radosbench.values(), timeout=timeout) + + if pool != 'data' and create_pool: + manager.remove_pool(pool) diff --git a/qa/tasks/radosbenchsweep.py b/qa/tasks/radosbenchsweep.py new file mode 100644 index 00000000..0aeb7218 --- /dev/null +++ b/qa/tasks/radosbenchsweep.py @@ -0,0 +1,223 @@ +""" +Rados benchmarking sweep +""" +import contextlib +import logging +import re + +from io import BytesIO +from itertools import product + +from teuthology.orchestra import run +from teuthology import misc as teuthology + +import six + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def task(ctx, config): + """ + Execute a radosbench parameter sweep + + Puts radosbench in a loop, taking values from the given config at each + iteration. If given, the min and max values below create a range, e.g. + min_replicas=1 and max_replicas=3 implies executing with 1-3 replicas. 
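+ Each executed parameter combination appends one row of results to the CSV output file.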
+ + Parameters: + + clients: [client list] + time: seconds to run (default=120) + sizes: [list of object sizes] (default=[4M]) + mode: (default=write) + repetitions: execute the same configuration multiple times (default=1) + min_num_replicas: minimum number of replicas to use (default = 3) + max_num_replicas: maximum number of replicas to use (default = 3) + min_num_osds: the minimum number of OSDs in a pool (default=all) + max_num_osds: the maximum number of OSDs in a pool (default=all) + file: name of CSV-formatted output file (default='radosbench.csv') + columns: columns to include (default=all) + - rep: execution number (takes values from 'repetitions') + - num_osd: number of osds for pool + - num_replica: number of replicas + - avg_throughput: throughput + - avg_latency: latency + - stdev_throughput: + - stdev_latency: + + Example: + - radsobenchsweep: + columns: [rep, num_osd, num_replica, avg_throughput, stdev_throughput] + """ + log.info('Beginning radosbenchsweep...') + assert isinstance(config, dict), 'expecting dictionary for configuration' + + # get and validate config values + # { + + # only one client supported for now + if len(config.get('clients', [])) != 1: + raise Exception("Only one client can be specified") + + # only write mode + if config.get('mode', 'write') != 'write': + raise Exception("Only 'write' mode supported for now.") + + # OSDs + total_osds_in_cluster = teuthology.num_instances_of_type(ctx.cluster, 'osd') + min_num_osds = config.get('min_num_osds', total_osds_in_cluster) + max_num_osds = config.get('max_num_osds', total_osds_in_cluster) + + if max_num_osds > total_osds_in_cluster: + raise Exception('max_num_osds cannot be greater than total in cluster') + if min_num_osds < 1: + raise Exception('min_num_osds cannot be less than 1') + if min_num_osds > max_num_osds: + raise Exception('min_num_osds cannot be greater than max_num_osd') + osds = range(0, (total_osds_in_cluster + 1)) + + # replicas + min_num_replicas = config.get('min_num_replicas', 3) + max_num_replicas = config.get('max_num_replicas', 3) + + if min_num_replicas < 1: + raise Exception('min_num_replicas cannot be less than 1') + if min_num_replicas > max_num_replicas: + raise Exception('min_num_replicas cannot be greater than max_replicas') + if max_num_replicas > max_num_osds: + raise Exception('max_num_replicas cannot be greater than max_num_osds') + replicas = range(min_num_replicas, (max_num_replicas + 1)) + + # object size + sizes = config.get('size', [4 << 20]) + + # repetitions + reps = range(config.get('repetitions', 1)) + + # file + fname = config.get('file', 'radosbench.csv') + f = open('{}/{}'.format(ctx.archive, fname), 'w') + f.write(get_csv_header(config) + '\n') + # } + + # set default pools size=1 to avoid 'unhealthy' issues + ctx.manager.set_pool_property('data', 'size', 1) + ctx.manager.set_pool_property('metadata', 'size', 1) + ctx.manager.set_pool_property('rbd', 'size', 1) + + current_osds_out = 0 + + # sweep through all parameters + for osds_out, size, replica, rep in product(osds, sizes, replicas, reps): + + osds_in = total_osds_in_cluster - osds_out + + if osds_in == 0: + # we're done + break + + if current_osds_out != osds_out: + # take an osd out + ctx.manager.raw_cluster_cmd( + 'osd', 'reweight', str(osds_out-1), '0.0') + wait_until_healthy(ctx, config) + current_osds_out = osds_out + + if osds_in not in range(min_num_osds, (max_num_osds + 1)): + # no need to execute with a number of osds that wasn't requested + continue + + if osds_in < replica: + # cannot execute 
with more replicas than available osds + continue + + run_radosbench(ctx, config, f, osds_in, size, replica, rep) + + f.close() + + yield + + +def get_csv_header(conf): + all_columns = [ + 'rep', 'num_osd', 'num_replica', 'avg_throughput', + 'avg_latency', 'stdev_throughput', 'stdev_latency' + ] + given_columns = conf.get('columns', None) + if given_columns and len(given_columns) != 0: + for column in given_columns: + if column not in all_columns: + raise Exception('Unknown column ' + column) + return ','.join(conf['columns']) + else: + conf['columns'] = all_columns + return ','.join(all_columns) + + +def run_radosbench(ctx, config, f, num_osds, size, replica, rep): + pool = ctx.manager.create_pool_with_unique_name() + + ctx.manager.set_pool_property(pool, 'size', replica) + + wait_until_healthy(ctx, config) + + log.info('Executing with parameters: ') + log.info(' num_osd =' + str(num_osds)) + log.info(' size =' + str(size)) + log.info(' num_replicas =' + str(replica)) + log.info(' repetition =' + str(rep)) + + for role in config.get('clients', ['client.0']): + assert isinstance(role, six.string_types) + PREFIX = 'client.' + assert role.startswith(PREFIX) + id_ = role[len(PREFIX):] + (remote,) = ctx.cluster.only(role).remotes.keys() + + proc = remote.run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{}/archive/coverage'.format(teuthology.get_testdir(ctx)), + 'rados', + '--no-log-to-stderr', + '--name', role, + '-b', str(size), + '-p', pool, + 'bench', str(config.get('time', 120)), 'write', + ], + logger=log.getChild('radosbench.{id}'.format(id=id_)), + stdin=run.PIPE, + stdout=BytesIO(), + wait=False + ) + + # parse output to get summary and format it as CSV + proc.wait() + out = proc.stdout.getvalue() + all_values = { + 'stdev_throughput': re.sub(r'Stddev Bandwidth: ', '', re.search( + r'Stddev Bandwidth:.*', out).group(0)), + 'stdev_latency': re.sub(r'Stddev Latency: ', '', re.search( + r'Stddev Latency:.*', out).group(0)), + 'avg_throughput': re.sub(r'Bandwidth \(MB/sec\): ', '', re.search( + r'Bandwidth \(MB/sec\):.*', out).group(0)), + 'avg_latency': re.sub(r'Average Latency: ', '', re.search( + r'Average Latency:.*', out).group(0)), + 'rep': str(rep), + 'num_osd': str(num_osds), + 'num_replica': str(replica) + } + values_to_write = [] + for column in config['columns']: + values_to_write.extend([all_values[column]]) + f.write(','.join(values_to_write) + '\n') + + ctx.manager.remove_pool(pool) + + +def wait_until_healthy(ctx, config): + first_mon = teuthology.get_first_mon(ctx, config) + (mon_remote,) = ctx.cluster.only(first_mon).remotes.keys() + teuthology.wait_until_healthy(ctx, mon_remote) diff --git a/qa/tasks/radosgw_admin.py b/qa/tasks/radosgw_admin.py new file mode 100644 index 00000000..13b926a5 --- /dev/null +++ b/qa/tasks/radosgw_admin.py @@ -0,0 +1,953 @@ +""" +Rgw admin testing against a running instance +""" +# The test cases in this file have been annotated for inventory. 
+# To extract the inventory (in csv format) use the command: +# +# grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //' +# +# to run this standalone: +# python qa/tasks/radosgw_admin.py [USER] HOSTNAME +# + +import json +import logging +import time +import datetime +from six.moves import queue + +import sys +import six + +from io import BytesIO + +import boto.exception +import boto.s3.connection +import boto.s3.acl + +import httplib2 + + +from tasks.util.rgw import rgwadmin, get_user_summary, get_user_successful_ops + +log = logging.getLogger(__name__) + +def usage_acc_findentry2(entries, user, add=True): + for e in entries: + if e['user'] == user: + return e + if not add: + return None + e = {'user': user, 'buckets': []} + entries.append(e) + return e +def usage_acc_findsum2(summaries, user, add=True): + for e in summaries: + if e['user'] == user: + return e + if not add: + return None + e = {'user': user, 'categories': [], + 'total': {'bytes_received': 0, + 'bytes_sent': 0, 'ops': 0, 'successful_ops': 0 }} + summaries.append(e) + return e +def usage_acc_update2(x, out, b_in, err): + x['bytes_sent'] += b_in + x['bytes_received'] += out + x['ops'] += 1 + if not err: + x['successful_ops'] += 1 +def usage_acc_validate_fields(r, x, x2, what): + q=[] + for field in ['bytes_sent', 'bytes_received', 'ops', 'successful_ops']: + try: + if x2[field] < x[field]: + q.append("field %s: %d < %d" % (field, x2[field], x[field])) + except Exception as ex: + r.append( "missing/bad field " + field + " in " + what + " " + str(ex)) + return + if len(q) > 0: + r.append("incomplete counts in " + what + ": " + ", ".join(q)) +class usage_acc: + def __init__(self): + self.results = {'entries': [], 'summary': []} + def findentry(self, user): + return usage_acc_findentry2(self.results['entries'], user) + def findsum(self, user): + return usage_acc_findsum2(self.results['summary'], user) + def e2b(self, e, bucket, add=True): + for b in e['buckets']: + if b['bucket'] == bucket: + return b + if not add: + return None + b = {'bucket': bucket, 'categories': []} + e['buckets'].append(b) + return b + def c2x(self, c, cat, add=True): + for x in c: + if x['category'] == cat: + return x + if not add: + return None + x = {'bytes_received': 0, 'category': cat, + 'bytes_sent': 0, 'ops': 0, 'successful_ops': 0 } + c.append(x) + return x + def update(self, c, cat, user, out, b_in, err): + x = self.c2x(c, cat) + usage_acc_update2(x, out, b_in, err) + if not err and cat == 'create_bucket' and 'owner' not in x: + x['owner'] = user + def make_entry(self, cat, bucket, user, out, b_in, err): + if cat == 'create_bucket' and err: + return + e = self.findentry(user) + b = self.e2b(e, bucket) + self.update(b['categories'], cat, user, out, b_in, err) + s = self.findsum(user) + x = self.c2x(s['categories'], cat) + usage_acc_update2(x, out, b_in, err) + x = s['total'] + usage_acc_update2(x, out, b_in, err) + def generate_make_entry(self): + return lambda cat,bucket,user,out,b_in,err: self.make_entry(cat, bucket, user, out, b_in, err) + def get_usage(self): + return self.results + def compare_results(self, results): + if 'entries' not in results or 'summary' not in results: + return ['Missing entries or summary'] + r = [] + for e in self.results['entries']: + try: + e2 = usage_acc_findentry2(results['entries'], e['user'], False) + except Exception as ex: + r.append("malformed entry looking for user " + + e['user'] + " " + str(ex)) + break + if e2 == None: + r.append("missing entry for user " + e['user']) + continue + for b in e['buckets']: + c = 
b['categories'] + if b['bucket'] == 'nosuchbucket': + print("got here") + try: + b2 = self.e2b(e2, b['bucket'], False) + if b2 != None: + c2 = b2['categories'] + except Exception as ex: + r.append("malformed entry looking for bucket " + + b['bucket'] + " in user " + e['user'] + " " + str(ex)) + break + if b2 == None: + r.append("can't find bucket " + b['bucket'] + + " in user " + e['user']) + continue + for x in c: + try: + x2 = self.c2x(c2, x['category'], False) + except Exception as ex: + r.append("malformed entry looking for " + + x['category'] + " in bucket " + b['bucket'] + + " user " + e['user'] + " " + str(ex)) + break + usage_acc_validate_fields(r, x, x2, "entry: category " + + x['category'] + " bucket " + b['bucket'] + + " in user " + e['user']) + for s in self.results['summary']: + c = s['categories'] + try: + s2 = usage_acc_findsum2(results['summary'], s['user'], False) + except Exception as ex: + r.append("malformed summary looking for user " + e['user'] + + " " + str(ex)) + break + if s2 == None: + r.append("missing summary for user " + e['user'] + " " + str(ex)) + continue + try: + c2 = s2['categories'] + except Exception as ex: + r.append("malformed summary missing categories for user " + + e['user'] + " " + str(ex)) + break + for x in c: + try: + x2 = self.c2x(c2, x['category'], False) + except Exception as ex: + r.append("malformed summary looking for " + + x['category'] + " user " + e['user'] + " " + str(ex)) + break + usage_acc_validate_fields(r, x, x2, "summary: category " + + x['category'] + " in user " + e['user']) + x = s['total'] + try: + x2 = s2['total'] + except Exception as ex: + r.append("malformed summary looking for totals for user " + + e['user'] + " " + str(ex)) + break + usage_acc_validate_fields(r, x, x2, "summary: totals for user" + e['user']) + return r + +def ignore_this_entry(cat, bucket, user, out, b_in, err): + pass +class requestlog_queue(): + def __init__(self, add): + self.q = queue.Queue(1000) + self.adder = add + def handle_request_data(self, request, response, error=False): + now = datetime.datetime.now() + if error: + pass + elif response.status < 200 or response.status >= 400: + error = True + self.q.put({'t': now, 'o': request, 'i': response, 'e': error}) + def clear(self): + with self.q.mutex: + self.q.queue.clear() + def log_and_clear(self, cat, bucket, user, add_entry = None): + while not self.q.empty(): + j = self.q.get() + bytes_out = 0 + if 'Content-Length' in j['o'].headers: + bytes_out = int(j['o'].headers['Content-Length']) + bytes_in = 0 + msg = j['i'].msg if six.PY3 else j['i'].msg.dict + if 'content-length'in msg: + bytes_in = int(msg['content-length']) + log.info('RL: %s %s %s bytes_out=%d bytes_in=%d failed=%r' + % (cat, bucket, user, bytes_out, bytes_in, j['e'])) + if add_entry == None: + add_entry = self.adder + add_entry(cat, bucket, user, bytes_out, bytes_in, j['e']) + +def create_presigned_url(conn, method, bucket_name, key_name, expiration): + return conn.generate_url(expires_in=expiration, + method=method, + bucket=bucket_name, + key=key_name, + query_auth=True, + ) + +def send_raw_http_request(conn, method, bucket_name, key_name, follow_redirects = False): + url = create_presigned_url(conn, method, bucket_name, key_name, 3600) + print(url) + h = httplib2.Http() + h.follow_redirects = follow_redirects + return h.request(url, method) + + +def get_acl(key): + """ + Helper function to get the xml acl from a key, ensuring that the xml + version tag is removed from the acl response + """ + raw_acl = 
six.ensure_str(key.get_xml_acl()) + + def remove_version(string): + return string.split( + '' + )[-1] + + def remove_newlines(string): + return string.strip('\n') + + return remove_version( + remove_newlines(raw_acl) + ) + +def task(ctx, config): + """ + Test radosgw-admin functionality against a running rgw instance. + """ + global log + + assert ctx.rgw.config, \ + "radosgw_admin task needs a config passed from the rgw task" + config = ctx.rgw.config + log.debug('config is: %r', config) + + clients_from_config = config.keys() + + # choose first client as default + client = next(iter(clients_from_config)) + + # once the client is chosen, pull the host name and assigned port out of + # the role_endpoints that were assigned by the rgw task + endpoint = ctx.rgw.role_endpoints[client] + + ## + user1='foo' + user2='fud' + subuser1='foo:foo1' + subuser2='foo:foo2' + display_name1='Foo' + display_name2='Fud' + email='foo@foo.com' + access_key='9te6NH5mcdcq0Tc5i8i1' + secret_key='Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu' + access_key2='p5YnriCv1nAtykxBrupQ' + secret_key2='Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh' + swift_secret1='gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL' + swift_secret2='ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy' + + bucket_name='myfoo' + bucket_name2='mybar' + + # connect to rgw + connection = boto.s3.connection.S3Connection( + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + is_secure=False, + port=endpoint.port, + host=endpoint.hostname, + calling_format=boto.s3.connection.OrdinaryCallingFormat(), + ) + connection2 = boto.s3.connection.S3Connection( + aws_access_key_id=access_key2, + aws_secret_access_key=secret_key2, + is_secure=False, + port=endpoint.port, + host=endpoint.hostname, + calling_format=boto.s3.connection.OrdinaryCallingFormat(), + ) + + acc = usage_acc() + rl = requestlog_queue(acc.generate_make_entry()) + connection.set_request_hook(rl) + connection2.set_request_hook(rl) + + # legend (test cases can be easily grep-ed out) + # TESTCASE 'testname','object','method','operation','assertion' + + # TESTCASE 'usage-show0' 'usage' 'show' 'all usage' 'succeeds' + (err, summary0) = rgwadmin(ctx, client, ['usage', 'show'], check_status=True) + + # TESTCASE 'info-nosuch','user','info','non-existent user','fails' + (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1]) + assert err + + # TESTCASE 'create-ok','user','create','w/all valid info','succeeds' + (err, out) = rgwadmin(ctx, client, [ + 'user', 'create', + '--uid', user1, + '--display-name', display_name1, + '--email', email, + '--access-key', access_key, + '--secret', secret_key, + '--max-buckets', '4' + ], + check_status=True) + + # TESTCASE 'duplicate email','user','create','existing user email','fails' + (err, out) = rgwadmin(ctx, client, [ + 'user', 'create', + '--uid', user2, + '--display-name', display_name2, + '--email', email, + ]) + assert err + + # TESTCASE 'info-existing','user','info','existing user','returns correct info' + (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True) + assert out['user_id'] == user1 + assert out['email'] == email + assert out['display_name'] == display_name1 + assert len(out['keys']) == 1 + assert out['keys'][0]['access_key'] == access_key + assert out['keys'][0]['secret_key'] == secret_key + assert not out['suspended'] + + # TESTCASE 'suspend-ok','user','suspend','active user','succeeds' + (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1], + check_status=True) + + # TESTCASE 
'suspend-suspended','user','suspend','suspended user','succeeds w/advisory' + (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True) + assert out['suspended'] + + # TESTCASE 're-enable','user','enable','suspended user','succeeds' + (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1], check_status=True) + + # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended' + (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True) + assert not out['suspended'] + + # TESTCASE 'add-keys','key','create','w/valid info','succeeds' + (err, out) = rgwadmin(ctx, client, [ + 'key', 'create', '--uid', user1, + '--access-key', access_key2, '--secret', secret_key2, + ], check_status=True) + + # TESTCASE 'info-new-key','user','info','after key addition','returns all keys' + (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], + check_status=True) + assert len(out['keys']) == 2 + assert out['keys'][0]['access_key'] == access_key2 or out['keys'][1]['access_key'] == access_key2 + assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2 + + # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed' + (err, out) = rgwadmin(ctx, client, [ + 'key', 'rm', '--uid', user1, + '--access-key', access_key2, + ], check_status=True) + assert len(out['keys']) == 1 + assert out['keys'][0]['access_key'] == access_key + assert out['keys'][0]['secret_key'] == secret_key + + # TESTCASE 'add-swift-key','key','create','swift key','succeeds' + subuser_access = 'full' + subuser_perm = 'full-control' + + (err, out) = rgwadmin(ctx, client, [ + 'subuser', 'create', '--subuser', subuser1, + '--access', subuser_access + ], check_status=True) + + # TESTCASE 'add-swift-key','key','create','swift key','succeeds' + (err, out) = rgwadmin(ctx, client, [ + 'subuser', 'modify', '--subuser', subuser1, + '--secret', swift_secret1, + '--key-type', 'swift', + ], check_status=True) + + # TESTCASE 'subuser-perm-mask', 'subuser', 'info', 'test subuser perm mask durability', 'succeeds' + (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1]) + + assert out['subusers'][0]['permissions'] == subuser_perm + + # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys' + (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True) + assert len(out['swift_keys']) == 1 + assert out['swift_keys'][0]['user'] == subuser1 + assert out['swift_keys'][0]['secret_key'] == swift_secret1 + + # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds' + (err, out) = rgwadmin(ctx, client, [ + 'subuser', 'create', '--subuser', subuser2, + '--secret', swift_secret2, + '--key-type', 'swift', + ], check_status=True) + + # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys' + (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1], check_status=True) + assert len(out['swift_keys']) == 2 + assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2 + assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2 + + # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed' + (err, out) = rgwadmin(ctx, client, [ + 'key', 'rm', '--subuser', subuser1, + '--key-type', 'swift', + ], check_status=True) + assert len(out['swift_keys']) == 1 + + # TESTCASE 
'rm-subuser','subuser','rm','subuser','success, subuser is removed' + (err, out) = rgwadmin(ctx, client, [ + 'subuser', 'rm', '--subuser', subuser1, + ], check_status=True) + assert len(out['subusers']) == 1 + + # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subser and key is removed' + (err, out) = rgwadmin(ctx, client, [ + 'subuser', 'rm', '--subuser', subuser2, + '--key-type', 'swift', '--purge-keys', + ], check_status=True) + assert len(out['swift_keys']) == 0 + assert len(out['subusers']) == 0 + + # TESTCASE 'bucket-stats','bucket','stats','no session/buckets','succeeds, empty list' + (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1], + check_status=True) + assert len(out) == 0 + + # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list' + (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1], check_status=True) + assert len(out) == 0 + + # create a first bucket + bucket = connection.create_bucket(bucket_name) + + rl.log_and_clear("create_bucket", bucket_name, user1) + + # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list' + (err, out) = rgwadmin(ctx, client, ['bucket', 'list', '--uid', user1], check_status=True) + assert len(out) == 1 + assert out[0] == bucket_name + + bucket_list = connection.get_all_buckets() + assert len(bucket_list) == 1 + assert bucket_list[0].name == bucket_name + + rl.log_and_clear("list_buckets", '', user1) + + # TESTCASE 'bucket-list-all','bucket','list','all buckets','succeeds, expected list' + (err, out) = rgwadmin(ctx, client, ['bucket', 'list'], check_status=True) + assert len(out) >= 1 + assert bucket_name in out; + + # TESTCASE 'max-bucket-limit,'bucket','create','4 buckets','5th bucket fails due to max buckets == 4' + bucket2 = connection.create_bucket(bucket_name + '2') + rl.log_and_clear("create_bucket", bucket_name + '2', user1) + bucket3 = connection.create_bucket(bucket_name + '3') + rl.log_and_clear("create_bucket", bucket_name + '3', user1) + bucket4 = connection.create_bucket(bucket_name + '4') + rl.log_and_clear("create_bucket", bucket_name + '4', user1) + # the 5th should fail. + failed = False + try: + connection.create_bucket(bucket_name + '5') + except Exception: + failed = True + assert failed + rl.log_and_clear("create_bucket", bucket_name + '5', user1) + + # delete the buckets + bucket2.delete() + rl.log_and_clear("delete_bucket", bucket_name + '2', user1) + bucket3.delete() + rl.log_and_clear("delete_bucket", bucket_name + '3', user1) + bucket4.delete() + rl.log_and_clear("delete_bucket", bucket_name + '4', user1) + + # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list' + (err, out) = rgwadmin(ctx, client, [ + 'bucket', 'stats', '--bucket', bucket_name], check_status=True) + assert out['owner'] == user1 + bucket_id = out['id'] + + # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID' + (err, out) = rgwadmin(ctx, client, ['bucket', 'stats', '--uid', user1], check_status=True) + assert len(out) == 1 + assert out[0]['id'] == bucket_id # does it return the same ID twice in a row? 
+ + # use some space + key = boto.s3.key.Key(bucket) + key.set_contents_from_string('one') + rl.log_and_clear("put_obj", bucket_name, user1) + + # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object' + (err, out) = rgwadmin(ctx, client, [ + 'bucket', 'stats', '--bucket', bucket_name], check_status=True) + assert out['id'] == bucket_id + assert out['usage']['rgw.main']['num_objects'] == 1 + assert out['usage']['rgw.main']['size_kb'] > 0 + + # reclaim it + key.delete() + rl.log_and_clear("delete_obj", bucket_name, user1) + + # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'fails', 'access denied error' + (err, out) = rgwadmin(ctx, client, + ['bucket', 'unlink', '--uid', user1, '--bucket', bucket_name], + check_status=True) + + # create a second user to link the bucket to + (err, out) = rgwadmin(ctx, client, [ + 'user', 'create', + '--uid', user2, + '--display-name', display_name2, + '--access-key', access_key2, + '--secret', secret_key2, + '--max-buckets', '1', + ], + check_status=True) + + # try creating an object with the first user before the bucket is relinked + denied = False + key = boto.s3.key.Key(bucket) + + try: + key.set_contents_from_string('two') + except boto.exception.S3ResponseError: + denied = True + + assert not denied + rl.log_and_clear("put_obj", bucket_name, user1) + + # delete the object + key.delete() + rl.log_and_clear("delete_obj", bucket_name, user1) + + # link the bucket to another user + (err, out) = rgwadmin(ctx, client, ['metadata', 'get', 'bucket:{n}'.format(n=bucket_name)], + check_status=True) + + bucket_data = out['data'] + assert bucket_data['bucket']['name'] == bucket_name + + bucket_id = bucket_data['bucket']['bucket_id'] + + # link the bucket to another user + (err, out) = rgwadmin(ctx, client, ['bucket', 'link', '--uid', user2, '--bucket', bucket_name, '--bucket-id', bucket_id], + check_status=True) + + # try to remove user, should fail (has a linked bucket) + (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2]) + assert err + + # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'succeeds, bucket unlinked' + (err, out) = rgwadmin(ctx, client, ['bucket', 'unlink', '--uid', user2, '--bucket', bucket_name], + check_status=True) + + # relink the bucket to the first user and delete the second user + (err, out) = rgwadmin(ctx, client, + ['bucket', 'link', '--uid', user1, '--bucket', bucket_name, '--bucket-id', bucket_id], + check_status=True) + + (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user2], + check_status=True) + + # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed' + + # upload an object + object_name = 'four' + key = boto.s3.key.Key(bucket, object_name) + key.set_contents_from_string(object_name) + rl.log_and_clear("put_obj", bucket_name, user1) + + # fetch it too (for usage stats presently) + s = key.get_contents_as_string(encoding='ascii') + rl.log_and_clear("get_obj", bucket_name, user1) + assert s == object_name + # list bucket too (for usage stats presently) + keys = list(bucket.list()) + rl.log_and_clear("list_bucket", bucket_name, user1) + assert len(keys) == 1 + assert keys[0].name == object_name + + # now delete it + (err, out) = rgwadmin(ctx, client, + ['object', 'rm', '--bucket', bucket_name, '--object', object_name], + check_status=True) + + # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists one no objects' + (err, out) = rgwadmin(ctx, client, [ + 
'bucket', 'stats', '--bucket', bucket_name], + check_status=True) + assert out['id'] == bucket_id + assert out['usage']['rgw.main']['num_objects'] == 0 + + # list log objects + # TESTCASE 'log-list','log','list','after activity','succeeds, lists one no objects' + (err, out) = rgwadmin(ctx, client, ['log', 'list'], check_status=True) + assert len(out) > 0 + + for obj in out: + # TESTCASE 'log-show','log','show','after activity','returns expected info' + if obj[:4] == 'meta' or obj[:4] == 'data' or obj[:18] == 'obj_delete_at_hint': + continue + + (err, rgwlog) = rgwadmin(ctx, client, ['log', 'show', '--object', obj], + check_status=True) + assert len(rgwlog) > 0 + + # exempt bucket_name2 from checking as it was only used for multi-region tests + assert rgwlog['bucket'].find(bucket_name) == 0 or rgwlog['bucket'].find(bucket_name2) == 0 + assert rgwlog['bucket'] != bucket_name or rgwlog['bucket_id'] == bucket_id + assert rgwlog['bucket_owner'] == user1 or rgwlog['bucket'] == bucket_name + '5' or rgwlog['bucket'] == bucket_name2 + for entry in rgwlog['log_entries']: + log.debug('checking log entry: ', entry) + assert entry['bucket'] == rgwlog['bucket'] + possible_buckets = [bucket_name + '5', bucket_name2] + user = entry['user'] + assert user == user1 or user.endswith('system-user') or \ + rgwlog['bucket'] in possible_buckets + + # TESTCASE 'log-rm','log','rm','delete log objects','succeeds' + (err, out) = rgwadmin(ctx, client, ['log', 'rm', '--object', obj], + check_status=True) + + # TODO: show log by bucket+date + + # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds' + (err, out) = rgwadmin(ctx, client, ['user', 'suspend', '--uid', user1], + check_status=True) + + # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects' + denied = False + try: + key = boto.s3.key.Key(bucket) + key.set_contents_from_string('five') + except boto.exception.S3ResponseError as e: + denied = True + assert e.status == 403 + + assert denied + rl.log_and_clear("put_obj", bucket_name, user1) + + # TESTCASE 'user-renable2','user','enable','suspended user','succeeds' + (err, out) = rgwadmin(ctx, client, ['user', 'enable', '--uid', user1], + check_status=True) + + # TESTCASE 'user-renable3','user','enable','reenabled user','can write objects' + key = boto.s3.key.Key(bucket) + key.set_contents_from_string('six') + rl.log_and_clear("put_obj", bucket_name, user1) + + # TESTCASE 'gc-list', 'gc', 'list', 'get list of objects ready for garbage collection' + + # create an object large enough to be split into multiple parts + test_string = 'foo'*10000000 + + big_key = boto.s3.key.Key(bucket) + big_key.set_contents_from_string(test_string) + rl.log_and_clear("put_obj", bucket_name, user1) + + # now delete the head + big_key.delete() + rl.log_and_clear("delete_obj", bucket_name, user1) + + # wait a bit to give the garbage collector time to cycle + time.sleep(15) + + (err, out) = rgwadmin(ctx, client, ['gc', 'list']) + + assert len(out) > 0 + + # TESTCASE 'gc-process', 'gc', 'process', 'manually collect garbage' + (err, out) = rgwadmin(ctx, client, ['gc', 'process'], check_status=True) + + #confirm + (err, out) = rgwadmin(ctx, client, ['gc', 'list']) + + assert len(out) == 0 + + # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets' + (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1]) + assert err + + # delete should fail because ``key`` still exists + try: + bucket.delete() + except boto.exception.S3ResponseError as e: + assert e.status == 
409 + rl.log_and_clear("delete_bucket", bucket_name, user1) + + key.delete() + rl.log_and_clear("delete_obj", bucket_name, user1) + bucket.delete() + rl.log_and_clear("delete_bucket", bucket_name, user1) + + # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy' + bucket = connection.create_bucket(bucket_name) + rl.log_and_clear("create_bucket", bucket_name, user1) + + # create an object + key = boto.s3.key.Key(bucket) + key.set_contents_from_string('seven') + rl.log_and_clear("put_obj", bucket_name, user1) + + # should be private already but guarantee it + key.set_acl('private') + rl.log_and_clear("put_acls", bucket_name, user1) + + (err, out) = rgwadmin(ctx, client, + ['policy', '--bucket', bucket.name, '--object', six.ensure_str(key.key)], + check_status=True, format='xml') + + acl = get_acl(key) + rl.log_and_clear("get_acls", bucket_name, user1) + + assert acl == out.strip('\n') + + # add another grantee by making the object public read + key.set_acl('public-read') + rl.log_and_clear("put_acls", bucket_name, user1) + + (err, out) = rgwadmin(ctx, client, + ['policy', '--bucket', bucket.name, '--object', six.ensure_str(key.key)], + check_status=True, format='xml') + + acl = get_acl(key) + rl.log_and_clear("get_acls", bucket_name, user1) + + assert acl == out.strip('\n') + + # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds' + bucket = connection.create_bucket(bucket_name) + rl.log_and_clear("create_bucket", bucket_name, user1) + key_name = ['eight', 'nine', 'ten', 'eleven'] + for i in range(4): + key = boto.s3.key.Key(bucket) + key.set_contents_from_string(key_name[i]) + rl.log_and_clear("put_obj", bucket_name, user1) + + (err, out) = rgwadmin(ctx, client, + ['bucket', 'rm', '--bucket', bucket_name, '--purge-objects'], + check_status=True) + + # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds' + caps='user=read' + (err, out) = rgwadmin(ctx, client, ['caps', 'add', '--uid', user1, '--caps', caps]) + + assert out['caps'][0]['perm'] == 'read' + + # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds' + (err, out) = rgwadmin(ctx, client, ['caps', 'rm', '--uid', user1, '--caps', caps]) + + assert not out['caps'] + + # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets' + bucket = connection.create_bucket(bucket_name) + rl.log_and_clear("create_bucket", bucket_name, user1) + key = boto.s3.key.Key(bucket) + + (err, out) = rgwadmin(ctx, client, ['user', 'rm', '--uid', user1]) + assert err + + # TESTCASE 'rm-user2', 'user', 'rm', 'user with data', 'succeeds' + bucket = connection.create_bucket(bucket_name) + rl.log_and_clear("create_bucket", bucket_name, user1) + key = boto.s3.key.Key(bucket) + key.set_contents_from_string('twelve') + rl.log_and_clear("put_obj", bucket_name, user1) + + time.sleep(35) + + # need to wait for all usage data to get flushed, should take up to 30 seconds + timestamp = time.time() + while time.time() - timestamp <= (2 * 60): # wait up to 20 minutes + (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--categories', 'delete_obj']) # one of the operations we did is delete_obj, should be present. 
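+ # stop polling once the flushed usage log reports a successful delete_obj operation for user1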
+ if get_user_successful_ops(out, user1) > 0: + break + time.sleep(1) + + assert time.time() - timestamp <= (20 * 60) + + # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds' + (err, out) = rgwadmin(ctx, client, ['usage', 'show'], check_status=True) + assert len(out['entries']) > 0 + assert len(out['summary']) > 0 + + r = acc.compare_results(out) + if len(r) != 0: + sys.stderr.write(("\n".join(r))+"\n") + assert(len(r) == 0) + + user_summary = get_user_summary(out, user1) + + total = user_summary['total'] + assert total['successful_ops'] > 0 + + # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds' + (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1], + check_status=True) + assert len(out['entries']) > 0 + assert len(out['summary']) > 0 + user_summary = out['summary'][0] + for entry in user_summary['categories']: + assert entry['successful_ops'] > 0 + assert user_summary['user'] == user1 + + # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds' + test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket'] + for cat in test_categories: + (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1, '--categories', cat], + check_status=True) + assert len(out['summary']) > 0 + user_summary = out['summary'][0] + assert user_summary['user'] == user1 + assert len(user_summary['categories']) == 1 + entry = user_summary['categories'][0] + assert entry['category'] == cat + assert entry['successful_ops'] > 0 + + # should be all through with connection. (anything using connection + # should be BEFORE the usage stuff above.) + rl.log_and_clear("(before-close)", '-', '-', ignore_this_entry) + connection.close() + connection = None + + # the usage flush interval is 30 seconds, wait that much an then some + # to make sure everything has been flushed + time.sleep(35) + + # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed' + (err, out) = rgwadmin(ctx, client, ['usage', 'trim', '--uid', user1], + check_status=True) + (err, out) = rgwadmin(ctx, client, ['usage', 'show', '--uid', user1], + check_status=True) + assert len(out['entries']) == 0 + assert len(out['summary']) == 0 + + (err, out) = rgwadmin(ctx, client, + ['user', 'rm', '--uid', user1, '--purge-data' ], + check_status=True) + + # TESTCASE 'rm-user3','user','rm','deleted user','fails' + (err, out) = rgwadmin(ctx, client, ['user', 'info', '--uid', user1]) + assert err + + # TESTCASE 'zone-info', 'zone', 'get', 'get zone info', 'succeeds, has default placement rule' + # + + (err, out) = rgwadmin(ctx, client, ['zone', 'get','--rgw-zone','default']) + orig_placement_pools = len(out['placement_pools']) + + # removed this test, it is not correct to assume that zone has default placement, it really + # depends on how we set it up before + # + # assert len(out) > 0 + # assert len(out['placement_pools']) == 1 + + # default_rule = out['placement_pools'][0] + # assert default_rule['key'] == 'default-placement' + + rule={'key': 'new-placement', 'val': {'data_pool': '.rgw.buckets.2', 'index_pool': '.rgw.buckets.index.2'}} + + out['placement_pools'].append(rule) + + (err, out) = rgwadmin(ctx, client, ['zone', 'set'], + stdin=BytesIO(six.ensure_binary(json.dumps(out))), + check_status=True) + + (err, out) = rgwadmin(ctx, client, ['zone', 'get']) + assert len(out) > 0 + assert len(out['placement_pools']) == orig_placement_pools + 1 + + zonecmd = ['zone', 'placement', 'rm', + '--rgw-zone', 'default', + '--placement-id', 'new-placement'] + + (err, out) = 
rgwadmin(ctx, client, zonecmd, check_status=True) + + # TESTCASE 'zonegroup-info', 'zonegroup', 'get', 'get zonegroup info', 'succeeds' + (err, out) = rgwadmin(ctx, client, ['zonegroup', 'get'], check_status=True) + +from teuthology.config import config +from teuthology.orchestra import cluster, remote +import argparse; + +def main(): + if len(sys.argv) == 3: + user = sys.argv[1] + "@" + host = sys.argv[2] + elif len(sys.argv) == 2: + user = "" + host = sys.argv[1] + else: + sys.stderr.write("usage: radosgw_admin.py [user] host\n") + exit(1) + client0 = remote.Remote(user + host) + ctx = config + ctx.cluster=cluster.Cluster(remotes=[(client0, + [ 'ceph.client.rgw.%s' % (host), ]),]) + + ctx.rgw = argparse.Namespace() + endpoints = {} + endpoints['ceph.client.rgw.%s' % host] = (host, 80) + ctx.rgw.role_endpoints = endpoints + ctx.rgw.realm = None + ctx.rgw.regions = {'region0': { 'api name': 'api1', + 'is master': True, 'master zone': 'r0z0', + 'zones': ['r0z0', 'r0z1'] }} + ctx.rgw.config = {'ceph.client.rgw.%s' % host: {'system user': {'name': '%s-system-user' % host}}} + task(config, None) + exit() + +if __name__ == '__main__': + main() diff --git a/qa/tasks/radosgw_admin_rest.py b/qa/tasks/radosgw_admin_rest.py new file mode 100644 index 00000000..24330ad3 --- /dev/null +++ b/qa/tasks/radosgw_admin_rest.py @@ -0,0 +1,721 @@ +""" +Run a series of rgw admin commands through the rest interface. + +The test cases in this file have been annotated for inventory. +To extract the inventory (in csv format) use the command: + + grep '^ *# TESTCASE' | sed 's/^ *# TESTCASE //' + +""" +import logging + + +import boto.exception +import boto.s3.connection +import boto.s3.acl + +import requests +import time + +from boto.connection import AWSAuthConnection +from teuthology import misc as teuthology +from tasks.util.rgw import get_user_summary, get_user_successful_ops, rgwadmin + +log = logging.getLogger(__name__) + +def rgwadmin_rest(connection, cmd, params=None, headers=None, raw=False): + """ + perform a rest command + """ + log.info('radosgw-admin-rest: %s %s' % (cmd, params)) + put_cmds = ['create', 'link', 'add'] + post_cmds = ['unlink', 'modify'] + delete_cmds = ['trim', 'rm', 'process'] + get_cmds = ['check', 'info', 'show', 'list'] + + bucket_sub_resources = ['object', 'policy', 'index'] + user_sub_resources = ['subuser', 'key', 'caps'] + zone_sub_resources = ['pool', 'log', 'garbage'] + + def get_cmd_method_and_handler(cmd): + """ + Get the rest command and handler from information in cmd and + from the imported requests object. + """ + if cmd[1] in put_cmds: + return 'PUT', requests.put + elif cmd[1] in delete_cmds: + return 'DELETE', requests.delete + elif cmd[1] in post_cmds: + return 'POST', requests.post + elif cmd[1] in get_cmds: + return 'GET', requests.get + + def get_resource(cmd): + """ + Get the name of the resource from information in cmd. 
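+ For example, ['user', 'info'] maps to ('user', '') and ['key', 'create'] maps to ('user', 'key').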
+ """ + if cmd[0] == 'bucket' or cmd[0] in bucket_sub_resources: + if cmd[0] == 'bucket': + return 'bucket', '' + else: + return 'bucket', cmd[0] + elif cmd[0] == 'user' or cmd[0] in user_sub_resources: + if cmd[0] == 'user': + return 'user', '' + else: + return 'user', cmd[0] + elif cmd[0] == 'usage': + return 'usage', '' + elif cmd[0] == 'zone' or cmd[0] in zone_sub_resources: + if cmd[0] == 'zone': + return 'zone', '' + else: + return 'zone', cmd[0] + + def build_admin_request(conn, method, resource = '', headers=None, data='', + query_args=None, params=None): + """ + Build an administative request adapted from the build_request() + method of boto.connection + """ + + path = conn.calling_format.build_path_base('admin', resource) + auth_path = conn.calling_format.build_auth_path('admin', resource) + host = conn.calling_format.build_host(conn.server_name(), 'admin') + if query_args: + path += '?' + query_args + boto.log.debug('path=%s' % path) + auth_path += '?' + query_args + boto.log.debug('auth_path=%s' % auth_path) + return AWSAuthConnection.build_base_http_request(conn, method, path, + auth_path, params, headers, data, host) + + method, handler = get_cmd_method_and_handler(cmd) + resource, query_args = get_resource(cmd) + request = build_admin_request(connection, method, resource, + query_args=query_args, headers=headers) + + url = '{protocol}://{host}{path}'.format(protocol=request.protocol, + host=request.host, path=request.path) + + request.authorize(connection=connection) + result = handler(url, params=params, headers=request.headers) + + if raw: + log.info(' text result: %s' % result.text) + return result.status_code, result.text + elif len(result.content) == 0: + # many admin requests return no body, so json() throws a JSONDecodeError + log.info(' empty result') + return result.status_code, None + else: + log.info(' json result: %s' % result.json()) + return result.status_code, result.json() + + +def task(ctx, config): + """ + Test radosgw-admin functionality through the RESTful interface + """ + assert config is None or isinstance(config, list) \ + or isinstance(config, dict), \ + "task s3tests only supports a list or dictionary for configuration" + all_clients = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + if config is None: + config = all_clients + if isinstance(config, list): + config = dict.fromkeys(config) + clients = config.keys() + + # just use the first client... + client = next(iter(clients)) + + ## + admin_user = 'ada' + admin_display_name = 'Ms. 
Admin User' + admin_access_key = 'MH1WC2XQ1S8UISFDZC8W' + admin_secret_key = 'dQyrTPA0s248YeN5bBv4ukvKU0kh54LWWywkrpoG' + admin_caps = 'users=read, write; usage=read, write; buckets=read, write; zone=read, write' + + user1 = 'foo' + user2 = 'fud' + subuser1 = 'foo:foo1' + subuser2 = 'foo:foo2' + display_name1 = 'Foo' + display_name2 = 'Fud' + email = 'foo@foo.com' + access_key = '9te6NH5mcdcq0Tc5i8i1' + secret_key = 'Ny4IOauQoL18Gp2zM7lC1vLmoawgqcYP/YGcWfXu' + access_key2 = 'p5YnriCv1nAtykxBrupQ' + secret_key2 = 'Q8Tk6Q/27hfbFSYdSkPtUqhqx1GgzvpXa4WARozh' + swift_secret1 = 'gpS2G9RREMrnbqlp29PP2D36kgPR1tm72n5fPYfL' + swift_secret2 = 'ri2VJQcKSYATOY6uaDUX7pxgkW+W1YmC6OCxPHwy' + + bucket_name = 'myfoo' + + # legend (test cases can be easily grep-ed out) + # TESTCASE 'testname','object','method','operation','assertion' + # TESTCASE 'create-admin-user','user','create','administrative user','succeeds' + (err, out) = rgwadmin(ctx, client, [ + 'user', 'create', + '--uid', admin_user, + '--display-name', admin_display_name, + '--access-key', admin_access_key, + '--secret', admin_secret_key, + '--max-buckets', '0', + '--caps', admin_caps + ]) + logging.error(out) + logging.error(err) + assert not err + + assert hasattr(ctx, 'rgw'), 'radosgw-admin-rest must run after the rgw task' + endpoint = ctx.rgw.role_endpoints.get(client) + assert endpoint, 'no rgw endpoint for {}'.format(client) + + admin_conn = boto.s3.connection.S3Connection( + aws_access_key_id=admin_access_key, + aws_secret_access_key=admin_secret_key, + is_secure=True if endpoint.cert else False, + port=endpoint.port, + host=endpoint.hostname, + calling_format=boto.s3.connection.OrdinaryCallingFormat(), + ) + + # TESTCASE 'info-nosuch','user','info','non-existent user','fails' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {"uid": user1}) + assert ret == 404 + + # TESTCASE 'create-ok','user','create','w/all valid info','succeeds' + (ret, out) = rgwadmin_rest(admin_conn, + ['user', 'create'], + {'uid' : user1, + 'display-name' : display_name1, + 'email' : email, + 'access-key' : access_key, + 'secret-key' : secret_key, + 'max-buckets' : '4' + }) + + assert ret == 200 + + # TESTCASE 'list-no-user','user','list','list user keys','user list object' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'list'], {'list' : '', 'max-entries' : 0}) + assert ret == 200 + assert out['count'] == 0 + assert out['truncated'] == True + assert len(out['keys']) == 0 + assert len(out['marker']) > 0 + + # TESTCASE 'list-user-without-marker','user','list','list user keys','user list object' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'list'], {'list' : '', 'max-entries' : 1}) + assert ret == 200 + assert out['count'] == 1 + assert out['truncated'] == True + assert len(out['keys']) == 1 + assert len(out['marker']) > 0 + marker = out['marker'] + + # TESTCASE 'list-user-with-marker','user','list','list user keys','user list object' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'list'], {'list' : '', 'max-entries' : 1, 'marker': marker}) + assert ret == 200 + assert out['count'] == 1 + assert out['truncated'] == False + assert len(out['keys']) == 1 + + # TESTCASE 'info-existing','user','info','existing user','returns correct info' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + + assert out['user_id'] == user1 + assert out['email'] == email + assert out['display_name'] == display_name1 + assert len(out['keys']) == 1 + assert out['keys'][0]['access_key'] == access_key + assert out['keys'][0]['secret_key'] == 
secret_key + assert not out['suspended'] + assert out['tenant'] == '' + assert out['max_buckets'] == 4 + assert out['caps'] == [] + assert out['op_mask'] == 'read, write, delete' + assert out['default_placement'] == '' + assert out['default_storage_class'] == '' + assert out['placement_tags'] == [] + assert not out['bucket_quota']['enabled'] + assert not out['bucket_quota']['check_on_raw'] + assert out['bucket_quota']['max_size'] == -1 + assert out['bucket_quota']['max_size_kb'] == 0 + assert out['bucket_quota']['max_objects'] == -1 + assert not out['user_quota']['enabled'] + assert not out['user_quota']['check_on_raw'] + assert out['user_quota']['max_size'] == -1 + assert out['user_quota']['max_size_kb'] == 0 + assert out['user_quota']['max_objects'] == -1 + assert out['temp_url_keys'] == [] + assert out['type'] == 'rgw' + assert out['mfa_ids'] == [] + # TESTCASE 'info-existing','user','info','existing user query with wrong uid but correct access key','returns correct info' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'access-key' : access_key, 'uid': 'uid_not_exist'}) + + assert out['user_id'] == user1 + assert out['email'] == email + assert out['display_name'] == display_name1 + assert len(out['keys']) == 1 + assert out['keys'][0]['access_key'] == access_key + assert out['keys'][0]['secret_key'] == secret_key + assert not out['suspended'] + assert out['tenant'] == '' + assert out['max_buckets'] == 4 + assert out['caps'] == [] + assert out['op_mask'] == "read, write, delete" + assert out['default_placement'] == '' + assert out['default_storage_class'] == '' + assert out['placement_tags'] == [] + assert not out['bucket_quota']['enabled'] + assert not out['bucket_quota']['check_on_raw'] + assert out ['bucket_quota']['max_size'] == -1 + assert out ['bucket_quota']['max_size_kb'] == 0 + assert out ['bucket_quota']['max_objects'] == -1 + assert not out['user_quota']['enabled'] + assert not out['user_quota']['check_on_raw'] + assert out['user_quota']['max_size'] == -1 + assert out['user_quota']['max_size_kb'] == 0 + assert out['user_quota']['max_objects'] == -1 + assert out['temp_url_keys'] == [] + assert out['type'] == 'rgw' + assert out['mfa_ids'] == [] + + # TESTCASE 'suspend-ok','user','suspend','active user','succeeds' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True}) + assert ret == 200 + + # TESTCASE 'suspend-suspended','user','suspend','suspended user','succeeds w/advisory' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + assert ret == 200 + assert out['suspended'] + assert out['email'] == email + + # TESTCASE 're-enable','user','enable','suspended user','succeeds' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'}) + assert not err + + # TESTCASE 'info-re-enabled','user','info','re-enabled user','no longer suspended' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + assert ret == 200 + assert not out['suspended'] + + # TESTCASE 'add-keys','key','create','w/valid info','succeeds' + (ret, out) = rgwadmin_rest(admin_conn, + ['key', 'create'], + {'uid' : user1, + 'access-key' : access_key2, + 'secret-key' : secret_key2 + }) + + + assert ret == 200 + + # TESTCASE 'info-new-key','user','info','after key addition','returns all keys' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + assert ret == 200 + assert len(out['keys']) == 2 + assert out['keys'][0]['access_key'] == access_key2 or 
out['keys'][1]['access_key'] == access_key2 + assert out['keys'][0]['secret_key'] == secret_key2 or out['keys'][1]['secret_key'] == secret_key2 + + # TESTCASE 'rm-key','key','rm','newly added key','succeeds, key is removed' + (ret, out) = rgwadmin_rest(admin_conn, + ['key', 'rm'], + {'uid' : user1, + 'access-key' : access_key2 + }) + + assert ret == 200 + + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + + assert len(out['keys']) == 1 + assert out['keys'][0]['access_key'] == access_key + assert out['keys'][0]['secret_key'] == secret_key + + # TESTCASE 'add-swift-key','key','create','swift key','succeeds' + (ret, out) = rgwadmin_rest(admin_conn, + ['subuser', 'create'], + {'subuser' : subuser1, + 'secret-key' : swift_secret1, + 'key-type' : 'swift' + }) + + assert ret == 200 + + # TESTCASE 'info-swift-key','user','info','after key addition','returns all keys' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + assert ret == 200 + assert len(out['swift_keys']) == 1 + assert out['swift_keys'][0]['user'] == subuser1 + assert out['swift_keys'][0]['secret_key'] == swift_secret1 + + # TESTCASE 'add-swift-subuser','key','create','swift sub-user key','succeeds' + (ret, out) = rgwadmin_rest(admin_conn, + ['subuser', 'create'], + {'subuser' : subuser2, + 'secret-key' : swift_secret2, + 'key-type' : 'swift' + }) + + assert ret == 200 + + # TESTCASE 'info-swift-subuser','user','info','after key addition','returns all sub-users/keys' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + assert ret == 200 + assert len(out['swift_keys']) == 2 + assert out['swift_keys'][0]['user'] == subuser2 or out['swift_keys'][1]['user'] == subuser2 + assert out['swift_keys'][0]['secret_key'] == swift_secret2 or out['swift_keys'][1]['secret_key'] == swift_secret2 + + # TESTCASE 'rm-swift-key1','key','rm','subuser','succeeds, one key is removed' + (ret, out) = rgwadmin_rest(admin_conn, + ['key', 'rm'], + {'subuser' : subuser1, + 'key-type' :'swift' + }) + + assert ret == 200 + + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + assert len(out['swift_keys']) == 1 + + # TESTCASE 'rm-subuser','subuser','rm','subuser','success, subuser is removed' + (ret, out) = rgwadmin_rest(admin_conn, + ['subuser', 'rm'], + {'subuser' : subuser1 + }) + + assert ret == 200 + + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + assert len(out['subusers']) == 1 + + # TESTCASE 'rm-subuser-with-keys','subuser','rm','subuser','succeeds, second subser and key is removed' + (ret, out) = rgwadmin_rest(admin_conn, + ['subuser', 'rm'], + {'subuser' : subuser2, + 'key-type' : 'swift', + '{purge-keys' :True + }) + + assert ret == 200 + + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + assert len(out['swift_keys']) == 0 + assert len(out['subusers']) == 0 + + # TESTCASE 'bucket-stats','bucket','info','no session/buckets','succeeds, empty list' + (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1}) + assert ret == 200 + assert len(out) == 0 + + # connect to rgw + connection = boto.s3.connection.S3Connection( + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + is_secure=True if endpoint.cert else False, + port=endpoint.port, + host=endpoint.hostname, + calling_format=boto.s3.connection.OrdinaryCallingFormat(), + ) + + # TESTCASE 'bucket-stats2','bucket','stats','no buckets','succeeds, empty list' + (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], 
{'uid' : user1, 'stats' : True}) + assert ret == 200 + assert len(out) == 0 + + # create a first bucket + bucket = connection.create_bucket(bucket_name) + + # TESTCASE 'bucket-list','bucket','list','one bucket','succeeds, expected list' + (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1}) + assert ret == 200 + assert len(out) == 1 + assert out[0] == bucket_name + + # TESTCASE 'bucket-stats3','bucket','stats','new empty bucket','succeeds, empty list' + (ret, out) = rgwadmin_rest(admin_conn, + ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True}) + + assert ret == 200 + assert out['owner'] == user1 + assert out['tenant'] == '' + bucket_id = out['id'] + + # TESTCASE 'bucket-stats4','bucket','stats','new empty bucket','succeeds, expected bucket ID' + (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'uid' : user1, 'stats' : True}) + assert ret == 200 + assert len(out) == 1 + assert out[0]['id'] == bucket_id # does it return the same ID twice in a row? + + # use some space + key = boto.s3.key.Key(bucket) + key.set_contents_from_string('one') + + # TESTCASE 'bucket-stats5','bucket','stats','after creating key','succeeds, lists one non-empty object' + (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True}) + assert ret == 200 + assert out['id'] == bucket_id + assert out['usage']['rgw.main']['num_objects'] == 1 + assert out['usage']['rgw.main']['size_kb'] > 0 + + # TESTCASE 'bucket-stats6', 'bucket', 'stats', 'non-existent bucket', 'fails, 'bucket not found error' + (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : 'doesnotexist'}) + assert ret == 404 + assert out['Code'] == 'NoSuchBucket' + + # reclaim it + key.delete() + + # TESTCASE 'bucket unlink', 'bucket', 'unlink', 'unlink bucket from user', 'fails', 'access denied error' + (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'unlink'], {'uid' : user1, 'bucket' : bucket_name}) + + assert ret == 200 + + # create a second user to link the bucket to + (ret, out) = rgwadmin_rest(admin_conn, + ['user', 'create'], + {'uid' : user2, + 'display-name' : display_name2, + 'access-key' : access_key2, + 'secret-key' : secret_key2, + 'max-buckets' : '1', + }) + + assert ret == 200 + + # try creating an object with the first user before the bucket is relinked + denied = False + key = boto.s3.key.Key(bucket) + + try: + key.set_contents_from_string('two') + except boto.exception.S3ResponseError: + denied = True + + assert not denied + + # delete the object + key.delete() + + # link the bucket to another user + (ret, out) = rgwadmin_rest(admin_conn, + ['bucket', 'link'], + {'uid' : user2, + 'bucket' : bucket_name, + 'bucket-id' : bucket_id, + }) + + assert ret == 200 + + # try creating an object with the first user which should cause an error + key = boto.s3.key.Key(bucket) + + try: + key.set_contents_from_string('three') + except boto.exception.S3ResponseError: + denied = True + + assert denied + + # relink the bucket to the first user and delete the second user + (ret, out) = rgwadmin_rest(admin_conn, + ['bucket', 'link'], + {'uid' : user1, + 'bucket' : bucket_name, + 'bucket-id' : bucket_id, + }) + assert ret == 200 + + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user2}) + assert ret == 200 + + # TESTCASE 'object-rm', 'object', 'rm', 'remove object', 'succeeds, object is removed' + + # upload an object + object_name = 'four' + key = boto.s3.key.Key(bucket, object_name) + key.set_contents_from_string(object_name) + + # now delete it 
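+    # ['object', 'rm'] is a delete_cmd on a bucket sub-resource, so rgwadmin_rest
+    # above sends DELETE /admin/bucket?object with the bucket and object names
+    # passed as request params.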
+ (ret, out) = rgwadmin_rest(admin_conn, ['object', 'rm'], {'bucket' : bucket_name, 'object' : object_name}) + assert ret == 200 + + # TESTCASE 'bucket-stats6','bucket','stats','after deleting key','succeeds, lists one no objects' + (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'info'], {'bucket' : bucket_name, 'stats' : True}) + assert ret == 200 + assert out['id'] == bucket_id + assert out['usage']['rgw.main']['num_objects'] == 0 + + # create a bucket for deletion stats + useless_bucket = connection.create_bucket('useless_bucket') + useless_key = useless_bucket.new_key('useless_key') + useless_key.set_contents_from_string('useless string') + + # delete it + useless_key.delete() + useless_bucket.delete() + + # wait for the statistics to flush + time.sleep(60) + + # need to wait for all usage data to get flushed, should take up to 30 seconds + timestamp = time.time() + while time.time() - timestamp <= (20 * 60): # wait up to 20 minutes + (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'categories' : 'delete_obj'}) # last operation we did is delete obj, wait for it to flush + + if get_user_successful_ops(out, user1) > 0: + break + time.sleep(1) + + assert time.time() - timestamp <= (20 * 60) + + # TESTCASE 'usage-show' 'usage' 'show' 'all usage' 'succeeds' + (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show']) + assert ret == 200 + assert len(out['entries']) > 0 + assert len(out['summary']) > 0 + user_summary = get_user_summary(out, user1) + total = user_summary['total'] + assert total['successful_ops'] > 0 + + # TESTCASE 'usage-show2' 'usage' 'show' 'user usage' 'succeeds' + (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1}) + assert ret == 200 + assert len(out['entries']) > 0 + assert len(out['summary']) > 0 + user_summary = out['summary'][0] + for entry in user_summary['categories']: + assert entry['successful_ops'] > 0 + assert user_summary['user'] == user1 + + # TESTCASE 'usage-show3' 'usage' 'show' 'user usage categories' 'succeeds' + test_categories = ['create_bucket', 'put_obj', 'delete_obj', 'delete_bucket'] + for cat in test_categories: + (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1, 'categories' : cat}) + assert ret == 200 + assert len(out['summary']) > 0 + user_summary = out['summary'][0] + assert user_summary['user'] == user1 + assert len(user_summary['categories']) == 1 + entry = user_summary['categories'][0] + assert entry['category'] == cat + assert entry['successful_ops'] > 0 + + # TESTCASE 'usage-trim' 'usage' 'trim' 'user usage' 'succeeds, usage removed' + (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'trim'], {'uid' : user1}) + assert ret == 200 + (ret, out) = rgwadmin_rest(admin_conn, ['usage', 'show'], {'uid' : user1}) + assert ret == 200 + assert len(out['entries']) == 0 + assert len(out['summary']) == 0 + + # TESTCASE 'user-suspend2','user','suspend','existing user','succeeds' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : True}) + assert ret == 200 + + # TESTCASE 'user-suspend3','user','suspend','suspended user','cannot write objects' + try: + key = boto.s3.key.Key(bucket) + key.set_contents_from_string('five') + except boto.exception.S3ResponseError as e: + assert e.status == 403 + + # TESTCASE 'user-renable2','user','enable','suspended user','succeeds' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'modify'], {'uid' : user1, 'suspended' : 'false'}) + assert ret == 200 + + # TESTCASE 'user-renable3','user','enable','reenabled user','can write 
objects' + key = boto.s3.key.Key(bucket) + key.set_contents_from_string('six') + + # TESTCASE 'garbage-list', 'garbage', 'list', 'get list of objects ready for garbage collection' + + # create an object large enough to be split into multiple parts + test_string = 'foo'*10000000 + + big_key = boto.s3.key.Key(bucket) + big_key.set_contents_from_string(test_string) + + # now delete the head + big_key.delete() + + # TESTCASE 'rm-user-buckets','user','rm','existing user','fails, still has buckets' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1}) + assert ret == 409 + + # delete should fail because ``key`` still exists + try: + bucket.delete() + except boto.exception.S3ResponseError as e: + assert e.status == 409 + + key.delete() + bucket.delete() + + # TESTCASE 'policy', 'bucket', 'policy', 'get bucket policy', 'returns S3 policy' + bucket = connection.create_bucket(bucket_name) + + # create an object + key = boto.s3.key.Key(bucket) + key.set_contents_from_string('seven') + + # should be private already but guarantee it + key.set_acl('private') + + (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key}) + assert ret == 200 + assert len(out['acl']['grant_map']) == 1 + + # add another grantee by making the object public read + key.set_acl('public-read') + + (ret, out) = rgwadmin_rest(admin_conn, ['policy', 'show'], {'bucket' : bucket.name, 'object' : key.key}) + assert ret == 200 + assert len(out['acl']['grant_map']) == 2 + + # TESTCASE 'rm-bucket', 'bucket', 'rm', 'bucket with objects', 'succeeds' + bucket = connection.create_bucket(bucket_name) + key_name = ['eight', 'nine', 'ten', 'eleven'] + for i in range(4): + key = boto.s3.key.Key(bucket) + key.set_contents_from_string(key_name[i]) + + (ret, out) = rgwadmin_rest(admin_conn, ['bucket', 'rm'], {'bucket' : bucket_name, 'purge-objects' : True}) + assert ret == 200 + + # TESTCASE 'caps-add', 'caps', 'add', 'add user cap', 'succeeds' + caps = 'usage=read' + (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'add'], {'uid' : user1, 'user-caps' : caps}) + assert ret == 200 + assert out[0]['perm'] == 'read' + + # TESTCASE 'caps-rm', 'caps', 'rm', 'remove existing cap from user', 'succeeds' + (ret, out) = rgwadmin_rest(admin_conn, ['caps', 'rm'], {'uid' : user1, 'user-caps' : caps}) + assert ret == 200 + assert not out + + # TESTCASE 'rm-user','user','rm','existing user','fails, still has buckets' + bucket = connection.create_bucket(bucket_name) + key = boto.s3.key.Key(bucket) + + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1}) + assert ret == 409 + + # TESTCASE 'rm-user2', 'user', 'rm', user with data', 'succeeds' + bucket = connection.create_bucket(bucket_name) + key = boto.s3.key.Key(bucket) + key.set_contents_from_string('twelve') + + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'rm'], {'uid' : user1, 'purge-data' : True}) + assert ret == 200 + + # TESTCASE 'rm-user3','user','info','deleted user','fails' + (ret, out) = rgwadmin_rest(admin_conn, ['user', 'info'], {'uid' : user1}) + assert ret == 404 + diff --git a/qa/tasks/ragweed.py b/qa/tasks/ragweed.py new file mode 100644 index 00000000..d906cdca --- /dev/null +++ b/qa/tasks/ragweed.py @@ -0,0 +1,390 @@ +""" +Run a set of s3 tests on rgw. 
+""" +from io import BytesIO +from configobj import ConfigObj +import base64 +import contextlib +import logging +import os +import random +import six +import string + +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.config import config as teuth_config +from teuthology.orchestra import run +from teuthology.orchestra.connection import split_user + +log = logging.getLogger(__name__) + + +def get_ragweed_branches(config, client_conf): + """ + figure out the ragweed branch according to the per-client settings + + use force-branch is specified, and fall back to the ones deduced using ceph + branch under testing + """ + force_branch = client_conf.get('force-branch', None) + if force_branch: + return [force_branch] + else: + S3_BRANCHES = ['master', 'nautilus', 'mimic', + 'luminous', 'kraken', 'jewel'] + ceph_branch = config.get('branch') + suite_branch = config.get('suite_branch', ceph_branch) + if suite_branch in S3_BRANCHES: + branch = client_conf.get('branch', 'ceph-' + suite_branch) + else: + branch = client_conf.get('branch', suite_branch) + default_branch = client_conf.get('default-branch', None) + if default_branch: + return [branch, default_branch] + else: + return [branch] + + +@contextlib.contextmanager +def download(ctx, config): + """ + Download the s3 tests from the git builder. + Remove downloaded s3 file upon exit. + + The context passed in should be identical to the context + passed in to the main task. + """ + assert isinstance(config, dict) + log.info('Downloading ragweed...') + testdir = teuthology.get_testdir(ctx) + for (client, cconf) in config.items(): + ragweed_repo = ctx.config.get('ragweed_repo', + teuth_config.ceph_git_base_url + 'ragweed.git') + for branch in get_ragweed_branches(ctx.config, cconf): + log.info("Using branch '%s' for ragweed", branch) + try: + ctx.cluster.only(client).sh( + script=f'git clone -b {branch} {ragweed_repo} {testdir}/ragweed') + break + except Exception as e: + exc = e + else: + raise exc + + sha1 = cconf.get('sha1') + if sha1 is not None: + ctx.cluster.only(client).run( + args=[ + 'cd', '{tdir}/ragweed'.format(tdir=testdir), + run.Raw('&&'), + 'git', 'reset', '--hard', sha1, + ], + ) + try: + yield + finally: + log.info('Removing ragweed...') + testdir = teuthology.get_testdir(ctx) + for client in config: + ctx.cluster.only(client).run( + args=[ + 'rm', + '-rf', + '{tdir}/ragweed'.format(tdir=testdir), + ], + ) + + +def _config_user(ragweed_conf, section, user): + """ + Configure users for this section by stashing away keys, ids, and + email addresses. + """ + ragweed_conf[section].setdefault('user_id', user) + ragweed_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user)) + ragweed_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user)) + ragweed_conf[section].setdefault('access_key', ''.join(random.choice(string.ascii_uppercase) for i in range(20))) + ragweed_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)).decode('ascii')) + + +@contextlib.contextmanager +def create_users(ctx, config, run_stages): + """ + Create a main and an alternate s3 user. 
+ """ + assert isinstance(config, dict) + + for client, properties in config['config'].items(): + run_stages[client] = properties.get('stages', 'prepare,check').split(',') + + log.info('Creating rgw users...') + testdir = teuthology.get_testdir(ctx) + users = {'user regular': 'ragweed', 'user system': 'sysuser'} + for client in config['clients']: + if not 'prepare' in run_stages[client]: + # should have been prepared in a previous run + continue + + ragweed_conf = config['ragweed_conf'][client] + ragweed_conf.setdefault('fixtures', {}) + ragweed_conf['rgw'].setdefault('bucket_prefix', 'test-' + client) + for section, user in users.items(): + _config_user(ragweed_conf, section, '{user}.{client}'.format(user=user, client=client)) + log.debug('Creating user {user} on {host}'.format(user=ragweed_conf[section]['user_id'], host=client)) + if user == 'sysuser': + sys_str = 'true' + else: + sys_str = 'false' + ctx.cluster.only(client).run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'radosgw-admin', + '-n', client, + 'user', 'create', + '--uid', ragweed_conf[section]['user_id'], + '--display-name', ragweed_conf[section]['display_name'], + '--access-key', ragweed_conf[section]['access_key'], + '--secret', ragweed_conf[section]['secret_key'], + '--email', ragweed_conf[section]['email'], + '--system', sys_str, + ], + ) + try: + yield + finally: + for client in config['clients']: + if not 'check' in run_stages[client]: + # only remove user if went through the check stage + continue + for user in users.values(): + uid = '{user}.{client}'.format(user=user, client=client) + ctx.cluster.only(client).run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'radosgw-admin', + '-n', client, + 'user', 'rm', + '--uid', uid, + '--purge-data', + ], + ) + + +@contextlib.contextmanager +def configure(ctx, config, run_stages): + """ + Configure the ragweed. This includes the running of the + bootstrap code and the updating of local conf files. 
+ """ + assert isinstance(config, dict) + log.info('Configuring ragweed...') + testdir = teuthology.get_testdir(ctx) + for client, properties in config['clients'].items(): + (remote,) = ctx.cluster.only(client).remotes.keys() + remote.run( + args=[ + 'cd', + '{tdir}/ragweed'.format(tdir=testdir), + run.Raw('&&'), + './bootstrap', + ], + ) + + preparing = 'prepare' in run_stages[client] + if not preparing: + # should have been prepared in a previous run + continue + + ragweed_conf = config['ragweed_conf'][client] + if properties is not None and 'rgw_server' in properties: + host = None + for target, roles in zip(ctx.config['targets'].keys(), ctx.config['roles']): + log.info('roles: ' + str(roles)) + log.info('target: ' + str(target)) + if properties['rgw_server'] in roles: + _, host = split_user(target) + assert host is not None, "Invalid client specified as the rgw_server" + ragweed_conf['rgw']['host'] = host + else: + ragweed_conf['rgw']['host'] = 'localhost' + + if properties is not None and 'slow_backend' in properties: + ragweed_conf['fixtures']['slow backend'] = properties['slow_backend'] + + conf_fp = BytesIO() + ragweed_conf.write(conf_fp) + teuthology.write_file( + remote=remote, + path='{tdir}/archive/ragweed.{client}.conf'.format(tdir=testdir, client=client), + data=conf_fp.getvalue(), + ) + + log.info('Configuring boto...') + boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template') + for client, properties in config['clients'].items(): + with open(boto_src, 'r') as f: + (remote,) = ctx.cluster.only(client).remotes.keys() + conf = f.read().format( + idle_timeout=config.get('idle_timeout', 30) + ) + teuthology.write_file( + remote=remote, + path='{tdir}/boto.cfg'.format(tdir=testdir), + data=conf, + ) + + try: + yield + + finally: + log.info('Cleaning up boto...') + for client, properties in config['clients'].items(): + (remote,) = ctx.cluster.only(client).remotes.keys() + remote.run( + args=[ + 'rm', + '{tdir}/boto.cfg'.format(tdir=testdir), + ], + ) + +@contextlib.contextmanager +def run_tests(ctx, config, run_stages): + """ + Run the ragweed after everything is set up. + + :param ctx: Context passed to task + :param config: specific configuration information + """ + assert isinstance(config, dict) + testdir = teuthology.get_testdir(ctx) + attrs = ["!fails_on_rgw"] + for client, client_config in config.items(): + stages = ','.join(run_stages[client]) + args = [ + 'RAGWEED_CONF={tdir}/archive/ragweed.{client}.conf'.format(tdir=testdir, client=client), + 'RAGWEED_STAGES={stages}'.format(stages=stages), + 'BOTO_CONFIG={tdir}/boto.cfg'.format(tdir=testdir), + '{tdir}/ragweed/virtualenv/bin/nosetests'.format(tdir=testdir), + '-w', + '{tdir}/ragweed'.format(tdir=testdir), + '-v', + '-a', ','.join(attrs), + ] + if client_config is not None and 'extra_args' in client_config: + args.extend(client_config['extra_args']) + + ctx.cluster.only(client).run( + args=args, + label="ragweed tests against rgw" + ) + yield + +@contextlib.contextmanager +def task(ctx, config): + """ + Run the ragweed suite against rgw. + + To run all tests on all clients:: + + tasks: + - ceph: + - rgw: + - ragweed: + + To restrict testing to particular clients:: + + tasks: + - ceph: + - rgw: [client.0] + - ragweed: [client.0] + + To run against a server on client.1 and increase the boto timeout to 10m:: + + tasks: + - ceph: + - rgw: [client.1] + - ragweed: + client.0: + rgw_server: client.1 + idle_timeout: 600 + stages: prepare,check + + To pass extra arguments to nose (e.g. 
to run a certain test):: + + tasks: + - ceph: + - rgw: [client.0] + - ragweed: + client.0: + extra_args: ['test_s3:test_object_acl_grand_public_read'] + client.1: + extra_args: ['--exclude', 'test_100_continue'] + """ + assert hasattr(ctx, 'rgw'), 'ragweed must run after the rgw task' + assert config is None or isinstance(config, list) \ + or isinstance(config, dict), \ + "task ragweed only supports a list or dictionary for configuration" + all_clients = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + if config is None: + config = all_clients + if isinstance(config, list): + config = dict.fromkeys(config) + clients = config.keys() + + overrides = ctx.config.get('overrides', {}) + # merge each client section, not the top level. + for client in config.keys(): + if not config[client]: + config[client] = {} + teuthology.deep_merge(config[client], overrides.get('ragweed', {})) + + log.debug('ragweed config is %s', config) + + ragweed_conf = {} + for client in clients: + endpoint = ctx.rgw.role_endpoints.get(client) + assert endpoint, 'ragweed: no rgw endpoint for {}'.format(client) + + ragweed_conf[client] = ConfigObj( + indent_type='', + infile={ + 'rgw': + { + 'port' : endpoint.port, + 'is_secure' : endpoint.cert is not None, + }, + 'fixtures' : {}, + 'user system' : {}, + 'user regular' : {}, + 'rados': + { + 'ceph_conf' : '/etc/ceph/ceph.conf', + }, + } + ) + + run_stages = {} + + with contextutil.nested( + lambda: download(ctx=ctx, config=config), + lambda: create_users(ctx=ctx, config=dict( + clients=clients, + ragweed_conf=ragweed_conf, + config=config, + ), + run_stages=run_stages), + lambda: configure(ctx=ctx, config=dict( + clients=config, + ragweed_conf=ragweed_conf, + ), + run_stages=run_stages), + lambda: run_tests(ctx=ctx, config=config, run_stages=run_stages), + ): + pass + yield diff --git a/qa/tasks/rbd.py b/qa/tasks/rbd.py new file mode 100644 index 00000000..b1183fb8 --- /dev/null +++ b/qa/tasks/rbd.py @@ -0,0 +1,628 @@ +""" +Rbd testing task +""" +import contextlib +import logging +import os +import tempfile +import sys + +from io import BytesIO +from teuthology.orchestra import run +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.parallel import parallel +from teuthology.task.common_fs_utils import generic_mkfs +from teuthology.task.common_fs_utils import generic_mount +from teuthology.task.common_fs_utils import default_image_name + +import six + +#V1 image unsupported but required for testing purposes +os.environ["RBD_FORCE_ALLOW_V1"] = "1" + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def create_image(ctx, config): + """ + Create an rbd image. + + For example:: + + tasks: + - ceph: + - rbd.create_image: + client.0: + image_name: testimage + image_size: 100 + image_format: 1 + client.1: + + Image size is expressed as a number of megabytes; default value + is 10240. + + Image format value must be either 1 or 2; default value is 1. 
+ + """ + assert isinstance(config, dict) or isinstance(config, list), \ + "task create_image only supports a list or dictionary for configuration" + + if isinstance(config, dict): + images = config.items() + else: + images = [(role, None) for role in config] + + testdir = teuthology.get_testdir(ctx) + for role, properties in images: + if properties is None: + properties = {} + name = properties.get('image_name', default_image_name(role)) + size = properties.get('image_size', 10240) + fmt = properties.get('image_format', 1) + (remote,) = ctx.cluster.only(role).remotes.keys() + log.info('Creating image {name} with size {size}'.format(name=name, + size=size)) + args = [ + 'adjust-ulimits', + 'ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + 'rbd', + '-p', 'rbd', + 'create', + '--size', str(size), + name, + ] + # omit format option if using the default (format 1) + # since old versions of don't support it + if int(fmt) != 1: + args += ['--image-format', str(fmt)] + remote.run(args=args) + try: + yield + finally: + log.info('Deleting rbd images...') + for role, properties in images: + if properties is None: + properties = {} + name = properties.get('image_name', default_image_name(role)) + (remote,) = ctx.cluster.only(role).remotes.keys() + remote.run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'rbd', + '-p', 'rbd', + 'rm', + name, + ], + ) + +@contextlib.contextmanager +def clone_image(ctx, config): + """ + Clones a parent imag + + For example:: + + tasks: + - ceph: + - rbd.clone_image: + client.0: + parent_name: testimage + image_name: cloneimage + """ + assert isinstance(config, dict) or isinstance(config, list), \ + "task clone_image only supports a list or dictionary for configuration" + + if isinstance(config, dict): + images = config.items() + else: + images = [(role, None) for role in config] + + testdir = teuthology.get_testdir(ctx) + for role, properties in images: + if properties is None: + properties = {} + + name = properties.get('image_name', default_image_name(role)) + parent_name = properties.get('parent_name') + assert parent_name is not None, \ + "parent_name is required" + parent_spec = '{name}@{snap}'.format(name=parent_name, snap=name) + + (remote,) = ctx.cluster.only(role).remotes.keys() + log.info('Clone image {parent} to {child}'.format(parent=parent_name, + child=name)) + for cmd in [('snap', 'create', parent_spec), + ('snap', 'protect', parent_spec), + ('clone', parent_spec, name)]: + args = [ + 'adjust-ulimits', + 'ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + 'rbd', '-p', 'rbd' + ] + args.extend(cmd) + remote.run(args=args) + + try: + yield + finally: + log.info('Deleting rbd clones...') + for role, properties in images: + if properties is None: + properties = {} + name = properties.get('image_name', default_image_name(role)) + parent_name = properties.get('parent_name') + parent_spec = '{name}@{snap}'.format(name=parent_name, snap=name) + + (remote,) = ctx.cluster.only(role).remotes.keys() + + for cmd in [('rm', name), + ('snap', 'unprotect', parent_spec), + ('snap', 'rm', parent_spec)]: + args = [ + 'adjust-ulimits', + 'ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + 'rbd', '-p', 'rbd' + ] + args.extend(cmd) + remote.run(args=args) + +@contextlib.contextmanager +def modprobe(ctx, config): + """ + Load the rbd kernel module.. 
+ + For example:: + + tasks: + - ceph: + - rbd.create_image: [client.0] + - rbd.modprobe: [client.0] + """ + log.info('Loading rbd kernel module...') + for role in config: + (remote,) = ctx.cluster.only(role).remotes.keys() + remote.run( + args=[ + 'sudo', + 'modprobe', + 'rbd', + ], + ) + try: + yield + finally: + log.info('Unloading rbd kernel module...') + for role in config: + (remote,) = ctx.cluster.only(role).remotes.keys() + remote.run( + args=[ + 'sudo', + 'modprobe', + '-r', + 'rbd', + # force errors to be ignored; necessary if more + # than one device was created, which may mean + # the module isn't quite ready to go the first + # time through. + run.Raw('||'), + 'true', + ], + ) + +@contextlib.contextmanager +def dev_create(ctx, config): + """ + Map block devices to rbd images. + + For example:: + + tasks: + - ceph: + - rbd.create_image: [client.0] + - rbd.modprobe: [client.0] + - rbd.dev_create: + client.0: testimage.client.0 + """ + assert isinstance(config, dict) or isinstance(config, list), \ + "task dev_create only supports a list or dictionary for configuration" + + if isinstance(config, dict): + role_images = config.items() + else: + role_images = [(role, None) for role in config] + + log.info('Creating rbd block devices...') + + testdir = teuthology.get_testdir(ctx) + + for role, image in role_images: + if image is None: + image = default_image_name(role) + (remote,) = ctx.cluster.only(role).remotes.keys() + + remote.run( + args=[ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'rbd', + '--user', role.rsplit('.')[-1], + '-p', 'rbd', + 'map', + image, + run.Raw('&&'), + # wait for the symlink to be created by udev + 'while', 'test', '!', '-e', '/dev/rbd/rbd/{image}'.format(image=image), run.Raw(';'), 'do', + 'sleep', '1', run.Raw(';'), + 'done', + ], + ) + try: + yield + finally: + log.info('Unmapping rbd devices...') + for role, image in role_images: + if image is None: + image = default_image_name(role) + (remote,) = ctx.cluster.only(role).remotes.keys() + remote.run( + args=[ + 'LD_LIBRARY_PATH={tdir}/binary/usr/local/lib'.format(tdir=testdir), + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'rbd', + '-p', 'rbd', + 'unmap', + '/dev/rbd/rbd/{imgname}'.format(imgname=image), + run.Raw('&&'), + # wait for the symlink to be deleted by udev + 'while', 'test', '-e', '/dev/rbd/rbd/{image}'.format(image=image), + run.Raw(';'), + 'do', + 'sleep', '1', run.Raw(';'), + 'done', + ], + ) + + +def rbd_devname_rtn(ctx, image): + return '/dev/rbd/rbd/{image}'.format(image=image) + +def canonical_path(ctx, role, path): + """ + Determine the canonical path for a given path on the host + representing the given role. A canonical path contains no + . or .. components, and includes no symbolic links. + """ + version_fp = BytesIO() + ctx.cluster.only(role).run( + args=[ 'readlink', '-f', path ], + stdout=version_fp, + ) + canonical_path = six.ensure_str(version_fp.getvalue()).rstrip('\n') + version_fp.close() + return canonical_path + +@contextlib.contextmanager +def run_xfstests(ctx, config): + """ + Run xfstests over specified devices. + + Warning: both the test and scratch devices specified will be + overwritten. Normally xfstests modifies (but does not destroy) + the test device, but for now the run script used here re-makes + both filesystems. + + Note: Only one instance of xfstests can run on a single host at + a time, although this is not enforced. 
+ + This task in its current form needs some improvement. For + example, it assumes all roles provided in the config are + clients, and that the config provided is a list of key/value + pairs. For now please use the xfstests() interface, below. + + For example:: + + tasks: + - ceph: + - rbd.run_xfstests: + client.0: + count: 2 + test_dev: 'test_dev' + scratch_dev: 'scratch_dev' + fs_type: 'xfs' + tests: 'generic/100 xfs/003 xfs/005 xfs/006 generic/015' + exclude: + - generic/42 + randomize: true + """ + with parallel() as p: + for role, properties in config.items(): + p.spawn(run_xfstests_one_client, ctx, role, properties) + exc_info = None + while True: + try: + p.next() + except StopIteration: + break + except: + exc_info = sys.exc_info() + if exc_info: + six.reraise(exc_info[0], exc_info[1], exc_info[2]) + yield + +def run_xfstests_one_client(ctx, role, properties): + """ + Spawned routine to handle xfs tests for a single client + """ + testdir = teuthology.get_testdir(ctx) + try: + count = properties.get('count') + test_dev = properties.get('test_dev') + assert test_dev is not None, \ + "task run_xfstests requires test_dev to be defined" + test_dev = canonical_path(ctx, role, test_dev) + + scratch_dev = properties.get('scratch_dev') + assert scratch_dev is not None, \ + "task run_xfstests requires scratch_dev to be defined" + scratch_dev = canonical_path(ctx, role, scratch_dev) + + fs_type = properties.get('fs_type') + tests = properties.get('tests') + exclude_list = properties.get('exclude') + randomize = properties.get('randomize') + + (remote,) = ctx.cluster.only(role).remotes.keys() + + # Fetch the test script + test_root = teuthology.get_testdir(ctx) + test_script = 'run_xfstests.sh' + test_path = os.path.join(test_root, test_script) + + xfstests_url = properties.get('xfstests_url') + assert xfstests_url is not None, \ + "task run_xfstests requires xfstests_url to be defined" + + xfstests_krbd_url = xfstests_url + '/' + test_script + + log.info('Fetching {script} for {role} from {url}'.format( + script=test_script, + role=role, + url=xfstests_krbd_url)) + + args = [ 'wget', '-O', test_path, '--', xfstests_krbd_url ] + remote.run(args=args) + + log.info('Running xfstests on {role}:'.format(role=role)) + log.info(' iteration count: {count}:'.format(count=count)) + log.info(' test device: {dev}'.format(dev=test_dev)) + log.info(' scratch device: {dev}'.format(dev=scratch_dev)) + log.info(' using fs_type: {fs_type}'.format(fs_type=fs_type)) + log.info(' tests to run: {tests}'.format(tests=tests)) + log.info(' exclude list: {}'.format(' '.join(exclude_list))) + log.info(' randomize: {randomize}'.format(randomize=randomize)) + + if exclude_list: + with tempfile.NamedTemporaryFile(mode='w', prefix='exclude') as exclude_file: + for test in exclude_list: + exclude_file.write("{}\n".format(test)) + exclude_file.flush() + remote.put_file(exclude_file.name, exclude_file.name) + + # Note that the device paths are interpreted using + # readlink -f in order to get their canonical + # pathname (so it matches what the kernel remembers). 
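+        # The assembled invocation ends up roughly like (illustrative values only):
+        #   sudo TESTDIR=<testdir> adjust-ulimits ceph-coverage <testdir>/archive/coverage \
+        #     /bin/bash <testdir>/run_xfstests.sh -c 2 -f xfs -t <test_dev> -s <scratch_dev> [-x <exclude file>] [-r] [-- <tests>]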
+ args = [ + '/usr/bin/sudo', + 'TESTDIR={tdir}'.format(tdir=testdir), + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + '/bin/bash', + test_path, + '-c', str(count), + '-f', fs_type, + '-t', test_dev, + '-s', scratch_dev, + ] + if exclude_list: + args.extend(['-x', exclude_file.name]) + if randomize: + args.append('-r') + if tests: + args.extend(['--', tests]) + remote.run(args=args, logger=log.getChild(role)) + finally: + log.info('Removing {script} on {role}'.format(script=test_script, + role=role)) + remote.run(args=['rm', '-f', test_path]) + +@contextlib.contextmanager +def xfstests(ctx, config): + """ + Run xfstests over rbd devices. This interface sets up all + required configuration automatically if not otherwise specified. + Note that only one instance of xfstests can run on a single host + at a time. By default, the set of tests specified is run once. + If a (non-zero) count value is supplied, the complete set of + tests will be run that number of times. + + For example:: + + tasks: + - ceph: + # Image sizes are in MB + - rbd.xfstests: + client.0: + count: 3 + test_image: 'test_image' + test_size: 250 + test_format: 2 + scratch_image: 'scratch_image' + scratch_size: 250 + scratch_format: 1 + fs_type: 'xfs' + tests: 'generic/100 xfs/003 xfs/005 xfs/006 generic/015' + exclude: + - generic/42 + randomize: true + xfstests_url: 'https://raw.github.com/ceph/ceph-ci/wip-55555/qa' + """ + if config is None: + config = { 'all': None } + assert isinstance(config, dict) or isinstance(config, list), \ + "task xfstests only supports a list or dictionary for configuration" + if isinstance(config, dict): + config = teuthology.replace_all_with_clients(ctx.cluster, config) + runs = config.items() + else: + runs = [(role, None) for role in config] + + running_xfstests = {} + for role, properties in runs: + assert role.startswith('client.'), \ + "task xfstests can only run on client nodes" + for host, roles_for_host in ctx.cluster.remotes.items(): + if role in roles_for_host: + assert host not in running_xfstests, \ + "task xfstests allows only one instance at a time per host" + running_xfstests[host] = True + + images_config = {} + scratch_config = {} + modprobe_config = {} + image_map_config = {} + scratch_map_config = {} + xfstests_config = {} + for role, properties in runs: + if properties is None: + properties = {} + + test_image = properties.get('test_image', 'test_image.{role}'.format(role=role)) + test_size = properties.get('test_size', 10000) # 10G + test_fmt = properties.get('test_format', 1) + scratch_image = properties.get('scratch_image', 'scratch_image.{role}'.format(role=role)) + scratch_size = properties.get('scratch_size', 10000) # 10G + scratch_fmt = properties.get('scratch_format', 1) + + images_config[role] = dict( + image_name=test_image, + image_size=test_size, + image_format=test_fmt, + ) + + scratch_config[role] = dict( + image_name=scratch_image, + image_size=scratch_size, + image_format=scratch_fmt, + ) + + xfstests_branch = properties.get('xfstests_branch', 'master') + xfstests_url = properties.get('xfstests_url', 'https://raw.github.com/ceph/ceph/{branch}/qa'.format(branch=xfstests_branch)) + + xfstests_config[role] = dict( + count=properties.get('count', 1), + test_dev='/dev/rbd/rbd/{image}'.format(image=test_image), + scratch_dev='/dev/rbd/rbd/{image}'.format(image=scratch_image), + fs_type=properties.get('fs_type', 'xfs'), + randomize=properties.get('randomize', False), + tests=properties.get('tests'), + 
exclude=properties.get('exclude', []), + xfstests_url=xfstests_url, + ) + + log.info('Setting up xfstests using RBD images:') + log.info(' test ({size} MB): {image}'.format(size=test_size, + image=test_image)) + log.info(' scratch ({size} MB): {image}'.format(size=scratch_size, + image=scratch_image)) + modprobe_config[role] = None + image_map_config[role] = test_image + scratch_map_config[role] = scratch_image + + with contextutil.nested( + lambda: create_image(ctx=ctx, config=images_config), + lambda: create_image(ctx=ctx, config=scratch_config), + lambda: modprobe(ctx=ctx, config=modprobe_config), + lambda: dev_create(ctx=ctx, config=image_map_config), + lambda: dev_create(ctx=ctx, config=scratch_map_config), + lambda: run_xfstests(ctx=ctx, config=xfstests_config), + ): + yield + + +@contextlib.contextmanager +def task(ctx, config): + """ + Create and mount an rbd image. + + For example, you can specify which clients to run on:: + + tasks: + - ceph: + - rbd: [client.0, client.1] + + There are a few image options:: + + tasks: + - ceph: + - rbd: + client.0: # uses defaults + client.1: + image_name: foo + image_size: 2048 + image_format: 2 + fs_type: xfs + + To use default options on all clients:: + + tasks: + - ceph: + - rbd: + all: + + To create 20GiB images and format them with xfs on all clients:: + + tasks: + - ceph: + - rbd: + all: + image_size: 20480 + fs_type: xfs + """ + if config is None: + config = { 'all': None } + norm_config = config + if isinstance(config, dict): + norm_config = teuthology.replace_all_with_clients(ctx.cluster, config) + if isinstance(norm_config, dict): + role_images = {} + for role, properties in norm_config.items(): + if properties is None: + properties = {} + role_images[role] = properties.get('image_name') + else: + role_images = norm_config + + log.debug('rbd config is: %s', norm_config) + + with contextutil.nested( + lambda: create_image(ctx=ctx, config=norm_config), + lambda: modprobe(ctx=ctx, config=norm_config), + lambda: dev_create(ctx=ctx, config=role_images), + lambda: generic_mkfs(ctx=ctx, config=norm_config, + devname_rtn=rbd_devname_rtn), + lambda: generic_mount(ctx=ctx, config=role_images, + devname_rtn=rbd_devname_rtn), + ): + yield diff --git a/qa/tasks/rbd_fio.py b/qa/tasks/rbd_fio.py new file mode 100644 index 00000000..4f321284 --- /dev/null +++ b/qa/tasks/rbd_fio.py @@ -0,0 +1,224 @@ +""" + Long running fio tests on rbd mapped devices for format/features provided in config + Many fio parameters can be configured so that this task can be used along with thrash/power-cut tests + and exercise IO on full disk for all format/features + - This test should not be run on VM due to heavy use of resource + +""" +import contextlib +import json +import logging +import os + +from teuthology.parallel import parallel +from teuthology import misc as teuthology +from tempfile import NamedTemporaryFile +from teuthology.orchestra import run +from teuthology.packaging import install_package, remove_package + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + client.0: + fio-io-size: 100g or 80% or 100m + fio-version: 2.2.9 + formats: [2] + features: [[layering],[striping],[layering,exclusive-lock,object-map]] + test-clone-io: 1 #remove this option to not run create rbd clone and not run io on clone + io-engine: "sync or rbd or any io-engine" + rw: randrw + client.1: + fio-io-size: 100g + fio-version: 2.2.9 + rw: read + image-size:20480 + +or + all: + fio-io-size: 400g + rw: randrw + formats: [2] + features: 
[[layering],[striping]] + io-engine: libaio + + Create rbd image + device and exercise IO for format/features provided in config file + Config can be per client or one config can be used for all clients, fio jobs are run in parallel for client provided + + """ + if config.get('all'): + client_config = config['all'] + clients = ctx.cluster.only(teuthology.is_type('client')) + rbd_test_dir = teuthology.get_testdir(ctx) + "/rbd_fio_test" + for remote,role in clients.remotes.items(): + if 'client_config' in locals(): + with parallel() as p: + p.spawn(run_fio, remote, client_config, rbd_test_dir) + else: + for client_config in config: + if client_config in role: + with parallel() as p: + p.spawn(run_fio, remote, config[client_config], rbd_test_dir) + + yield + + +def get_ioengine_package_name(ioengine, remote): + system_type = teuthology.get_system_type(remote) + if ioengine == 'rbd': + return 'librbd1-devel' if system_type == 'rpm' else 'librbd-dev' + elif ioengine == 'libaio': + return 'libaio-devel' if system_type == 'rpm' else 'libaio-dev' + else: + return None + + +def run_rbd_map(remote, image, iodepth): + iodepth = max(iodepth, 128) # RBD_QUEUE_DEPTH_DEFAULT + dev = remote.sh(['sudo', 'rbd', 'device', 'map', '-o', + 'queue_depth={}'.format(iodepth), image]).rstrip('\n') + teuthology.sudo_write_file( + remote, + '/sys/block/{}/queue/nr_requests'.format(os.path.basename(dev)), + str(iodepth)) + return dev + + +def run_fio(remote, config, rbd_test_dir): + """ + create fio config file with options based on above config + get the fio from github, generate binary, and use it to run on + the generated fio config file + """ + fio_config=NamedTemporaryFile(mode='w', prefix='fio_rbd_', dir='/tmp/', delete=False) + fio_config.write('[global]\n') + if config.get('io-engine'): + ioengine=config['io-engine'] + fio_config.write('ioengine={ioe}\n'.format(ioe=ioengine)) + else: + fio_config.write('ioengine=sync\n') + if config.get('bs'): + bs=config['bs'] + fio_config.write('bs={bs}\n'.format(bs=bs)) + else: + fio_config.write('bs=4k\n') + iodepth = config.get('io-depth', 2) + fio_config.write('iodepth={iod}\n'.format(iod=iodepth)) + if config.get('fio-io-size'): + size=config['fio-io-size'] + fio_config.write('size={size}\n'.format(size=size)) + else: + fio_config.write('size=100m\n') + + fio_config.write('time_based\n') + if config.get('runtime'): + runtime=config['runtime'] + fio_config.write('runtime={runtime}\n'.format(runtime=runtime)) + else: + fio_config.write('runtime=1800\n') + fio_config.write('allow_file_create=0\n') + image_size=10240 + if config.get('image_size'): + image_size=config['image_size'] + + formats=[1,2] + features=[['layering'],['striping'],['exclusive-lock','object-map']] + fio_version='2.21' + if config.get('formats'): + formats=config['formats'] + if config.get('features'): + features=config['features'] + if config.get('fio-version'): + fio_version=config['fio-version'] + + # handle package required for ioengine, if any + sn=remote.shortname + ioengine_pkg = get_ioengine_package_name(ioengine, remote) + if ioengine_pkg: + install_package(ioengine_pkg, remote) + + fio_config.write('norandommap\n') + if ioengine == 'rbd': + fio_config.write('clientname=admin\n') + fio_config.write('pool=rbd\n') + fio_config.write('invalidate=0\n') + elif ioengine == 'libaio': + fio_config.write('direct=1\n') + for frmt in formats: + for feature in features: + log.info("Creating rbd images on {sn}".format(sn=sn)) + feature_name = '-'.join(feature) + rbd_name = 
'i{i}f{f}{sn}'.format(i=frmt,f=feature_name,sn=sn) + rbd_snap_name = 'i{i}f{f}{sn}@i{i}f{f}{sn}Snap'.format(i=frmt,f=feature_name,sn=sn) + rbd_clone_name = 'i{i}f{f}{sn}Clone'.format(i=frmt,f=feature_name,sn=sn) + create_args=['rbd', 'create', + '--size', '{size}'.format(size=image_size), + '--image', rbd_name, + '--image-format', '{f}'.format(f=frmt)] + map(lambda x: create_args.extend(['--image-feature', x]), feature) + remote.run(args=create_args) + remote.run(args=['rbd', 'info', rbd_name]) + if ioengine != 'rbd': + rbd_dev = run_rbd_map(remote, rbd_name, iodepth) + if config.get('test-clone-io'): + log.info("Testing clones using fio") + remote.run(args=['rbd', 'snap', 'create', rbd_snap_name]) + remote.run(args=['rbd', 'snap', 'protect', rbd_snap_name]) + remote.run(args=['rbd', 'clone', rbd_snap_name, rbd_clone_name]) + rbd_clone_dev = run_rbd_map(remote, rbd_clone_name, iodepth) + fio_config.write('[{rbd_dev}]\n'.format(rbd_dev=rbd_dev)) + if config.get('rw'): + rw=config['rw'] + fio_config.write('rw={rw}\n'.format(rw=rw)) + else: + fio_config .write('rw=randrw\n') + fio_config.write('filename={rbd_dev}\n'.format(rbd_dev=rbd_dev)) + if config.get('test-clone-io'): + fio_config.write('[{rbd_clone_dev}]\n'.format(rbd_clone_dev=rbd_clone_dev)) + fio_config.write('rw={rw}\n'.format(rw=rw)) + fio_config.write('filename={rbd_clone_dev}\n'.format(rbd_clone_dev=rbd_clone_dev)) + else: + if config.get('test-clone-io'): + log.info("Testing clones using fio") + remote.run(args=['rbd', 'snap', 'create', rbd_snap_name]) + remote.run(args=['rbd', 'snap', 'protect', rbd_snap_name]) + remote.run(args=['rbd', 'clone', rbd_snap_name, rbd_clone_name]) + fio_config.write('[{img_name}]\n'.format(img_name=rbd_name)) + if config.get('rw'): + rw=config['rw'] + fio_config.write('rw={rw}\n'.format(rw=rw)) + else: + fio_config.write('rw=randrw\n') + fio_config.write('rbdname={img_name}\n'.format(img_name=rbd_name)) + if config.get('test-clone-io'): + fio_config.write('[{clone_img_name}]\n'.format(clone_img_name=rbd_clone_name)) + fio_config.write('rw={rw}\n'.format(rw=rw)) + fio_config.write('rbdname={clone_img_name}\n'.format(clone_img_name=rbd_clone_name)) + + + fio_config.close() + remote.put_file(fio_config.name,fio_config.name) + try: + log.info("Running rbd feature - fio test on {sn}".format(sn=sn)) + fio = "https://github.com/axboe/fio/archive/fio-" + fio_version + ".tar.gz" + remote.run(args=['mkdir', run.Raw(rbd_test_dir),]) + remote.run(args=['cd' , run.Raw(rbd_test_dir), + run.Raw(';'), 'wget', fio, run.Raw(';'), run.Raw('tar -xvf fio*tar.gz'), run.Raw(';'), + run.Raw('cd fio-fio*'), run.Raw(';'), './configure', run.Raw(';'), 'make']) + remote.run(args=['ceph', '-s']) + remote.run(args=[run.Raw('{tdir}/fio-fio-{v}/fio --showcmd {f}'.format(tdir=rbd_test_dir,v=fio_version,f=fio_config.name))]) + remote.run(args=['sudo', run.Raw('{tdir}/fio-fio-{v}/fio {f}'.format(tdir=rbd_test_dir,v=fio_version,f=fio_config.name))]) + remote.run(args=['ceph', '-s']) + finally: + out = remote.sh('rbd device list --format=json') + mapped_images = json.loads(out) + if mapped_images: + log.info("Unmapping rbd images on {sn}".format(sn=sn)) + for image in mapped_images: + remote.run(args=['sudo', 'rbd', 'device', 'unmap', + str(image['device'])]) + log.info("Cleaning up fio install") + remote.run(args=['rm','-rf', run.Raw(rbd_test_dir)]) + if ioengine_pkg: + remove_package(ioengine_pkg, remote) diff --git a/qa/tasks/rbd_fsx.py b/qa/tasks/rbd_fsx.py new file mode 100644 index 00000000..396d8fed --- /dev/null +++ 
b/qa/tasks/rbd_fsx.py @@ -0,0 +1,114 @@ +""" +Run fsx on an rbd image +""" +import contextlib +import logging + +from teuthology.exceptions import ConfigError +from teuthology.parallel import parallel +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Run fsx on an rbd image. + + Currently this requires running as client.admin + to create a pool. + + Specify which clients to run on as a list:: + + tasks: + ceph: + rbd_fsx: + clients: [client.0, client.1] + + You can optionally change some properties of fsx: + + tasks: + ceph: + rbd_fsx: + clients: + seed: + ops: + size: + valgrind: [--tool=] + """ + log.info('starting rbd_fsx...') + with parallel() as p: + for role in config['clients']: + p.spawn(_run_one_client, ctx, config, role) + yield + +def _run_one_client(ctx, config, role): + """Spawned task that runs the client""" + krbd = config.get('krbd', False) + nbd = config.get('nbd', False) + testdir = teuthology.get_testdir(ctx) + (remote,) = ctx.cluster.only(role).remotes.keys() + + args = [] + if krbd or nbd: + args.append('sudo') # rbd(-nbd) map/unmap need privileges + args.extend([ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir) + ]) + + overrides = ctx.config.get('overrides', {}) + teuthology.deep_merge(config, overrides.get('rbd_fsx', {})) + + if config.get('valgrind'): + args = teuthology.get_valgrind_args( + testdir, + 'fsx_{id}'.format(id=role), + args, + config.get('valgrind') + ) + + cluster_name, type_, client_id = teuthology.split_role(role) + if type_ != 'client': + msg = 'client role ({0}) must be a client'.format(role) + raise ConfigError(msg) + + args.extend([ + 'ceph_test_librbd_fsx', + '--cluster', cluster_name, + '--id', client_id, + '-d', # debug output for all operations + '-W', '-R', # mmap doesn't work with rbd + '-p', str(config.get('progress_interval', 100)), # show progress + '-P', '{tdir}/archive'.format(tdir=testdir), + '-r', str(config.get('readbdy',1)), + '-w', str(config.get('writebdy',1)), + '-t', str(config.get('truncbdy',1)), + '-h', str(config.get('holebdy',1)), + '-l', str(config.get('size', 250000000)), + '-S', str(config.get('seed', 0)), + '-N', str(config.get('ops', 1000)), + ]) + if krbd: + args.append('-K') # -K enables krbd mode + if nbd: + args.append('-M') # -M enables nbd mode + if config.get('direct_io', False): + args.append('-Z') # -Z use direct IO + if not config.get('randomized_striping', True): + args.append('-U') # -U disables randomized striping + if not config.get('punch_holes', True): + args.append('-H') # -H disables discard ops + if config.get('deep_copy', False): + args.append('-g') # -g deep copy instead of clone + if config.get('journal_replay', False): + args.append('-j') # -j replay all IO events from journal + if config.get('keep_images', False): + args.append('-k') # -k keep images on success + args.extend([ + config.get('pool_name', 'pool_{pool}'.format(pool=role)), + 'image_{image}'.format(image=role), + ]) + + remote.run(args=args) diff --git a/qa/tasks/rbd_mirror.py b/qa/tasks/rbd_mirror.py new file mode 100644 index 00000000..5d6d1b2b --- /dev/null +++ b/qa/tasks/rbd_mirror.py @@ -0,0 +1,119 @@ +""" +Task for running rbd mirroring daemons and configuring mirroring +""" + +import logging + +from teuthology.orchestra import run +from teuthology import misc +from teuthology.exceptions import ConfigError +from teuthology.task import Task +from tasks.util import get_remote_for_role + +log = 
logging.getLogger(__name__) + + +class RBDMirror(Task): + """ + Run an rbd-mirror daemon to sync rbd images between clusters. + + This requires two clients (one from each cluster) on the same host + to connect with. The pool configuration should be adjusted by later + test scripts to include the remote client and cluster name. This task + just needs to know how to connect to the local cluster. + + For example: + + roles: + - [primary.mon.a, primary.osd.0, primary.osd.1, primary.osd.2] + - [secondary.mon.a, secondary.osd.0, secondary.osd.1, secondary.osd.2] + - [primary.client.mirror, secondary.client.mirror] + tasks: + - ceph: + cluster: primary + - ceph: + cluster: secondary + - rbd-mirror: + client: primary.client.mirror + + To mirror back to the primary cluster as well, add another + rbd_mirror instance: + + - rbd-mirror: + client: secondary.client.mirror + + Possible options for this task are: + + client: role - ceph client to connect as + valgrind: [--tool=] - none by default + coverage: bool - whether this run may be collecting coverage data + thrash: bool - whether this run may be thrashed + """ + def __init__(self, ctx, config): + super(RBDMirror, self).__init__(ctx, config) + self.log = log + + def setup(self): + super(RBDMirror, self).setup() + try: + self.client = self.config['client'] + except KeyError: + raise ConfigError('rbd-mirror requires a client to connect with') + + self.cluster_name, type_, self.client_id = misc.split_role(self.client) + + if type_ != 'client': + msg = 'client role ({0}) must be a client'.format(self.client) + raise ConfigError(msg) + + self.remote = get_remote_for_role(self.ctx, self.client) + + def begin(self): + super(RBDMirror, self).begin() + testdir = misc.get_testdir(self.ctx) + daemon_signal = 'kill' + if 'coverage' in self.config or 'valgrind' in self.config or \ + self.config.get('thrash', False): + daemon_signal = 'term' + + args = [ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'daemon-helper', + daemon_signal, + ] + + if 'valgrind' in self.config: + args = misc.get_valgrind_args( + testdir, + 'rbd-mirror-{id}'.format(id=self.client), + args, + self.config.get('valgrind') + ) + + args.extend([ + 'rbd-mirror', '--foreground', + '--cluster', + self.cluster_name, + '--id', + self.client_id, + ]) + + self.ctx.daemons.add_daemon( + self.remote, 'rbd-mirror', self.client, + cluster=self.cluster_name, + args=args, + logger=self.log.getChild(self.client), + stdin=run.PIPE, + wait=False, + ) + + def end(self): + mirror_daemon = self.ctx.daemons.get_daemon('rbd-mirror', + self.client, + self.cluster_name) + mirror_daemon.stop() + super(RBDMirror, self).end() + +task = RBDMirror diff --git a/qa/tasks/rbd_mirror_thrash.py b/qa/tasks/rbd_mirror_thrash.py new file mode 100644 index 00000000..67e1c332 --- /dev/null +++ b/qa/tasks/rbd_mirror_thrash.py @@ -0,0 +1,214 @@ +""" +Task for thrashing rbd-mirror daemons +""" + +import contextlib +import logging +import random +import signal +import socket +import time + +from gevent import sleep +from gevent.greenlet import Greenlet +from gevent.event import Event + +from teuthology.exceptions import CommandFailedError +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + + +class RBDMirrorThrasher(Greenlet): + """ + RBDMirrorThrasher:: + + The RBDMirrorThrasher thrashes rbd-mirror daemons during execution of other + tasks (workunits, etc). + + The config is optional. 
Many of the config parameters are a maximum value + to use when selecting a random value from a range. The config is a dict + containing some or all of: + + cluster: [default: ceph] cluster to thrash + + max_thrash: [default: 1] the maximum number of active rbd-mirror daemons per + cluster will be thrashed at any given time. + + min_thrash_delay: [default: 60] minimum number of seconds to delay before + thrashing again. + + max_thrash_delay: [default: 120] maximum number of seconds to delay before + thrashing again. + + max_revive_delay: [default: 10] maximum number of seconds to delay before + bringing back a thrashed rbd-mirror daemon. + + randomize: [default: true] enables randomization and use the max/min values + + seed: [no default] seed the random number generator + + Examples:: + + The following example disables randomization, and uses the max delay + values: + + tasks: + - ceph: + - rbd_mirror_thrash: + randomize: False + max_thrash_delay: 10 + """ + + def __init__(self, ctx, config, cluster, daemons): + Greenlet.__init__(self) + + self.ctx = ctx + self.config = config + self.cluster = cluster + self.daemons = daemons + + self.e = None + self.logger = log + self.name = 'thrasher.rbd_mirror.[{cluster}]'.format(cluster = cluster) + self.stopping = Event() + + self.randomize = bool(self.config.get('randomize', True)) + self.max_thrash = int(self.config.get('max_thrash', 1)) + self.min_thrash_delay = float(self.config.get('min_thrash_delay', 60.0)) + self.max_thrash_delay = float(self.config.get('max_thrash_delay', 120.0)) + self.max_revive_delay = float(self.config.get('max_revive_delay', 10.0)) + + def _run(self): + try: + self.do_thrash() + except Exception as e: + self.e = e + self.logger.exception("exception:") + + def log(self, x): + """Write data to logger assigned to this RBDMirrorThrasher""" + self.logger.info(x) + + def stop(self): + self.stopping.set() + + def do_thrash(self): + """ + Perform the random thrashing action + """ + + self.log('starting thrash for cluster {cluster}'.format(cluster=self.cluster)) + stats = { + "kill": 0, + } + + while not self.stopping.is_set(): + delay = self.max_thrash_delay + if self.randomize: + delay = random.randrange(self.min_thrash_delay, self.max_thrash_delay) + + if delay > 0.0: + self.log('waiting for {delay} secs before thrashing'.format(delay=delay)) + self.stopping.wait(delay) + if self.stopping.is_set(): + continue + + killed_daemons = [] + + weight = 1.0 / len(self.daemons) + count = 0 + for daemon in self.daemons: + skip = random.uniform(0.0, 1.0) + if weight <= skip: + self.log('skipping daemon {label} with skip ({skip}) > weight ({weight})'.format( + label=daemon.id_, skip=skip, weight=weight)) + continue + + self.log('kill {label}'.format(label=daemon.id_)) + try: + daemon.signal(signal.SIGTERM) + except socket.error: + pass + killed_daemons.append(daemon) + stats['kill'] += 1 + + # if we've reached max_thrash, we're done + count += 1 + if count >= self.max_thrash: + break + + if killed_daemons: + # wait for a while before restarting + delay = self.max_revive_delay + if self.randomize: + delay = random.randrange(0.0, self.max_revive_delay) + + self.log('waiting for {delay} secs before reviving daemons'.format(delay=delay)) + sleep(delay) + + for daemon in killed_daemons: + self.log('waiting for {label}'.format(label=daemon.id_)) + try: + run.wait([daemon.proc], timeout=600) + except CommandFailedError: + pass + except: + self.log('Failed to stop {label}'.format(label=daemon.id_)) + + try: + # try to capture a core dump + 
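+ # (after the core-dump attempt, the bare 'raise' below re-raises the original error so an unexpected failure is not silently swallowed)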
daemon.signal(signal.SIGABRT) + except socket.error: + pass + raise + finally: + daemon.reset() + + for daemon in killed_daemons: + self.log('reviving {label}'.format(label=daemon.id_)) + daemon.start() + + for stat in stats: + self.log("stat['{key}'] = {value}".format(key = stat, value = stats[stat])) + +@contextlib.contextmanager +def task(ctx, config): + """ + Stress test the rbd-mirror by thrashing while another task/workunit + is running. + + Please refer to RBDMirrorThrasher class for further information on the + available options. + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'rbd_mirror_thrash task only accepts a dict for configuration' + + cluster = config.get('cluster', 'ceph') + daemons = list(ctx.daemons.iter_daemons_of_role('rbd-mirror', cluster)) + assert len(daemons) > 0, \ + 'rbd_mirror_thrash task requires at least 1 rbd-mirror daemon' + + # choose random seed + if 'seed' in config: + seed = int(config['seed']) + else: + seed = int(time.time()) + log.info('rbd_mirror_thrash using random seed: {seed}'.format(seed=seed)) + random.seed(seed) + + thrasher = RBDMirrorThrasher(ctx, config, cluster, daemons) + thrasher.start() + + try: + log.debug('Yielding') + yield + finally: + log.info('joining rbd_mirror_thrash') + thrasher.stop() + if thrasher.e: + raise RuntimeError('error during thrashing') + thrasher.join() + log.info('done joining') diff --git a/qa/tasks/rebuild_mondb.py b/qa/tasks/rebuild_mondb.py new file mode 100644 index 00000000..008e312e --- /dev/null +++ b/qa/tasks/rebuild_mondb.py @@ -0,0 +1,224 @@ +""" +Test if we can recover the leveldb from OSD after where all leveldbs are +corrupted +""" + +import logging +import os.path +import shutil +import tempfile + +from tasks import ceph_manager +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + + +def _push_directory(path, remote, remote_dir): + """ + local_temp_path=`mktemp` + tar czf $local_temp_path $path + ssh remote mkdir -p remote_dir + remote_temp_path=`mktemp` + scp $local_temp_path $remote_temp_path + rm $local_temp_path + tar xzf $remote_temp_path -C $remote_dir + ssh remote:$remote_temp_path + """ + fd, local_temp_path = tempfile.mkstemp(suffix='.tgz', + prefix='rebuild_mondb-') + os.close(fd) + cmd = ' '.join(['tar', 'cz', + '-f', local_temp_path, + '-C', path, + '--', '.']) + teuthology.sh(cmd) + _, fname = os.path.split(local_temp_path) + fd, remote_temp_path = tempfile.mkstemp(suffix='.tgz', + prefix='rebuild_mondb-') + os.close(fd) + remote.put_file(local_temp_path, remote_temp_path) + os.remove(local_temp_path) + remote.run(args=['sudo', + 'tar', 'xz', + '-C', remote_dir, + '-f', remote_temp_path]) + remote.run(args=['sudo', 'rm', '-fr', remote_temp_path]) + + +def _nuke_mons(manager, mons, mon_id): + assert mons + is_mon = teuthology.is_type('mon') + for remote, roles in mons.remotes.items(): + for role in roles: + if not is_mon(role): + continue + cluster, _, m = teuthology.split_role(role) + log.info('killing {cluster}:mon.{mon}'.format( + cluster=cluster, + mon=m)) + manager.kill_mon(m) + mon_data = os.path.join('/var/lib/ceph/mon/', + '{0}-{1}'.format(cluster, m)) + if m == mon_id: + # so we will only need to recreate the store.db for the + # first mon, would be easier than mkfs on it then replace + # the its store.db with the recovered one + store_dir = os.path.join(mon_data, 'store.db') + remote.run(args=['sudo', 'rm', '-r', store_dir]) + else: + remote.run(args=['sudo', 'rm', '-r', mon_data]) + + +def _rebuild_db(ctx, manager, 
cluster_name, mon, mon_id, keyring_path): + local_mstore = tempfile.mkdtemp() + + # collect the maps from all OSDs + is_osd = teuthology.is_type('osd') + osds = ctx.cluster.only(is_osd) + assert osds + for osd, roles in osds.remotes.items(): + for role in roles: + if not is_osd(role): + continue + cluster, _, osd_id = teuthology.split_role(role) + assert cluster_name == cluster + log.info('collecting maps from {cluster}:osd.{osd}'.format( + cluster=cluster, + osd=osd_id)) + # push leveldb to OSD + osd_mstore = os.path.join(teuthology.get_testdir(ctx), 'mon-store') + osd.run(args=['sudo', 'mkdir', '-m', 'o+x', '-p', osd_mstore]) + + _push_directory(local_mstore, osd, osd_mstore) + log.info('rm -rf {0}'.format(local_mstore)) + shutil.rmtree(local_mstore) + # update leveldb with OSD data + options = '--no-mon-config --op update-mon-db --mon-store-path {0}' + log.info('cot {0}'.format(osd_mstore)) + manager.objectstore_tool(pool=None, + options=options.format(osd_mstore), + args='', + osd=osd_id, + do_revive=False) + # pull the updated mon db + log.info('pull dir {0} -> {1}'.format(osd_mstore, local_mstore)) + local_mstore = tempfile.mkdtemp() + teuthology.pull_directory(osd, osd_mstore, local_mstore) + log.info('rm -rf osd:{0}'.format(osd_mstore)) + osd.run(args=['sudo', 'rm', '-fr', osd_mstore]) + + # recover the first_mon with re-built mon db + # pull from recovered leveldb from client + mon_store_dir = os.path.join('/var/lib/ceph/mon', + '{0}-{1}'.format(cluster_name, mon_id)) + _push_directory(local_mstore, mon, mon_store_dir) + mon.run(args=['sudo', 'chown', '-R', 'ceph:ceph', mon_store_dir]) + shutil.rmtree(local_mstore) + + # fill up the caps in the keyring file + mon.run(args=['sudo', + 'ceph-authtool', keyring_path, + '-n', 'mon.', + '--cap', 'mon', 'allow *']) + mon.run(args=['sudo', + 'ceph-authtool', keyring_path, + '-n', 'client.admin', + '--cap', 'mon', 'allow *', + '--cap', 'osd', 'allow *', + '--cap', 'mds', 'allow *', + '--cap', 'mgr', 'allow *']) + mon.run(args=['sudo', '-u', 'ceph', + 'CEPH_ARGS=--no-mon-config', + 'ceph-monstore-tool', mon_store_dir, + 'rebuild', '--', + '--keyring', keyring_path, + '--monmap', '/tmp/monmap', + ]) + + +def _revive_mons(manager, mons, recovered, keyring_path): + # revive monitors + # the initial monmap is in the ceph.conf, so we are good. 
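+ # run mkfs for every monitor except the recovered one (its store.db was rebuilt above), then revive each monitor and wait for it to join the quorum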
+ n_mons = 0 + is_mon = teuthology.is_type('mon') + for remote, roles in mons.remotes.items(): + for role in roles: + if not is_mon(role): + continue + cluster, _, m = teuthology.split_role(role) + if recovered != m: + log.info('running mkfs on {cluster}:mon.{mon}'.format( + cluster=cluster, + mon=m)) + remote.run( + args=[ + 'sudo', + 'ceph-mon', + '--cluster', cluster, + '--mkfs', + '-i', m, + '--keyring', keyring_path, + '--monmap', '/tmp/monmap']) + log.info('reviving mon.{0}'.format(m)) + manager.revive_mon(m) + n_mons += 1 + manager.wait_for_mon_quorum_size(n_mons, timeout=30) + + +def _revive_mgrs(ctx, manager): + is_mgr = teuthology.is_type('mgr') + mgrs = ctx.cluster.only(is_mgr) + for _, roles in mgrs.remotes.items(): + for role in roles: + if not is_mgr(role): + continue + _, _, mgr_id = teuthology.split_role(role) + log.info('reviving mgr.{0}'.format(mgr_id)) + manager.revive_mgr(mgr_id) + + +def _revive_osds(ctx, manager): + is_osd = teuthology.is_type('osd') + osds = ctx.cluster.only(is_osd) + for _, roles in osds.remotes.items(): + for role in roles: + if not is_osd(role): + continue + _, _, osd_id = teuthology.split_role(role) + log.info('reviving osd.{0}'.format(osd_id)) + manager.revive_osd(osd_id) + + +def task(ctx, config): + """ + Test monitor recovery from OSD + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'task only accepts a dict for configuration' + + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + + # stash a monmap for later + mon.run(args=['ceph', 'mon', 'getmap', '-o', '/tmp/monmap']) + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager')) + + mons = ctx.cluster.only(teuthology.is_type('mon')) + # note down the first cluster_name and mon_id + # we will recover it later on + cluster_name, _, mon_id = teuthology.split_role(first_mon) + _nuke_mons(manager, mons, mon_id) + default_keyring = '/etc/ceph/{cluster}.keyring'.format( + cluster=cluster_name) + keyring_path = config.get('keyring_path', default_keyring) + _rebuild_db(ctx, manager, cluster_name, mon, mon_id, keyring_path) + _revive_mons(manager, mons, mon_id, keyring_path) + _revive_mgrs(ctx, manager) + _revive_osds(ctx, manager) diff --git a/qa/tasks/reg11184.py b/qa/tasks/reg11184.py new file mode 100644 index 00000000..86cfbf39 --- /dev/null +++ b/qa/tasks/reg11184.py @@ -0,0 +1,242 @@ +""" +Special regression test for tracker #11184 + +Synopsis: osd/SnapMapper.cc: 282: FAILED assert(check(oid)) + +This is accomplished by moving a pg that wasn't part of split and still include +divergent priors. +""" +import logging +import time + +from teuthology.exceptions import CommandFailedError +from teuthology.orchestra import run +from teuthology import misc as teuthology +from tasks.util.rados import rados +import os + + +log = logging.getLogger(__name__) + + +def task(ctx, config): + """ + Test handling of divergent entries during export / import + to regression test tracker #11184 + + overrides: + ceph: + conf: + osd: + debug osd: 5 + + Requires 3 osds on a single test node. 
+ """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'divergent_priors task only accepts a dict for configuration' + + manager = ctx.managers['ceph'] + + while len(manager.get_osd_status()['up']) < 3: + time.sleep(10) + osds = [0, 1, 2] + manager.flush_pg_stats(osds) + manager.raw_cluster_cmd('osd', 'set', 'noout') + manager.raw_cluster_cmd('osd', 'set', 'noin') + manager.raw_cluster_cmd('osd', 'set', 'nodown') + manager.wait_for_clean() + + # something that is always there + dummyfile = '/etc/fstab' + dummyfile2 = '/etc/resolv.conf' + testdir = teuthology.get_testdir(ctx) + + # create 1 pg pool + log.info('creating foo') + manager.raw_cluster_cmd('osd', 'pool', 'create', 'foo', '1') + manager.raw_cluster_cmd( + 'osd', 'pool', 'application', 'enable', + 'foo', 'rados', run.Raw('||'), 'true') + + # Remove extra pool to simlify log output + manager.raw_cluster_cmd('osd', 'pool', 'delete', 'rbd', 'rbd', '--yes-i-really-really-mean-it') + + for i in osds: + manager.set_config(i, osd_min_pg_log_entries=10) + manager.set_config(i, osd_max_pg_log_entries=10) + manager.set_config(i, osd_pg_log_trim_min=5) + + # determine primary + divergent = manager.get_pg_primary('foo', 0) + log.info("primary and soon to be divergent is %d", divergent) + non_divergent = list(osds) + non_divergent.remove(divergent) + + log.info('writing initial objects') + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + # write 100 objects + for i in range(100): + rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile]) + + manager.wait_for_clean() + + # blackhole non_divergent + log.info("blackholing osds %s", str(non_divergent)) + for i in non_divergent: + manager.set_config(i, objectstore_blackhole=1) + + DIVERGENT_WRITE = 5 + DIVERGENT_REMOVE = 5 + # Write some soon to be divergent + log.info('writing divergent objects') + for i in range(DIVERGENT_WRITE): + rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, + dummyfile2], wait=False) + # Remove some soon to be divergent + log.info('remove divergent objects') + for i in range(DIVERGENT_REMOVE): + rados(ctx, mon, ['-p', 'foo', 'rm', + 'existing_%d' % (i + DIVERGENT_WRITE)], wait=False) + time.sleep(10) + mon.run( + args=['killall', '-9', 'rados'], + wait=True, + check_status=False) + + # kill all the osds but leave divergent in + log.info('killing all the osds') + for i in osds: + manager.kill_osd(i) + for i in osds: + manager.mark_down_osd(i) + for i in non_divergent: + manager.mark_out_osd(i) + + # bring up non-divergent + log.info("bringing up non_divergent %s", str(non_divergent)) + for i in non_divergent: + manager.revive_osd(i) + for i in non_divergent: + manager.mark_in_osd(i) + + # write 1 non-divergent object (ensure that old divergent one is divergent) + objname = "existing_%d" % (DIVERGENT_WRITE + DIVERGENT_REMOVE) + log.info('writing non-divergent object ' + objname) + rados(ctx, mon, ['-p', 'foo', 'put', objname, dummyfile2]) + + manager.wait_for_recovery() + + # ensure no recovery of up osds first + log.info('delay recovery') + for i in non_divergent: + manager.wait_run_admin_socket( + 'osd', i, ['set_recovery_delay', '100000']) + + # bring in our divergent friend + log.info("revive divergent %d", divergent) + manager.raw_cluster_cmd('osd', 'set', 'noup') + manager.revive_osd(divergent) + + log.info('delay recovery divergent') + manager.wait_run_admin_socket( + 'osd', divergent, ['set_recovery_delay', '100000']) + + manager.raw_cluster_cmd('osd', 'unset', 'noup') + 
while len(manager.get_osd_status()['up']) < 3: + time.sleep(10) + + log.info('wait for peering') + rados(ctx, mon, ['-p', 'foo', 'put', 'foo', dummyfile]) + + # At this point the divergent_priors should have been detected + + log.info("killing divergent %d", divergent) + manager.kill_osd(divergent) + + # Split pgs for pool foo + manager.raw_cluster_cmd('osd', 'pool', 'set', 'foo', 'pg_num', '2') + time.sleep(5) + + manager.raw_cluster_cmd('pg','dump') + + # Export a pg + (exp_remote,) = ctx.\ + cluster.only('osd.{o}'.format(o=divergent)).remotes.keys() + FSPATH = manager.get_filepath() + JPATH = os.path.join(FSPATH, "journal") + prefix = ("sudo adjust-ulimits ceph-objectstore-tool " + "--data-path {fpath} --journal-path {jpath} " + "--log-file=" + "/var/log/ceph/objectstore_tool.$$.log ". + format(fpath=FSPATH, jpath=JPATH)) + pid = os.getpid() + expfile = os.path.join(testdir, "exp.{pid}.out".format(pid=pid)) + cmd = ((prefix + "--op export-remove --pgid 2.0 --file {file}"). + format(id=divergent, file=expfile)) + try: + exp_remote.sh(cmd, wait=True) + except CommandFailedError as e: + assert e.exitstatus == 0 + + # Kill one of non-divergent OSDs + log.info('killing osd.%d' % non_divergent[0]) + manager.kill_osd(non_divergent[0]) + manager.mark_down_osd(non_divergent[0]) + # manager.mark_out_osd(non_divergent[0]) + + # An empty collection for pg 2.0 might need to be cleaned up + cmd = ((prefix + "--force --op remove --pgid 2.0"). + format(id=non_divergent[0])) + exp_remote.sh(cmd, wait=True, check_status=False) + + cmd = ((prefix + "--op import --file {file}"). + format(id=non_divergent[0], file=expfile)) + try: + exp_remote.sh(cmd, wait=True) + except CommandFailedError as e: + assert e.exitstatus == 0 + + # bring in our divergent friend and other node + log.info("revive divergent %d", divergent) + manager.revive_osd(divergent) + manager.mark_in_osd(divergent) + log.info("revive %d", non_divergent[0]) + manager.revive_osd(non_divergent[0]) + + while len(manager.get_osd_status()['up']) < 3: + time.sleep(10) + + log.info('delay recovery divergent') + manager.set_config(divergent, osd_recovery_delay_start=100000) + log.info('mark divergent in') + manager.mark_in_osd(divergent) + + log.info('wait for peering') + rados(ctx, mon, ['-p', 'foo', 'put', 'foo', dummyfile]) + + log.info("killing divergent %d", divergent) + manager.kill_osd(divergent) + log.info("reviving divergent %d", divergent) + manager.revive_osd(divergent) + time.sleep(3) + + log.info('allowing recovery') + # Set osd_recovery_delay_start back to 0 and kick the queue + for i in osds: + manager.raw_cluster_cmd('tell', 'osd.%d' % i, 'debug', + 'kick_recovery_wq', ' 0') + + log.info('reading divergent objects') + for i in range(DIVERGENT_WRITE + DIVERGENT_REMOVE): + exit_status = rados(ctx, mon, ['-p', 'foo', 'get', 'existing_%d' % i, + '/tmp/existing']) + assert exit_status == 0 + + (remote,) = ctx.\ + cluster.only('osd.{o}'.format(o=divergent)).remotes.keys() + cmd = 'rm {file}'.format(file=expfile) + remote.run(args=cmd, wait=True) + log.info("success") diff --git a/qa/tasks/rep_lost_unfound_delete.py b/qa/tasks/rep_lost_unfound_delete.py new file mode 100644 index 00000000..d422a33b --- /dev/null +++ b/qa/tasks/rep_lost_unfound_delete.py @@ -0,0 +1,178 @@ +""" +Lost_unfound +""" +import logging +import time + +from tasks import ceph_manager +from tasks.util.rados import rados +from teuthology import misc as teuthology +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + 
Test handling of lost objects. + + A pretty rigid cluster is brought up and tested by this task + """ + POOL = 'unfounddel_pool' + if config is None: + config = {} + assert isinstance(config, dict), \ + 'lost_unfound task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + while len(manager.get_osd_status()['up']) < 3: + time.sleep(10) + manager.flush_pg_stats([0, 1, 2]) + manager.wait_for_clean() + + manager.create_pool(POOL) + + # something that is always there + dummyfile = '/etc/fstab' + + # take an osd out until the very end + manager.kill_osd(2) + manager.mark_down_osd(2) + manager.mark_out_osd(2) + + # kludge to make sure they get a map + rados(ctx, mon, ['-p', POOL, 'put', 'dummy', dummyfile]) + + manager.flush_pg_stats([0, 1]) + manager.wait_for_recovery() + + # create old objects + for f in range(1, 10): + rados(ctx, mon, ['-p', POOL, 'put', 'existing_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', POOL, 'put', 'existed_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', POOL, 'rm', 'existed_%d' % f]) + + # delay recovery, and make the pg log very long (to prevent backfill) + manager.raw_cluster_cmd( + 'tell', 'osd.1', + 'injectargs', + '--osd-recovery-delay-start 1000 --osd-min-pg-log-entries 100000000' + ) + + manager.kill_osd(0) + manager.mark_down_osd(0) + + for f in range(1, 10): + rados(ctx, mon, ['-p', POOL, 'put', 'new_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', POOL, 'put', 'existed_%d' % f, dummyfile]) + rados(ctx, mon, ['-p', POOL, 'put', 'existing_%d' % f, dummyfile]) + + # bring osd.0 back up, let it peer, but don't replicate the new + # objects... + log.info('osd.0 command_args is %s' % 'foo') + log.info(ctx.daemons.get_daemon('osd', 0).command_args) + ctx.daemons.get_daemon('osd', 0).command_kwargs['args'].extend([ + '--osd-recovery-delay-start', '1000' + ]) + manager.revive_osd(0) + manager.mark_in_osd(0) + manager.wait_till_osd_is_up(0) + + manager.flush_pg_stats([0, 1]) + manager.wait_till_active() + + # take out osd.1 and the only copy of those objects.
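+ # osd.1 now holds the only copy of the new_* objects written while osd.0 was down; killing it and marking it lost below is what makes those objects unfound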
+ manager.kill_osd(1) + manager.mark_down_osd(1) + manager.mark_out_osd(1) + manager.raw_cluster_cmd('osd', 'lost', '1', '--yes-i-really-mean-it') + + # bring up osd.2 so that things would otherwise, in theory, recovery fully + manager.revive_osd(2) + manager.mark_in_osd(2) + manager.wait_till_osd_is_up(2) + + manager.flush_pg_stats([0, 2]) + manager.wait_till_active() + manager.flush_pg_stats([0, 2]) + + # verify that there are unfound objects + unfound = manager.get_num_unfound_objects() + log.info("there are %d unfound objects" % unfound) + assert unfound + + testdir = teuthology.get_testdir(ctx) + procs = [] + if config.get('parallel_bench', True): + procs.append(mon.run( + args=[ + "/bin/sh", "-c", + " ".join(['adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage', + 'rados', + '--no-log-to-stderr', + '--name', 'client.admin', + '-b', str(4<<10), + '-p' , POOL, + '-t', '20', + 'bench', '240', 'write', + ]).format(tdir=testdir), + ], + logger=log.getChild('radosbench.{id}'.format(id='client.admin')), + stdin=run.PIPE, + wait=False + )) + time.sleep(10) + + # mark stuff lost + pgs = manager.get_pg_stats() + for pg in pgs: + if pg['stat_sum']['num_objects_unfound'] > 0: + primary = 'osd.%d' % pg['acting'][0] + + # verify that i can list them direct from the osd + log.info('listing missing/lost in %s state %s', pg['pgid'], + pg['state']); + m = manager.list_pg_unfound(pg['pgid']) + #log.info('%s' % m) + assert m['num_unfound'] == pg['stat_sum']['num_objects_unfound'] + num_unfound=0 + for o in m['objects']: + if len(o['locations']) == 0: + num_unfound += 1 + assert m['num_unfound'] == num_unfound + + log.info("reverting unfound in %s on %s", pg['pgid'], primary) + manager.raw_cluster_cmd('pg', pg['pgid'], + 'mark_unfound_lost', 'delete') + else: + log.info("no unfound in %s", pg['pgid']) + + manager.raw_cluster_cmd('tell', 'osd.0', 'debug', 'kick_recovery_wq', '5') + manager.raw_cluster_cmd('tell', 'osd.2', 'debug', 'kick_recovery_wq', '5') + manager.flush_pg_stats([0, 2]) + manager.wait_for_recovery() + + # verify result + for f in range(1, 10): + err = rados(ctx, mon, ['-p', POOL, 'get', 'new_%d' % f, '-']) + assert err + err = rados(ctx, mon, ['-p', POOL, 'get', 'existed_%d' % f, '-']) + assert err + err = rados(ctx, mon, ['-p', POOL, 'get', 'existing_%d' % f, '-']) + assert err + + # see if osd.1 can cope + manager.mark_in_osd(1) + manager.revive_osd(1) + manager.wait_till_osd_is_up(1) + manager.wait_for_clean() + run.wait(procs) + diff --git a/qa/tasks/repair_test.py b/qa/tasks/repair_test.py new file mode 100644 index 00000000..973273bb --- /dev/null +++ b/qa/tasks/repair_test.py @@ -0,0 +1,309 @@ +""" +Test pool repairing after objects are damaged. +""" +import logging +import time + +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + + +def choose_primary(manager, pool, num): + """ + Return primary to test on. + """ + log.info("Choosing primary") + return manager.get_pg_primary(pool, num) + + +def choose_replica(manager, pool, num): + """ + Return replica to test on. 
+ """ + log.info("Choosing replica") + return manager.get_pg_replica(pool, num) + + +def trunc(manager, osd, pool, obj): + """ + truncate an object + """ + log.info("truncating object") + return manager.osd_admin_socket( + osd, + ['truncobj', pool, obj, '1']) + + +def dataerr(manager, osd, pool, obj): + """ + cause an error in the data + """ + log.info("injecting data err on object") + return manager.osd_admin_socket( + osd, + ['injectdataerr', pool, obj]) + + +def mdataerr(manager, osd, pool, obj): + """ + cause an error in the mdata + """ + log.info("injecting mdata err on object") + return manager.osd_admin_socket( + osd, + ['injectmdataerr', pool, obj]) + + +def omaperr(manager, osd, pool, obj): + """ + Cause an omap error. + """ + log.info("injecting omap err on object") + return manager.osd_admin_socket(osd, ['setomapval', pool, obj, + 'badkey', 'badval']) + + +def repair_test_1(manager, corrupter, chooser, scrub_type): + """ + Creates an object in the pool, corrupts it, + scrubs it, and verifies that the pool is inconsistent. It then repairs + the pool, rescrubs it, and verifies that the pool is consistent + + :param corrupter: error generating function (truncate, data-error, or + meta-data error, for example). + :param chooser: osd type chooser (primary or replica) + :param scrub_type: regular scrub or deep-scrub + """ + pool = "repair_pool_1" + manager.wait_for_clean() + with manager.pool(pool, 1): + + log.info("starting repair test type 1") + victim_osd = chooser(manager, pool, 0) + + # create object + log.info("doing put") + manager.do_put(pool, 'repair_test_obj', '/etc/hosts') + + # corrupt object + log.info("corrupting object") + corrupter(manager, victim_osd, pool, 'repair_test_obj') + + # verify inconsistent + log.info("scrubbing") + manager.do_pg_scrub(pool, 0, scrub_type) + + manager.with_pg_state(pool, 0, lambda s: 'inconsistent' in s) + + # repair + log.info("repairing") + manager.do_pg_scrub(pool, 0, "repair") + + log.info("re-scrubbing") + manager.do_pg_scrub(pool, 0, scrub_type) + + # verify consistent + manager.with_pg_state(pool, 0, lambda s: 'inconsistent' not in s) + log.info("done") + + +def repair_test_2(ctx, manager, config, chooser): + """ + First creates a set of objects and + sets the omap value. It then corrupts an object, does both a scrub + and a deep-scrub, and then corrupts more objects. After that, it + repairs the pool and makes sure that the pool is consistent some + time after a deep-scrub. + + :param chooser: primary or replica selection routine. 
+ """ + pool = "repair_pool_2" + manager.wait_for_clean() + with manager.pool(pool, 1): + log.info("starting repair test type 2") + victim_osd = chooser(manager, pool, 0) + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + + # create object + log.info("doing put and setomapval") + manager.do_put(pool, 'file1', '/etc/hosts') + manager.do_rados(mon, ['-p', pool, 'setomapval', 'file1', + 'key', 'val']) + manager.do_put(pool, 'file2', '/etc/hosts') + manager.do_put(pool, 'file3', '/etc/hosts') + manager.do_put(pool, 'file4', '/etc/hosts') + manager.do_put(pool, 'file5', '/etc/hosts') + manager.do_rados(mon, ['-p', pool, 'setomapval', 'file5', + 'key', 'val']) + manager.do_put(pool, 'file6', '/etc/hosts') + + # corrupt object + log.info("corrupting object") + omaperr(manager, victim_osd, pool, 'file1') + + # verify inconsistent + log.info("scrubbing") + manager.do_pg_scrub(pool, 0, 'deep-scrub') + + manager.with_pg_state(pool, 0, lambda s: 'inconsistent' in s) + + # Regression test for bug #4778, should still + # be inconsistent after scrub + manager.do_pg_scrub(pool, 0, 'scrub') + + manager.with_pg_state(pool, 0, lambda s: 'inconsistent' in s) + + # Additional corruptions including 2 types for file1 + log.info("corrupting more objects") + dataerr(manager, victim_osd, pool, 'file1') + mdataerr(manager, victim_osd, pool, 'file2') + trunc(manager, victim_osd, pool, 'file3') + omaperr(manager, victim_osd, pool, 'file6') + + # see still inconsistent + log.info("scrubbing") + manager.do_pg_scrub(pool, 0, 'deep-scrub') + + manager.with_pg_state(pool, 0, lambda s: 'inconsistent' in s) + + # repair + log.info("repairing") + manager.do_pg_scrub(pool, 0, "repair") + + # Let repair clear inconsistent flag + time.sleep(10) + + # verify consistent + manager.with_pg_state(pool, 0, lambda s: 'inconsistent' not in s) + + # In the future repair might determine state of + # inconsistency itself, verify with a deep-scrub + log.info("scrubbing") + manager.do_pg_scrub(pool, 0, 'deep-scrub') + + # verify consistent + manager.with_pg_state(pool, 0, lambda s: 'inconsistent' not in s) + + log.info("done") + + +def hinfoerr(manager, victim, pool, obj): + """ + cause an error in the hinfo_key + """ + log.info("remove the hinfo_key") + manager.objectstore_tool(pool, + options='', + args='rm-attr hinfo_key', + object_name=obj, + osd=victim) + + +def repair_test_erasure_code(manager, corrupter, victim, scrub_type): + """ + Creates an object in the pool, corrupts it, + scrubs it, and verifies that the pool is inconsistent. It then repairs + the pool, rescrubs it, and verifies that the pool is consistent + + :param corrupter: error generating function. 
+ :param chooser: osd type chooser (primary or replica) + :param scrub_type: regular scrub or deep-scrub + """ + pool = "repair_pool_3" + manager.wait_for_clean() + with manager.pool(pool_name=pool, pg_num=1, + erasure_code_profile_name='default'): + + log.info("starting repair test for erasure code") + + # create object + log.info("doing put") + manager.do_put(pool, 'repair_test_obj', '/etc/hosts') + + # corrupt object + log.info("corrupting object") + corrupter(manager, victim, pool, 'repair_test_obj') + + # verify inconsistent + log.info("scrubbing") + manager.do_pg_scrub(pool, 0, scrub_type) + + manager.with_pg_state(pool, 0, lambda s: 'inconsistent' in s) + + # repair + log.info("repairing") + manager.do_pg_scrub(pool, 0, "repair") + + log.info("re-scrubbing") + manager.do_pg_scrub(pool, 0, scrub_type) + + # verify consistent + manager.with_pg_state(pool, 0, lambda s: 'inconsistent' not in s) + log.info("done") + + +def task(ctx, config): + """ + Test [deep] repair in several situations: + Repair [Truncate, Data EIO, MData EIO] on [Primary|Replica] + + The config should be as follows: + + Must include the log-whitelist below + Must enable filestore_debug_inject_read_err config + + example: + + tasks: + - chef: + - install: + - ceph: + log-whitelist: + - 'candidate had a stat error' + - 'candidate had a read error' + - 'deep-scrub 0 missing, 1 inconsistent objects' + - 'deep-scrub 0 missing, 4 inconsistent objects' + - 'deep-scrub [0-9]+ errors' + - '!= omap_digest' + - '!= data_digest' + - 'repair 0 missing, 1 inconsistent objects' + - 'repair 0 missing, 4 inconsistent objects' + - 'repair [0-9]+ errors, [0-9]+ fixed' + - 'scrub 0 missing, 1 inconsistent objects' + - 'scrub [0-9]+ errors' + - 'size 1 != size' + - 'attr name mismatch' + - 'Regular scrub request, deep-scrub details will be lost' + - 'candidate size [0-9]+ info size [0-9]+ mismatch' + conf: + osd: + filestore debug inject read err: true + - repair_test: + + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'repair_test task only accepts a dict for config' + + manager = ctx.managers['ceph'] + manager.wait_for_all_osds_up() + + manager.raw_cluster_cmd('osd', 'set', 'noscrub') + manager.raw_cluster_cmd('osd', 'set', 'nodeep-scrub') + + repair_test_1(manager, mdataerr, choose_primary, "scrub") + repair_test_1(manager, mdataerr, choose_replica, "scrub") + repair_test_1(manager, dataerr, choose_primary, "deep-scrub") + repair_test_1(manager, dataerr, choose_replica, "deep-scrub") + repair_test_1(manager, trunc, choose_primary, "scrub") + repair_test_1(manager, trunc, choose_replica, "scrub") + repair_test_2(ctx, manager, config, choose_primary) + repair_test_2(ctx, manager, config, choose_replica) + + repair_test_erasure_code(manager, hinfoerr, 'primary', "deep-scrub") + + manager.raw_cluster_cmd('osd', 'unset', 'noscrub') + manager.raw_cluster_cmd('osd', 'unset', 'nodeep-scrub') diff --git a/qa/tasks/resolve_stuck_peering.py b/qa/tasks/resolve_stuck_peering.py new file mode 100644 index 00000000..d140544c --- /dev/null +++ b/qa/tasks/resolve_stuck_peering.py @@ -0,0 +1,112 @@ +""" +Resolve stuck peering +""" +import logging +import time + +from teuthology import misc as teuthology +from tasks.util.rados import rados + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Test handling resolve stuck peering + + requires 3 osds on a single test node + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'Resolve stuck peering only accepts a dict for config' + + 
manager = ctx.managers['ceph'] + + while len(manager.get_osd_status()['up']) < 3: + time.sleep(10) + + + manager.wait_for_clean() + + dummyfile = '/etc/fstab' + dummyfile1 = '/etc/resolv.conf' + + #create 1 PG pool + pool='foo' + log.info('creating pool foo') + manager.raw_cluster_cmd('osd', 'pool', 'create', '%s' % pool, '1') + + #set min_size of the pool to 1 + #so that we can continue with I/O + #when 2 osds are down + manager.set_pool_property(pool, "min_size", 1) + + osds = [0, 1, 2] + + primary = manager.get_pg_primary('foo', 0) + log.info("primary osd is %d", primary) + + others = list(osds) + others.remove(primary) + + log.info('writing initial objects') + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + #create few objects + for i in range(100): + rados(ctx, mon, ['-p', 'foo', 'put', 'existing_%d' % i, dummyfile]) + + manager.wait_for_clean() + + #kill other osds except primary + log.info('killing other osds except primary') + for i in others: + manager.kill_osd(i) + for i in others: + manager.mark_down_osd(i) + + + for i in range(100): + rados(ctx, mon, ['-p', 'foo', 'put', 'new_%d' % i, dummyfile1]) + + #kill primary osd + manager.kill_osd(primary) + manager.mark_down_osd(primary) + + #revive other 2 osds + for i in others: + manager.revive_osd(i) + + #make sure that pg is down + #Assuming pg number for single pg pool will start from 0 + pgnum=0 + pgstr = manager.get_pgid(pool, pgnum) + stats = manager.get_single_pg_stats(pgstr) + print(stats['state']) + + timeout=60 + start=time.time() + + while 'down' not in stats['state']: + assert time.time() - start < timeout, \ + 'failed to reach down state before timeout expired' + stats = manager.get_single_pg_stats(pgstr) + + #mark primary as lost + manager.raw_cluster_cmd('osd', 'lost', '%d' % primary,\ + '--yes-i-really-mean-it') + + + #expect the pg status to be active+undersized+degraded + #pg should recover and become active+clean within timeout + stats = manager.get_single_pg_stats(pgstr) + print(stats['state']) + + timeout=10 + start=time.time() + + while manager.get_num_down(): + assert time.time() - start < timeout, \ + 'failed to recover before timeout expired' + + manager.revive_osd(primary) diff --git a/qa/tasks/restart.py b/qa/tasks/restart.py new file mode 100644 index 00000000..52b685c9 --- /dev/null +++ b/qa/tasks/restart.py @@ -0,0 +1,163 @@ +""" +Daemon restart +""" +import logging +import pipes + +from teuthology import misc as teuthology +from teuthology.orchestra import run as tor + +from teuthology.orchestra import run +log = logging.getLogger(__name__) + +def restart_daemon(ctx, config, role, id_, *args): + """ + Handle restart (including the execution of the command parameters passed) + """ + log.info('Restarting {r}.{i} daemon...'.format(r=role, i=id_)) + daemon = ctx.daemons.get_daemon(role, id_) + log.debug('Waiting for exit of {r}.{i} daemon...'.format(r=role, i=id_)) + try: + daemon.wait_for_exit() + except tor.CommandFailedError as e: + log.debug('Command Failed: {e}'.format(e=e)) + if len(args) > 0: + confargs = ['--{k}={v}'.format(k=k, v=v) for k,v in zip(args[0::2], args[1::2])] + log.debug('Doing restart of {r}.{i} daemon with args: {a}...'.format(r=role, i=id_, a=confargs)) + daemon.restart_with_args(confargs) + else: + log.debug('Doing restart of {r}.{i} daemon...'.format(r=role, i=id_)) + daemon.restart() + +def get_tests(ctx, config, role, remote, testdir): + """Download restart tests""" + srcdir = '{tdir}/restart.{role}'.format(tdir=testdir, 
role=role) + + refspec = config.get('branch') + if refspec is None: + refspec = config.get('sha1') + if refspec is None: + refspec = config.get('tag') + if refspec is None: + refspec = 'HEAD' + log.info('Pulling restart qa/workunits from ref %s', refspec) + + remote.run( + logger=log.getChild(role), + args=[ + 'mkdir', '--', srcdir, + run.Raw('&&'), + 'git', + 'archive', + '--remote=git://git.ceph.com/ceph.git', + '%s:qa/workunits' % refspec, + run.Raw('|'), + 'tar', + '-C', srcdir, + '-x', + '-f-', + run.Raw('&&'), + 'cd', '--', srcdir, + run.Raw('&&'), + 'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi', + run.Raw('&&'), + 'find', '-executable', '-type', 'f', '-printf', r'%P\0'.format(srcdir=srcdir), + run.Raw('>{tdir}/restarts.list'.format(tdir=testdir)), + ], + ) + restarts = sorted(teuthology.get_file( + remote, + '{tdir}/restarts.list'.format(tdir=testdir)).split('\0')) + return (srcdir, restarts) + +def task(ctx, config): + """ + Execute commands and allow daemon restart with config options. + Each process executed can output to stdout restart commands of the form: + restart + This will restart the daemon . with the specified config values once + by modifying the conf file with those values, and then replacing the old conf file + once the daemon is restarted. + This task does not kill a running daemon, it assumes the daemon will abort on an + assert specified in the config. + + tasks: + - install: + - ceph: + - restart: + exec: + client.0: + - test_backtraces.py + + """ + assert isinstance(config, dict), "task kill got invalid config" + + testdir = teuthology.get_testdir(ctx) + + try: + assert 'exec' in config, "config requires exec key with : entries" + for role, task in config['exec'].items(): + log.info('restart for role {r}'.format(r=role)) + (remote,) = ctx.cluster.only(role).remotes.keys() + srcdir, restarts = get_tests(ctx, config, role, remote, testdir) + log.info('Running command on role %s host %s', role, remote.name) + spec = '{spec}'.format(spec=task[0]) + log.info('Restarts list: %s', restarts) + log.info('Spec is %s', spec) + to_run = [w for w in restarts if w == task or w.find(spec) != -1] + log.info('To run: %s', to_run) + for c in to_run: + log.info('Running restart script %s...', c) + args = [ + run.Raw('TESTDIR="{tdir}"'.format(tdir=testdir)), + ] + env = config.get('env') + if env is not None: + for var, val in env.items(): + quoted_val = pipes.quote(val) + env_arg = '{var}={val}'.format(var=var, val=quoted_val) + args.append(run.Raw(env_arg)) + args.extend([ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + '{srcdir}/{c}'.format( + srcdir=srcdir, + c=c, + ), + ]) + proc = remote.run( + args=args, + stdout=tor.PIPE, + stdin=tor.PIPE, + stderr=log, + wait=False, + ) + log.info('waiting for a command from script') + while True: + l = proc.stdout.readline() + if not l or l == '': + break + log.debug('script command: {c}'.format(c=l)) + ll = l.strip() + cmd = ll.split(' ') + if cmd[0] == "done": + break + assert cmd[0] == 'restart', "script sent invalid command request to kill task" + # cmd should be: restart + # or to clear, just: restart + restart_daemon(ctx, config, cmd[1], cmd[2], *cmd[3:]) + proc.stdin.writelines(['restarted\n']) + proc.stdin.flush() + try: + proc.wait() + except tor.CommandFailedError: + raise Exception('restart task got non-zero exit status from script: {s}'.format(s=c)) + finally: + log.info('Finishing %s on %s...', task, role) + remote.run( + logger=log.getChild(role), + 
args=[ + 'rm', '-rf', '--', '{tdir}/restarts.list'.format(tdir=testdir), srcdir, + ], + ) diff --git a/qa/tasks/rgw.py b/qa/tasks/rgw.py new file mode 100644 index 00000000..e747426c --- /dev/null +++ b/qa/tasks/rgw.py @@ -0,0 +1,357 @@ +""" +rgw routines +""" +import argparse +import contextlib +import logging + +from teuthology.orchestra import run +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.exceptions import ConfigError +from tasks.util import get_remote_for_role +from tasks.util.rgw import rgwadmin, wait_for_radosgw +from tasks.util.rados import (create_ec_pool, + create_replicated_pool, + create_cache_pool) + +log = logging.getLogger(__name__) + +class RGWEndpoint: + def __init__(self, hostname=None, port=None, cert=None, dns_name=None, website_dns_name=None): + self.hostname = hostname + self.port = port + self.cert = cert + self.dns_name = dns_name + self.website_dns_name = website_dns_name + + def url(self): + proto = 'https' if self.cert else 'http' + return '{proto}://{hostname}:{port}/'.format(proto=proto, hostname=self.hostname, port=self.port) + +@contextlib.contextmanager +def start_rgw(ctx, config, clients): + """ + Start rgw on remote sites. + """ + log.info('Starting rgw...') + testdir = teuthology.get_testdir(ctx) + for client in clients: + (remote,) = ctx.cluster.only(client).remotes.keys() + cluster_name, daemon_type, client_id = teuthology.split_role(client) + client_with_id = daemon_type + '.' + client_id + client_with_cluster = cluster_name + '.' + client_with_id + + client_config = config.get(client) + if client_config is None: + client_config = {} + log.info("rgw %s config is %s", client, client_config) + cmd_prefix = [ + 'sudo', + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'daemon-helper', + 'term', + ] + + rgw_cmd = ['radosgw'] + + log.info("Using %s as radosgw frontend", ctx.rgw.frontend) + + endpoint = ctx.rgw.role_endpoints[client] + frontends = ctx.rgw.frontend + frontend_prefix = client_config.get('frontend_prefix', None) + if frontend_prefix: + frontends += ' prefix={pfx}'.format(pfx=frontend_prefix) + + if endpoint.cert: + # add the ssl certificate path + frontends += ' ssl_certificate={}'.format(endpoint.cert.certificate) + if ctx.rgw.frontend == 'civetweb': + frontends += ' port={}s'.format(endpoint.port) + else: + frontends += ' ssl_port={}'.format(endpoint.port) + else: + frontends += ' port={}'.format(endpoint.port) + + rgw_cmd.extend([ + '--rgw-frontends', frontends, + '-n', client_with_id, + '--cluster', cluster_name, + '-k', '/etc/ceph/{client_with_cluster}.keyring'.format(client_with_cluster=client_with_cluster), + '--log-file', + '/var/log/ceph/rgw.{client_with_cluster}.log'.format(client_with_cluster=client_with_cluster), + '--rgw_ops_log_socket_path', + '{tdir}/rgw.opslog.{client_with_cluster}.sock'.format(tdir=testdir, + client_with_cluster=client_with_cluster) + ]) + + keystone_role = client_config.get('use-keystone-role', None) + if keystone_role is not None: + if not ctx.keystone: + raise ConfigError('rgw must run after the keystone task') + url = 'http://{host}:{port}/v1/KEY_$(tenant_id)s'.format(host=endpoint.hostname, + port=endpoint.port) + ctx.keystone.create_endpoint(ctx, keystone_role, 'swift', url) + + keystone_host, keystone_port = \ + ctx.keystone.public_endpoints[keystone_role] + rgw_cmd.extend([ + '--rgw_keystone_url', + 'http://{khost}:{kport}'.format(khost=keystone_host, + kport=keystone_port), + ]) + + + if 
client_config.get('dns-name') is not None: + rgw_cmd.extend(['--rgw-dns-name', endpoint.dns_name]) + if client_config.get('dns-s3website-name') is not None: + rgw_cmd.extend(['--rgw-dns-s3website-name', endpoint.website_dns_name]) + + rgw_cmd.extend([ + '--foreground', + run.Raw('|'), + 'sudo', + 'tee', + '/var/log/ceph/rgw.{client_with_cluster}.stdout'.format(tdir=testdir, + client_with_cluster=client_with_cluster), + run.Raw('2>&1'), + ]) + + if client_config.get('valgrind'): + cmd_prefix = teuthology.get_valgrind_args( + testdir, + client_with_cluster, + cmd_prefix, + client_config.get('valgrind') + ) + + run_cmd = list(cmd_prefix) + run_cmd.extend(rgw_cmd) + + ctx.daemons.add_daemon( + remote, 'rgw', client_with_id, + cluster=cluster_name, + args=run_cmd, + logger=log.getChild(client), + stdin=run.PIPE, + wait=False, + ) + + # XXX: add_daemon() doesn't let us wait until radosgw finishes startup + for client in clients: + endpoint = ctx.rgw.role_endpoints[client] + url = endpoint.url() + log.info('Polling {client} until it starts accepting connections on {url}'.format(client=client, url=url)) + (remote,) = ctx.cluster.only(client).remotes.keys() + wait_for_radosgw(url, remote) + + try: + yield + finally: + for client in clients: + cluster_name, daemon_type, client_id = teuthology.split_role(client) + client_with_id = daemon_type + '.' + client_id + client_with_cluster = cluster_name + '.' + client_with_id + ctx.daemons.get_daemon('rgw', client_with_id, cluster_name).stop() + ctx.cluster.only(client).run( + args=[ + 'rm', + '-f', + '{tdir}/rgw.opslog.{client}.sock'.format(tdir=testdir, + client=client_with_cluster), + ], + ) + +def assign_endpoints(ctx, config, default_cert): + role_endpoints = {} + for role, client_config in config.items(): + client_config = client_config or {} + remote = get_remote_for_role(ctx, role) + + cert = client_config.get('ssl certificate', default_cert) + if cert: + # find the certificate created by the ssl task + if not hasattr(ctx, 'ssl_certificates'): + raise ConfigError('rgw: no ssl task found for option "ssl certificate"') + ssl_certificate = ctx.ssl_certificates.get(cert, None) + if not ssl_certificate: + raise ConfigError('rgw: missing ssl certificate "{}"'.format(cert)) + else: + ssl_certificate = None + + port = client_config.get('port', 443 if ssl_certificate else 80) + + # if dns-name is given, use it as the hostname (or as a prefix) + dns_name = client_config.get('dns-name', '') + if len(dns_name) == 0 or dns_name.endswith('.'): + dns_name += remote.hostname + + website_dns_name = client_config.get('dns-s3website-name') + if website_dns_name is not None and (len(website_dns_name) == 0 or website_dns_name.endswith('.')): + website_dns_name += remote.hostname + + role_endpoints[role] = RGWEndpoint(remote.hostname, port, ssl_certificate, dns_name, website_dns_name) + + return role_endpoints + +@contextlib.contextmanager +def create_pools(ctx, clients): + """Create replicated or erasure coded data pools for rgw.""" + + log.info('Creating data pools') + for client in clients: + log.debug("Obtaining remote for client {}".format(client)) + (remote,) = ctx.cluster.only(client).remotes.keys() + data_pool = 'default.rgw.buckets.data' + cluster_name, daemon_type, client_id = teuthology.split_role(client) + + if ctx.rgw.ec_data_pool: + create_ec_pool(remote, data_pool, client, ctx.rgw.data_pool_pg_size, + ctx.rgw.erasure_code_profile, cluster_name, 'rgw') + else: + create_replicated_pool(remote, data_pool, ctx.rgw.data_pool_pg_size, cluster_name, 'rgw') + + 
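+ # the bucket index pool is always replicated; only the data pool honours the ec-data-pool override handled above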
index_pool = 'default.rgw.buckets.index' + create_replicated_pool(remote, index_pool, ctx.rgw.index_pool_pg_size, cluster_name, 'rgw') + + if ctx.rgw.cache_pools: + create_cache_pool(remote, data_pool, data_pool + '.cache', 64, + 64*1024*1024, cluster_name) + log.debug('Pools created') + yield + +@contextlib.contextmanager +def configure_compression(ctx, clients, compression): + """ set a compression type in the default zone placement """ + log.info('Configuring compression type = %s', compression) + for client in clients: + # XXX: the 'default' zone and zonegroup aren't created until we run RGWRados::init_complete(). + # issue a 'radosgw-admin user list' command to trigger this + rgwadmin(ctx, client, cmd=['user', 'list'], check_status=True) + + rgwadmin(ctx, client, + cmd=['zone', 'placement', 'modify', '--rgw-zone', 'default', + '--placement-id', 'default-placement', + '--compression', compression], + check_status=True) + yield + +@contextlib.contextmanager +def configure_storage_classes(ctx, clients, storage_classes): + """ set a compression type in the default zone placement """ + + sc = [s.strip() for s in storage_classes.split(',')] + + for client in clients: + # XXX: the 'default' zone and zonegroup aren't created until we run RGWRados::init_complete(). + # issue a 'radosgw-admin user list' command to trigger this + rgwadmin(ctx, client, cmd=['user', 'list'], check_status=True) + + for storage_class in sc: + log.info('Configuring storage class type = %s', storage_class) + rgwadmin(ctx, client, + cmd=['zonegroup', 'placement', 'add', + '--rgw-zone', 'default', + '--placement-id', 'default-placement', + '--storage-class', storage_class], + check_status=True) + rgwadmin(ctx, client, + cmd=['zone', 'placement', 'add', + '--rgw-zone', 'default', + '--placement-id', 'default-placement', + '--storage-class', storage_class, + '--data-pool', 'default.rgw.buckets.data.' 
+ storage_class.lower()], + check_status=True) + yield + +@contextlib.contextmanager +def task(ctx, config): + """ + For example, to run rgw on all clients:: + + tasks: + - ceph: + - rgw: + + To only run on certain clients:: + + tasks: + - ceph: + - rgw: [client.0, client.3] + + or + + tasks: + - ceph: + - rgw: + client.0: + client.3: + + To run radosgw through valgrind: + + tasks: + - ceph: + - rgw: + client.0: + valgrind: [--tool=memcheck] + client.3: + valgrind: [--tool=memcheck] + + To configure data or index pool pg_size: + + overrides: + rgw: + data_pool_pg_size: 256 + index_pool_pg_size: 128 + """ + if config is None: + config = dict(('client.{id}'.format(id=id_), None) + for id_ in teuthology.all_roles_of_type( + ctx.cluster, 'client')) + elif isinstance(config, list): + config = dict((name, None) for name in config) + + clients = config.keys() # http://tracker.ceph.com/issues/20417 + + overrides = ctx.config.get('overrides', {}) + teuthology.deep_merge(config, overrides.get('rgw', {})) + + ctx.rgw = argparse.Namespace() + + ctx.rgw.ec_data_pool = bool(config.pop('ec-data-pool', False)) + ctx.rgw.erasure_code_profile = config.pop('erasure_code_profile', {}) + ctx.rgw.cache_pools = bool(config.pop('cache-pools', False)) + ctx.rgw.frontend = config.pop('frontend', 'civetweb') + ctx.rgw.compression_type = config.pop('compression type', None) + ctx.rgw.storage_classes = config.pop('storage classes', None) + default_cert = config.pop('ssl certificate', None) + ctx.rgw.data_pool_pg_size = config.pop('data_pool_pg_size', 64) + ctx.rgw.index_pool_pg_size = config.pop('index_pool_pg_size', 64) + ctx.rgw.config = config + + log.debug("config is {}".format(config)) + log.debug("client list is {}".format(clients)) + + ctx.rgw.role_endpoints = assign_endpoints(ctx, config, default_cert) + + subtasks = [ + lambda: create_pools(ctx=ctx, clients=clients), + ] + if ctx.rgw.compression_type: + subtasks.extend([ + lambda: configure_compression(ctx=ctx, clients=clients, + compression=ctx.rgw.compression_type), + ]) + if ctx.rgw.storage_classes: + subtasks.extend([ + lambda: configure_storage_classes(ctx=ctx, clients=clients, + storage_classes=ctx.rgw.storage_classes), + ]) + subtasks.extend([ + lambda: start_rgw(ctx=ctx, config=config, clients=clients), + ]) + + with contextutil.nested(*subtasks): + yield diff --git a/qa/tasks/rgw_logsocket.py b/qa/tasks/rgw_logsocket.py new file mode 100644 index 00000000..d76e59d7 --- /dev/null +++ b/qa/tasks/rgw_logsocket.py @@ -0,0 +1,165 @@ +""" +rgw s3tests logging wrappers +""" +from io import BytesIO +from configobj import ConfigObj +import contextlib +import logging +from tasks import s3tests + +from teuthology import misc as teuthology +from teuthology import contextutil + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def download(ctx, config): + """ + Run s3tests download function + """ + return s3tests.download(ctx, config) + +def _config_user(s3tests_conf, section, user): + """ + Run s3tests user config function + """ + return s3tests._config_user(s3tests_conf, section, user) + +@contextlib.contextmanager +def create_users(ctx, config): + """ + Run s3tests user create function + """ + return s3tests.create_users(ctx, config) + +@contextlib.contextmanager +def configure(ctx, config): + """ + Run s3tests user configure function + """ + return s3tests.configure(ctx, config) + +@contextlib.contextmanager +def run_tests(ctx, config): + """ + Run remote netcat tests + """ + assert isinstance(config, dict) + testdir = 
teuthology.get_testdir(ctx) + for client, client_config in config.items(): + client_config['extra_args'] = [ + 's3tests.functional.test_s3:test_bucket_list_return_data', + ] +# args = [ +# 'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client), +# '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir), +# '-w', +# '{tdir}/s3-tests'.format(tdir=testdir), +# '-v', +# 's3tests.functional.test_s3:test_bucket_list_return_data', +# ] +# if client_config is not None and 'extra_args' in client_config: +# args.extend(client_config['extra_args']) +# +# ctx.cluster.only(client).run( +# args=args, +# ) + + s3tests.run_tests(ctx, config) + + netcat_out = BytesIO() + + for client, client_config in config.items(): + ctx.cluster.only(client).run( + args = [ + 'netcat', + '-w', '5', + '-U', '{tdir}/rgw.opslog.sock'.format(tdir=testdir), + ], + stdout = netcat_out, + ) + + out = netcat_out.getvalue() + + assert len(out) > 100 + + log.info('Received', out) + + yield + + +@contextlib.contextmanager +def task(ctx, config): + """ + Run some s3-tests suite against rgw, verify opslog socket returns data + + Must restrict testing to a particular client:: + + tasks: + - ceph: + - rgw: [client.0] + - s3tests: [client.0] + + To pass extra arguments to nose (e.g. to run a certain test):: + + tasks: + - ceph: + - rgw: [client.0] + - s3tests: + client.0: + extra_args: ['test_s3:test_object_acl_grand_public_read'] + client.1: + extra_args: ['--exclude', 'test_100_continue'] + """ + assert hasattr(ctx, 'rgw'), 'rgw-logsocket must run after the rgw task' + assert config is None or isinstance(config, list) \ + or isinstance(config, dict), \ + "task rgw-logsocket only supports a list or dictionary for configuration" + all_clients = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + if config is None: + config = all_clients + if isinstance(config, list): + config = dict.fromkeys(config) + clients = config.keys() + + overrides = ctx.config.get('overrides', {}) + # merge each client section, not the top level. 
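+    # the override holds per-client settings (e.g. a 'force-branch' for the
+    # embedded s3-tests checkout) that should apply to every client entry,
+    # not replace the client map itself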
+ for (client, cconf) in config.items(): + teuthology.deep_merge(cconf, overrides.get('rgw-logsocket', {})) + + log.debug('config is %s', config) + + s3tests_conf = {} + for client in clients: + endpoint = ctx.rgw.role_endpoints.get(client) + assert endpoint, 'rgw-logsocket: no rgw endpoint for {}'.format(client) + + s3tests_conf[client] = ConfigObj( + indent_type='', + infile={ + 'DEFAULT': + { + 'port' : endpoint.port, + 'is_secure' : endpoint.cert is not None, + }, + 'fixtures' : {}, + 's3 main' : {}, + 's3 alt' : {}, + } + ) + + with contextutil.nested( + lambda: download(ctx=ctx, config=config), + lambda: create_users(ctx=ctx, config=dict( + clients=clients, + s3tests_conf=s3tests_conf, + )), + lambda: configure(ctx=ctx, config=dict( + clients=config, + s3tests_conf=s3tests_conf, + )), + lambda: run_tests(ctx=ctx, config=config), + ): + yield diff --git a/qa/tasks/rgw_multi b/qa/tasks/rgw_multi new file mode 120000 index 00000000..abfc703b --- /dev/null +++ b/qa/tasks/rgw_multi @@ -0,0 +1 @@ +../../src/test/rgw/rgw_multi \ No newline at end of file diff --git a/qa/tasks/rgw_multisite.py b/qa/tasks/rgw_multisite.py new file mode 100644 index 00000000..266d0fb6 --- /dev/null +++ b/qa/tasks/rgw_multisite.py @@ -0,0 +1,436 @@ +""" +rgw multisite configuration routines +""" +import argparse +import logging +import random +import string +from copy import deepcopy +from tasks.util.rgw import rgwadmin, wait_for_radosgw +from tasks.util.rados import create_ec_pool, create_replicated_pool +from tasks.rgw_multi import multisite +from tasks.rgw_multi.zone_rados import RadosZone as RadosZone +from tasks.rgw_multi.zone_ps import PSZone as PSZone + +from teuthology.orchestra import run +from teuthology import misc +from teuthology.exceptions import ConfigError +from teuthology.task import Task + +log = logging.getLogger(__name__) + +class RGWMultisite(Task): + """ + Performs rgw multisite configuration to match the given realm definition. + + - rgw-multisite: + realm: + name: test-realm + is_default: true + + List one or more zonegroup definitions. These are provided as json + input to `radosgw-admin zonegroup set`, with the exception of these keys: + + * 'is_master' is passed on the command line as --master + * 'is_default' is passed on the command line as --default + * 'is_pubsub' is used to create a zone with tier-type=pubsub + * 'endpoints' given as client names are replaced with actual endpoints + + zonegroups: + - name: test-zonegroup + api_name: test-api + is_master: true + is_default: true + endpoints: [c1.client.0] + + List each of the zones to be created in this zonegroup. 
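+    Zone definitions accept the same special keys ('is_master', 'is_default',
+    'is_pubsub', 'endpoints') described above.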
+ + zones: + - name: test-zone1 + is_master: true + is_default: true + endpoints: [c1.client.0] + - name: test-zone2 + is_default: true + endpoints: [c2.client.0] + + A complete example: + + tasks: + - install: + - ceph: {cluster: c1} + - ceph: {cluster: c2} + - rgw: + c1.client.0: + c2.client.0: + - rgw-multisite: + realm: + name: test-realm + is_default: true + zonegroups: + - name: test-zonegroup + is_master: true + is_default: true + zones: + - name: test-zone1 + is_master: true + is_default: true + endpoints: [c1.client.0] + - name: test-zone2 + is_default: true + endpoints: [c2.client.0] + - name: test-zone3 + is_pubsub: true + endpoints: [c1.client.1] + + """ + def __init__(self, ctx, config): + super(RGWMultisite, self).__init__(ctx, config) + + def setup(self): + super(RGWMultisite, self).setup() + + overrides = self.ctx.config.get('overrides', {}) + misc.deep_merge(self.config, overrides.get('rgw-multisite', {})) + + if not self.ctx.rgw: + raise ConfigError('rgw-multisite must run after the rgw task') + role_endpoints = self.ctx.rgw.role_endpoints + + # construct Clusters and Gateways for each client in the rgw task + clusters, gateways = extract_clusters_and_gateways(self.ctx, + role_endpoints) + + # get the master zone and zonegroup configuration + mz, mzg = extract_master_zone_zonegroup(self.config['zonegroups']) + cluster1 = cluster_for_zone(clusters, mz) + + # create the realm and period on the master zone's cluster + log.info('creating realm..') + realm = create_realm(cluster1, self.config['realm']) + period = realm.current_period + + creds = gen_credentials() + + # create the master zonegroup and its master zone + log.info('creating master zonegroup..') + master_zonegroup = create_zonegroup(cluster1, gateways, period, + deepcopy(mzg)) + period.master_zonegroup = master_zonegroup + + log.info('creating master zone..') + master_zone = create_zone(self.ctx, cluster1, gateways, creds, + master_zonegroup, deepcopy(mz)) + master_zonegroup.master_zone = master_zone + + period.update(master_zone, commit=True) + restart_zone_gateways(master_zone) # restart with --rgw-zone + + # create the admin user on the master zone + log.info('creating admin user..') + user_args = ['--display-name', 'Realm Admin', '--system'] + user_args += creds.credential_args() + admin_user = multisite.User('realm-admin') + admin_user.create(master_zone, user_args) + + # process 'zonegroups' + for zg_config in self.config['zonegroups']: + zones_config = zg_config.pop('zones') + + zonegroup = None + for zone_config in zones_config: + # get the cluster for this zone + cluster = cluster_for_zone(clusters, zone_config) + + if cluster != cluster1: # already created on master cluster + log.info('pulling realm configuration to %s', cluster.name) + realm.pull(cluster, master_zone.gateways[0], creds) + + # use the first zone's cluster to create the zonegroup + if not zonegroup: + if zg_config['name'] == master_zonegroup.name: + zonegroup = master_zonegroup + else: + log.info('creating zonegroup..') + zonegroup = create_zonegroup(cluster, gateways, + period, zg_config) + + if zone_config['name'] == master_zone.name: + # master zone was already created + zone = master_zone + else: + # create the zone and commit the period + log.info('creating zone..') + zone = create_zone(self.ctx, cluster, gateways, creds, + zonegroup, zone_config) + period.update(zone, commit=True) + + restart_zone_gateways(zone) # restart with --rgw-zone + + # attach configuration to the ctx for other tasks + self.ctx.rgw_multisite = 
argparse.Namespace() + self.ctx.rgw_multisite.clusters = clusters + self.ctx.rgw_multisite.gateways = gateways + self.ctx.rgw_multisite.realm = realm + self.ctx.rgw_multisite.admin_user = admin_user + + log.info('rgw multisite configuration completed') + + def end(self): + del self.ctx.rgw_multisite + +class Cluster(multisite.Cluster): + """ Issues 'radosgw-admin' commands with the rgwadmin() helper """ + def __init__(self, ctx, name, client): + super(Cluster, self).__init__() + self.ctx = ctx + self.name = name + self.client = client + + def admin(self, args = None, **kwargs): + """ radosgw-admin command """ + args = args or [] + args += ['--cluster', self.name] + args += ['--debug-rgw', str(kwargs.pop('debug_rgw', 0))] + args += ['--debug-ms', str(kwargs.pop('debug_ms', 0))] + if kwargs.pop('read_only', False): + args += ['--rgw-cache-enabled', 'false'] + kwargs['decode'] = False + check_retcode = kwargs.pop('check_retcode', True) + r, s = rgwadmin(self.ctx, self.client, args, **kwargs) + if check_retcode: + assert r == 0 + return s, r + +class Gateway(multisite.Gateway): + """ Controls a radosgw instance using its daemon """ + def __init__(self, role, remote, daemon, *args, **kwargs): + super(Gateway, self).__init__(*args, **kwargs) + self.role = role + self.remote = remote + self.daemon = daemon + + def set_zone(self, zone): + """ set the zone and add its args to the daemon's command line """ + assert self.zone is None, 'zone can only be set once' + self.zone = zone + # daemon.restart_with_args() would be perfect for this, except that + # radosgw args likely include a pipe and redirect. zone arguments at + # the end won't actually apply to radosgw + args = self.daemon.command_kwargs.get('args', []) + try: + # insert zone args before the first | + pipe = args.index(run.Raw('|')) + args = args[0:pipe] + zone.zone_args() + args[pipe:] + except ValueError: + args += zone.zone_args() + self.daemon.command_kwargs['args'] = args + + def start(self, args = None): + """ (re)start the daemon """ + self.daemon.restart() + # wait until startup completes + wait_for_radosgw(self.endpoint(), self.remote) + + def stop(self): + """ stop the daemon """ + self.daemon.stop() + +def extract_clusters_and_gateways(ctx, role_endpoints): + """ create cluster and gateway instances for all of the radosgw roles """ + clusters = {} + gateways = {} + for role, endpoint in role_endpoints.items(): + cluster_name, daemon_type, client_id = misc.split_role(role) + # find or create the cluster by name + cluster = clusters.get(cluster_name) + if not cluster: + clusters[cluster_name] = cluster = Cluster(ctx, cluster_name, role) + # create a gateway for this daemon + client_with_id = daemon_type + '.' 
+ client_id # match format from rgw.py + daemon = ctx.daemons.get_daemon('rgw', client_with_id, cluster_name) + if not daemon: + raise ConfigError('no daemon for role=%s cluster=%s type=rgw id=%s' % \ + (role, cluster_name, client_id)) + (remote,) = ctx.cluster.only(role).remotes.keys() + gateways[role] = Gateway(role, remote, daemon, endpoint.hostname, + endpoint.port, cluster) + return clusters, gateways + +def create_realm(cluster, config): + """ create a realm from configuration and initialize its first period """ + realm = multisite.Realm(config['name']) + args = [] + if config.get('is_default', False): + args += ['--default'] + realm.create(cluster, args) + realm.current_period = multisite.Period(realm) + return realm + +def extract_user_credentials(config): + """ extract keys from configuration """ + return multisite.Credentials(config['access_key'], config['secret_key']) + +def extract_master_zone(zonegroup_config): + """ find and return the master zone definition """ + master = None + for zone in zonegroup_config['zones']: + if not zone.get('is_master', False): + continue + if master: + raise ConfigError('zones %s and %s cannot both set \'is_master\'' % \ + (master['name'], zone['name'])) + master = zone + # continue the loop so we can detect duplicates + if not master: + raise ConfigError('one zone must set \'is_master\' in zonegroup %s' % \ + zonegroup_config['name']) + return master + +def extract_master_zone_zonegroup(zonegroups_config): + """ find and return the master zone and zonegroup definitions """ + master_zone, master_zonegroup = (None, None) + for zonegroup in zonegroups_config: + # verify that all zonegroups have a master zone set, even if they + # aren't in the master zonegroup + zone = extract_master_zone(zonegroup) + if not zonegroup.get('is_master', False): + continue + if master_zonegroup: + raise ConfigError('zonegroups %s and %s cannot both set \'is_master\'' % \ + (master_zonegroup['name'], zonegroup['name'])) + master_zonegroup = zonegroup + master_zone = zone + # continue the loop so we can detect duplicates + if not master_zonegroup: + raise ConfigError('one zonegroup must set \'is_master\'') + return master_zone, master_zonegroup + +def extract_zone_cluster_name(zone_config): + """ return the cluster (must be common to all zone endpoints) """ + cluster_name = None + endpoints = zone_config.get('endpoints') + if not endpoints: + raise ConfigError('zone %s missing \'endpoints\' list' % \ + zone_config['name']) + for role in endpoints: + name, _, _ = misc.split_role(role) + if not cluster_name: + cluster_name = name + elif cluster_name != name: + raise ConfigError('all zone %s endpoints must be in the same cluster' % \ + zone_config['name']) + return cluster_name + +def cluster_for_zone(clusters, zone_config): + """ return the cluster entry for the given zone """ + name = extract_zone_cluster_name(zone_config) + try: + return clusters[name] + except KeyError: + raise ConfigError('no cluster %s found' % name) + +def gen_access_key(): + return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16)) + +def gen_secret(): + return ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(32)) + +def gen_credentials(): + return multisite.Credentials(gen_access_key(), gen_secret()) + +def extract_gateway_endpoints(gateways, endpoints_config): + """ return a list of gateway endpoints associated with the given roles """ + endpoints = [] + for role in endpoints_config: + try: + # replace role names 
with their gateway's endpoint + endpoints.append(gateways[role].endpoint()) + except KeyError: + raise ConfigError('no radosgw endpoint found for role %s' % role) + return endpoints + +def is_default_arg(config): + return ['--default'] if config.pop('is_default', False) else [] + +def is_master_arg(config): + return ['--master'] if config.pop('is_master', False) else [] + +def create_zonegroup(cluster, gateways, period, config): + """ pass the zonegroup configuration to `zonegroup set` """ + config.pop('zones', None) # remove 'zones' from input to `zonegroup set` + endpoints = config.get('endpoints') + if endpoints: + # replace client names with their gateway endpoints + config['endpoints'] = extract_gateway_endpoints(gateways, endpoints) + zonegroup = multisite.ZoneGroup(config['name'], period) + # `zonegroup set` needs --default on command line, and 'is_master' in json + args = is_default_arg(config) + zonegroup.set(cluster, config, args) + period.zonegroups.append(zonegroup) + return zonegroup + +def create_zone(ctx, cluster, gateways, creds, zonegroup, config): + """ create a zone with the given configuration """ + zone = multisite.Zone(config['name'], zonegroup, cluster) + if config.pop('is_pubsub', False): + zone = PSZone(config['name'], zonegroup, cluster) + else: + zone = RadosZone(config['name'], zonegroup, cluster) + + # collect Gateways for the zone's endpoints + endpoints = config.get('endpoints') + if not endpoints: + raise ConfigError('no \'endpoints\' for zone %s' % config['name']) + zone.gateways = [gateways[role] for role in endpoints] + for gateway in zone.gateways: + gateway.set_zone(zone) + + # format the gateway endpoints + endpoints = [g.endpoint() for g in zone.gateways] + + args = is_default_arg(config) + args += is_master_arg(config) + args += creds.credential_args() + if len(endpoints): + args += ['--endpoints', ','.join(endpoints)] + zone.create(cluster, args) + zonegroup.zones.append(zone) + + create_zone_pools(ctx, zone) + if ctx.rgw.compression_type: + configure_zone_compression(zone, ctx.rgw.compression_type) + + zonegroup.zones_by_type.setdefault(zone.tier_type(), []).append(zone) + + if zone.is_read_only(): + zonegroup.ro_zones.append(zone) + else: + zonegroup.rw_zones.append(zone) + + return zone + +def create_zone_pools(ctx, zone): + """ Create the data_pool for each placement type """ + gateway = zone.gateways[0] + cluster = zone.cluster + for pool_config in zone.data.get('placement_pools', []): + pool_name = pool_config['val']['storage_classes']['STANDARD']['data_pool'] + if ctx.rgw.ec_data_pool: + create_ec_pool(gateway.remote, pool_name, zone.name, 64, + ctx.rgw.erasure_code_profile, cluster.name, 'rgw') + else: + create_replicated_pool(gateway.remote, pool_name, 64, cluster.name, 'rgw') + +def configure_zone_compression(zone, compression): + """ Set compression type in the zone's default-placement """ + zone.json_command(zone.cluster, 'placement', ['modify', + '--placement-id', 'default-placement', + '--compression', compression + ]) + +def restart_zone_gateways(zone): + zone.stop() + zone.start() + +task = RGWMultisite diff --git a/qa/tasks/rgw_multisite_tests.py b/qa/tasks/rgw_multisite_tests.py new file mode 100644 index 00000000..53aedf79 --- /dev/null +++ b/qa/tasks/rgw_multisite_tests.py @@ -0,0 +1,99 @@ +""" +rgw multisite testing +""" +import logging +import nose.core +import nose.config + +from teuthology.exceptions import ConfigError +from teuthology.task import Task +from teuthology import misc + +from tasks.rgw_multi import multisite, 
tests, tests_ps + +log = logging.getLogger(__name__) + + +class RGWMultisiteTests(Task): + """ + Runs the rgw_multi tests against a multisite configuration created by the + rgw-multisite task. Tests are run with nose, using any additional 'args' + provided. Overrides for tests.Config can be set in 'config'. + + - rgw-multisite-tests: + args: + - tasks.rgw_multi.tests:test_object_sync + config: + reconfigure_delay: 60 + + """ + def __init__(self, ctx, config): + super(RGWMultisiteTests, self).__init__(ctx, config) + + def setup(self): + super(RGWMultisiteTests, self).setup() + + overrides = self.ctx.config.get('overrides', {}) + misc.deep_merge(self.config, overrides.get('rgw-multisite-tests', {})) + + if not self.ctx.rgw_multisite: + raise ConfigError('rgw-multisite-tests must run after the rgw-multisite task') + realm = self.ctx.rgw_multisite.realm + master_zone = realm.meta_master_zone() + + # create the test user + log.info('creating test user..') + user = multisite.User('rgw-multisite-test-user') + user.create(master_zone, ['--display-name', 'Multisite Test User', + '--gen-access-key', '--gen-secret']) + + config = self.config.get('config', {}) + tests.init_multi(realm, user, tests.Config(**config)) + tests.realm_meta_checkpoint(realm) + + def begin(self): + # extra arguments for nose can be passed as a string or list + extra_args = self.config.get('args', []) + if not isinstance(extra_args, list): + extra_args = [extra_args] + argv = [__name__] + extra_args + + log.info("running rgw multisite tests on '%s' with args=%r", + tests.__name__, extra_args) + + # run nose tests in the rgw_multi.tests module + conf = nose.config.Config(stream=get_log_stream(), verbosity=2) + error_msg = '' + result = nose.run(defaultTest=tests.__name__, argv=argv, config=conf) + if not result: + error_msg += 'rgw multisite, ' + result = nose.run(defaultTest=tests_ps.__name__, argv=argv, config=conf) + if not result: + error_msg += 'rgw multisite pubsub, ' + if error_msg: + raise RuntimeError(error_msg + 'test failures') + + +def get_log_stream(): + """ return a log stream for nose output """ + # XXX: this is a workaround for IOErrors when nose writes to stderr, + # copied from vstart_runner.py + class LogStream(object): + def __init__(self): + self.buffer = "" + + def write(self, data): + self.buffer += data + if "\n" in self.buffer: + lines = self.buffer.split("\n") + for line in lines[:-1]: + log.info(line) + self.buffer = lines[-1] + + def flush(self): + pass + + return LogStream() + + +task = RGWMultisiteTests diff --git a/qa/tasks/s3a_hadoop.py b/qa/tasks/s3a_hadoop.py new file mode 100644 index 00000000..239be7cb --- /dev/null +++ b/qa/tasks/s3a_hadoop.py @@ -0,0 +1,289 @@ +import contextlib +import logging +from teuthology import misc +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def task(ctx, config): + """ + Run Hadoop S3A tests using Ceph + usage: + -tasks: + ceph-ansible: + s3a-hadoop: + maven-version: '3.6.3' (default) + hadoop-version: '2.9.2' + bucket-name: 's3atest' (default) + access-key: 'anykey' (uses a default value) + secret-key: 'secretkey' ( uses a default value) + role: client.0 + """ + if config is None: + config = {} + + assert isinstance(config, dict), \ + "task only supports a dictionary for configuration" + + assert hasattr(ctx, 'rgw'), 's3a-hadoop must run after the rgw task' + + overrides = ctx.config.get('overrides', {}) + misc.deep_merge(config, overrides.get('s3a-hadoop', {})) + testdir = misc.get_testdir(ctx) + + 
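+    # the test runs against a single client role (e.g. client.0); its rgw
+    # endpoint, registered by the rgw task, is what hadoop-aws gets pointed at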
role = config.get('role') + (remote,) = ctx.cluster.only(role).remotes.keys() + endpoint = ctx.rgw.role_endpoints.get(role) + assert endpoint, 's3tests: no rgw endpoint for {}'.format(role) + + # get versions + maven_major = config.get('maven-major', 'maven-3') + maven_version = config.get('maven-version', '3.6.3') + hadoop_ver = config.get('hadoop-version', '2.9.2') + bucket_name = config.get('bucket-name', 's3atest') + access_key = config.get('access-key', 'EGAQRD2ULOIFKFSKCT4F') + secret_key = config.get( + 'secret-key', + 'zi816w1vZKfaSM85Cl0BxXTwSLyN7zB4RbTswrGb') + + # set versions for cloning the repo + apache_maven = 'apache-maven-{maven_version}-bin.tar.gz'.format( + maven_version=maven_version) + maven_link = 'http://apache.mirrors.lucidnetworks.net/maven/' + \ + '{maven_major}/{maven_version}/binaries/'.format(maven_major=maven_major, maven_version=maven_version) + apache_maven + hadoop_git = 'https://github.com/apache/hadoop' + hadoop_rel = 'hadoop-{ver} rel/release-{ver}'.format(ver=hadoop_ver) + install_prereq(remote) + remote.run( + args=[ + 'cd', + testdir, + run.Raw('&&'), + 'wget', + maven_link, + run.Raw('&&'), + 'tar', + '-xvf', + apache_maven, + run.Raw('&&'), + 'git', + 'clone', + run.Raw(hadoop_git), + run.Raw('&&'), + 'cd', + 'hadoop', + run.Raw('&&'), + 'git', + 'checkout', + '-b', + run.Raw(hadoop_rel) + ] + ) + configure_s3a(remote, endpoint.dns_name, access_key, secret_key, bucket_name, testdir) + setup_user_bucket(remote, endpoint.dns_name, access_key, secret_key, bucket_name, testdir) + if hadoop_ver.startswith('2.8'): + # test all ITtests but skip AWS test using public bucket landsat-pds + # which is not available from within this test + test_options = '-Dit.test=ITestS3A* -Dparallel-tests -Dscale \ + -Dfs.s3a.scale.test.timeout=1200 \ + -Dfs.s3a.scale.test.huge.filesize=256M verify' + else: + test_options = 'test -Dtest=S3a*,TestS3A*' + try: + run_s3atest(remote, maven_version, testdir, test_options) + yield + finally: + log.info("Done s3a testing, Cleaning up") + for fil in ['apache*', 'hadoop*', 'venv*', 'create*']: + remote.run(args=['rm', run.Raw('-rf'), run.Raw('{tdir}/{file}'.format(tdir=testdir, file=fil))]) + + +def install_prereq(client): + """ + Install pre requisites for RHEL and CentOS + TBD: Ubuntu + """ + if client.os.name == 'rhel' or client.os.name == 'centos': + client.run( + args=[ + 'sudo', + 'yum', + 'install', + '-y', + 'protobuf-c.x86_64', + 'java', + 'java-1.8.0-openjdk-devel', + 'dnsmasq' + ] + ) + + +def setup_user_bucket(client, dns_name, access_key, secret_key, bucket_name, testdir): + """ + Create user with access_key and secret_key that will be + used for the s3a testdir + """ + client.run( + args=[ + 'sudo', + 'radosgw-admin', + 'user', + 'create', + run.Raw('--uid'), + 's3a', + run.Raw('--display-name=s3a cephtests'), + run.Raw('--access-key={access_key}'.format(access_key=access_key)), + run.Raw('--secret-key={secret_key}'.format(secret_key=secret_key)), + run.Raw('--email=s3a@ceph.com'), + ] + ) + client.run( + args=[ + 'virtualenv', + '{testdir}/venv'.format(testdir=testdir), + run.Raw('&&'), + run.Raw('{testdir}/venv/bin/pip'.format(testdir=testdir)), + 'install', + 'boto' + ] + ) + create_bucket = """ +#!/usr/bin/env python +import boto +import boto.s3.connection +access_key = '{access_key}' +secret_key = '{secret_key}' + +conn = boto.connect_s3( + aws_access_key_id = access_key, + aws_secret_access_key = secret_key, + host = '{dns_name}', + is_secure=False, + calling_format = boto.s3.connection.OrdinaryCallingFormat(), + 
) +bucket = conn.create_bucket('{bucket_name}') +for bucket in conn.get_all_buckets(): + print(bucket.name + "\t" + bucket.creation_date) +""".format(access_key=access_key, secret_key=secret_key, dns_name=dns_name, bucket_name=bucket_name) + py_bucket_file = '{testdir}/create_bucket.py'.format(testdir=testdir) + misc.sudo_write_file( + remote=client, + path=py_bucket_file, + data=create_bucket, + perms='0744', + ) + client.run( + args=[ + 'cat', + '{testdir}/create_bucket.py'.format(testdir=testdir), + ] + ) + client.run( + args=[ + '{testdir}/venv/bin/python'.format(testdir=testdir), + '{testdir}/create_bucket.py'.format(testdir=testdir), + ] + ) + + +def run_s3atest(client, maven_version, testdir, test_options): + """ + Finally run the s3a test + """ + aws_testdir = '{testdir}/hadoop/hadoop-tools/hadoop-aws/'.format(testdir=testdir) + run_test = '{testdir}/apache-maven-{maven_version}/bin/mvn'.format(testdir=testdir, maven_version=maven_version) + # Remove AWS CredentialsProvider tests as it hits public bucket from AWS + # better solution is to create the public bucket on local server and test + rm_test = 'rm src/test/java/org/apache/hadoop/fs/s3a/ITestS3AAWSCredentialsProvider.java' + client.run( + args=[ + 'cd', + run.Raw(aws_testdir), + run.Raw('&&'), + run.Raw(rm_test), + run.Raw('&&'), + run.Raw(run_test), + run.Raw(test_options) + ] + ) + + +def configure_s3a(client, dns_name, access_key, secret_key, bucket_name, testdir): + """ + Use the template to configure s3a test, Fill in access_key, secret_key + and other details required for test. + """ + config_template = """ + +fs.s3a.endpoint +{name} + + + +fs.contract.test.fs.s3a +s3a://{bucket_name}/ + + + +fs.s3a.connection.ssl.enabled +false + + + +test.fs.s3n.name +s3n://{bucket_name}/ + + + +test.fs.s3a.name +s3a://{bucket_name}/ + + + +test.fs.s3.name +s3://{bucket_name}/ + + + +fs.s3.awsAccessKeyId +{access_key} + + + +fs.s3.awsSecretAccessKey +{secret_key} + + + +fs.s3n.awsAccessKeyId +{access_key} + + + +fs.s3n.awsSecretAccessKey +{secret_key} + + + +fs.s3a.access.key +AWS access key ID. Omit for Role-based authentication. +{access_key} + + + +fs.s3a.secret.key +AWS secret key. Omit for Role-based authentication. +{secret_key} + + +""".format(name=dns_name, bucket_name=bucket_name, access_key=access_key, secret_key=secret_key) + config_path = testdir + '/hadoop/hadoop-tools/hadoop-aws/src/test/resources/auth-keys.xml' + misc.write_file( + remote=client, + path=config_path, + data=config_template, + ) + # output for debug + client.run(args=['cat', config_path]) diff --git a/qa/tasks/s3readwrite.py b/qa/tasks/s3readwrite.py new file mode 100644 index 00000000..d9e7234c --- /dev/null +++ b/qa/tasks/s3readwrite.py @@ -0,0 +1,353 @@ +""" +Run rgw s3 readwite tests +""" +import base64 +import contextlib +import logging +import os +import random +import string +import yaml + +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.config import config as teuth_config +from teuthology.orchestra import run +from teuthology.orchestra.connection import split_user + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def download(ctx, config): + """ + Download the s3 tests from the git builder. + Remove downloaded s3 file upon exit. + + The context passed in should be identical to the context + passed in to the main task. 
+ """ + assert isinstance(config, dict) + log.info('Downloading s3-tests...') + testdir = teuthology.get_testdir(ctx) + for (client, client_config) in config.items(): + s3tests_branch = client_config.get('force-branch', None) + if not s3tests_branch: + raise ValueError( + "Could not determine what branch to use for s3-tests. Please add 'force-branch: {s3-tests branch name}' to the .yaml config for this s3readwrite task.") + + log.info("Using branch '%s' for s3tests", s3tests_branch) + sha1 = client_config.get('sha1') + git_remote = client_config.get('git_remote', teuth_config.ceph_git_base_url) + ctx.cluster.only(client).run( + args=[ + 'git', 'clone', + '-b', s3tests_branch, + git_remote + 's3-tests.git', + '{tdir}/s3-tests'.format(tdir=testdir), + ], + ) + if sha1 is not None: + ctx.cluster.only(client).run( + args=[ + 'cd', '{tdir}/s3-tests'.format(tdir=testdir), + run.Raw('&&'), + 'git', 'reset', '--hard', sha1, + ], + ) + try: + yield + finally: + log.info('Removing s3-tests...') + testdir = teuthology.get_testdir(ctx) + for client in config: + ctx.cluster.only(client).run( + args=[ + 'rm', + '-rf', + '{tdir}/s3-tests'.format(tdir=testdir), + ], + ) + + +def _config_user(s3tests_conf, section, user): + """ + Configure users for this section by stashing away keys, ids, and + email addresses. + """ + s3tests_conf[section].setdefault('user_id', user) + s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user)) + s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user)) + s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.ascii_uppercase) for i in range(20))) + s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)).decode('ascii')) + +@contextlib.contextmanager +def create_users(ctx, config): + """ + Create a default s3 user. 
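+
+    Also fills in the per-client 'readwrite' defaults (bucket prefix, reader
+    and writer counts, duration and file sizes) before creating the user with
+    radosgw-admin.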
+ """ + assert isinstance(config, dict) + log.info('Creating rgw users...') + testdir = teuthology.get_testdir(ctx) + users = {'s3': 'foo'} + cached_client_user_names = dict() + for client in config['clients']: + cached_client_user_names[client] = dict() + s3tests_conf = config['s3tests_conf'][client] + s3tests_conf.setdefault('readwrite', {}) + s3tests_conf['readwrite'].setdefault('bucket', 'rwtest-' + client + '-{random}-') + s3tests_conf['readwrite'].setdefault('readers', 10) + s3tests_conf['readwrite'].setdefault('writers', 3) + s3tests_conf['readwrite'].setdefault('duration', 300) + s3tests_conf['readwrite'].setdefault('files', {}) + rwconf = s3tests_conf['readwrite'] + rwconf['files'].setdefault('num', 10) + rwconf['files'].setdefault('size', 2000) + rwconf['files'].setdefault('stddev', 500) + for section, user in users.items(): + _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client)) + log.debug('creating user {user} on {client}'.format(user=s3tests_conf[section]['user_id'], + client=client)) + + # stash the 'delete_user' flag along with user name for easier cleanup + delete_this_user = True + if 'delete_user' in s3tests_conf['s3']: + delete_this_user = s3tests_conf['s3']['delete_user'] + log.debug('delete_user set to {flag} for {client}'.format(flag=delete_this_user, client=client)) + cached_client_user_names[client][section+user] = (s3tests_conf[section]['user_id'], delete_this_user) + + # skip actual user creation if the create_user flag is set to false for this client + if 'create_user' in s3tests_conf['s3'] and s3tests_conf['s3']['create_user'] == False: + log.debug('create_user set to False, skipping user creation for {client}'.format(client=client)) + continue + else: + ctx.cluster.only(client).run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'radosgw-admin', + '-n', client, + 'user', 'create', + '--uid', s3tests_conf[section]['user_id'], + '--display-name', s3tests_conf[section]['display_name'], + '--access-key', s3tests_conf[section]['access_key'], + '--secret', s3tests_conf[section]['secret_key'], + '--email', s3tests_conf[section]['email'], + ], + ) + try: + yield + finally: + for client in config['clients']: + for section, user in users.items(): + #uid = '{user}.{client}'.format(user=user, client=client) + real_uid, delete_this_user = cached_client_user_names[client][section+user] + if delete_this_user: + ctx.cluster.only(client).run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'radosgw-admin', + '-n', client, + 'user', 'rm', + '--uid', real_uid, + '--purge-data', + ], + ) + else: + log.debug('skipping delete for user {uid} on {client}'.format(uid=real_uid, client=client)) + +@contextlib.contextmanager +def configure(ctx, config): + """ + Configure the s3-tests. This includes the running of the + bootstrap code and the updating of local conf files. 
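+
+    The merged 's3' and 'readwrite' sections are written to
+    {testdir}/archive/s3readwrite.{client}.config.yaml, which run_tests() later
+    feeds to s3tests-test-readwrite on stdin.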
+ """ + assert isinstance(config, dict) + log.info('Configuring s3-readwrite-tests...') + for client, properties in config['clients'].items(): + s3tests_conf = config['s3tests_conf'][client] + if properties is not None and 'rgw_server' in properties: + host = None + for target, roles in zip(ctx.config['targets'].keys(), ctx.config['roles']): + log.info('roles: ' + str(roles)) + log.info('target: ' + str(target)) + if properties['rgw_server'] in roles: + _, host = split_user(target) + assert host is not None, "Invalid client specified as the rgw_server" + s3tests_conf['s3']['host'] = host + else: + s3tests_conf['s3']['host'] = 'localhost' + + def_conf = s3tests_conf['DEFAULT'] + s3tests_conf['s3'].setdefault('port', def_conf['port']) + s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure']) + + (remote,) = ctx.cluster.only(client).remotes.keys() + remote.run( + args=[ + 'cd', + '{tdir}/s3-tests'.format(tdir=teuthology.get_testdir(ctx)), + run.Raw('&&'), + './bootstrap', + ], + ) + conf = dict( + s3=s3tests_conf['s3'], + readwrite=s3tests_conf['readwrite'], + ) + teuthology.write_file( + remote=remote, + path='{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=teuthology.get_testdir(ctx), client=client), + data=yaml.safe_dump(conf, default_flow_style=False), + ) + yield + + +@contextlib.contextmanager +def run_tests(ctx, config): + """ + Run the s3readwrite tests after everything is set up. + + :param ctx: Context passed to task + :param config: specific configuration information + """ + assert isinstance(config, dict) + testdir = teuthology.get_testdir(ctx) + for client, client_config in config.items(): + (remote,) = ctx.cluster.only(client).remotes.keys() + conf = teuthology.get_file(remote, '{tdir}/archive/s3readwrite.{client}.config.yaml'.format(tdir=testdir, client=client)) + args = [ + '{tdir}/s3-tests/virtualenv/bin/s3tests-test-readwrite'.format(tdir=testdir), + ] + if client_config is not None and 'extra_args' in client_config: + args.extend(client_config['extra_args']) + + ctx.cluster.only(client).run( + args=args, + stdin=conf, + ) + yield + + +@contextlib.contextmanager +def task(ctx, config): + """ + Run the s3tests-test-readwrite suite against rgw. + + To run all tests on all clients:: + + tasks: + - ceph: + - rgw: + - s3readwrite: + force-branch: ceph-nautilus + + To restrict testing to particular clients:: + + tasks: + - ceph: + - rgw: [client.0] + - s3readwrite: [client.0] + + To run against a server on client.1:: + + tasks: + - ceph: + - rgw: [client.1] + - s3readwrite: + client.0: + force-branch: ceph-nautilus + rgw_server: client.1 + + To pass extra test arguments + + tasks: + - ceph: + - rgw: [client.0] + - s3readwrite: + client.0: + force-branch: ceph-nautilus + readwrite: + bucket: mybucket + readers: 10 + writers: 3 + duration: 600 + files: + num: 10 + size: 2000 + stddev: 500 + client.1: + ... 
+ + To override s3 configuration + + tasks: + - ceph: + - rgw: [client.0] + - s3readwrite: + client.0: + force-branch: ceph-nautilus + s3: + user_id: myuserid + display_name: myname + email: my@email + access_key: myaccesskey + secret_key: mysecretkey + + """ + assert hasattr(ctx, 'rgw'), 's3readwrite must run after the rgw task' + assert config is None or isinstance(config, list) \ + or isinstance(config, dict), \ + "task s3readwrite only supports a list or dictionary for configuration" + all_clients = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + if config is None: + config = all_clients + if isinstance(config, list): + config = dict.fromkeys(config) + clients = config.keys() + + overrides = ctx.config.get('overrides', {}) + # merge each client section, not the top level. + for client in config.keys(): + if not config[client]: + config[client] = {} + teuthology.deep_merge(config[client], overrides.get('s3readwrite', {})) + + log.debug('in s3readwrite, config is %s', config) + + s3tests_conf = {} + for client in clients: + if config[client] is None: + config[client] = {} + config[client].setdefault('s3', {}) + config[client].setdefault('readwrite', {}) + endpoint = ctx.rgw.role_endpoints.get(client) + assert endpoint, 's3readwrite: no rgw endpoint for {}'.format(client) + + s3tests_conf[client] = ({ + 'DEFAULT': + { + 'port' : endpoint.port, + 'is_secure' : endpoint.cert is not None, + }, + 'readwrite' : config[client]['readwrite'], + 's3' : config[client]['s3'], + }) + + with contextutil.nested( + lambda: download(ctx=ctx, config=config), + lambda: create_users(ctx=ctx, config=dict( + clients=clients, + s3tests_conf=s3tests_conf, + )), + lambda: configure(ctx=ctx, config=dict( + clients=config, + s3tests_conf=s3tests_conf, + )), + lambda: run_tests(ctx=ctx, config=config), + ): + pass + yield diff --git a/qa/tasks/s3roundtrip.py b/qa/tasks/s3roundtrip.py new file mode 100644 index 00000000..cf6a9e86 --- /dev/null +++ b/qa/tasks/s3roundtrip.py @@ -0,0 +1,326 @@ +""" +Run rgw roundtrip message tests +""" +import base64 +import contextlib +import logging +import os +import random +import string +import yaml + +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.config import config as teuth_config +from teuthology.orchestra import run +from teuthology.orchestra.connection import split_user + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def download(ctx, config): + """ + Download the s3 tests from the git builder. + Remove downloaded s3 file upon exit. + + The context passed in should be identical to the context + passed in to the main task. + """ + assert isinstance(config, dict) + log.info('Downloading s3-tests...') + testdir = teuthology.get_testdir(ctx) + for (client, client_config) in config.items(): + s3tests_branch = client_config.get('force-branch', None) + if not s3tests_branch: + raise ValueError( + "Could not determine what branch to use for s3-tests. 
Please add 'force-branch: {s3-tests branch name}' to the .yaml config for this s3roundtrip task.") + + log.info("Using branch '%s' for s3tests", s3tests_branch) + sha1 = client_config.get('sha1') + git_remote = client_config.get('git_remote', teuth_config.ceph_git_base_url) + ctx.cluster.only(client).run( + args=[ + 'git', 'clone', + '-b', s3tests_branch, + git_remote + 's3-tests.git', + '{tdir}/s3-tests'.format(tdir=testdir), + ], + ) + if sha1 is not None: + ctx.cluster.only(client).run( + args=[ + 'cd', '{tdir}/s3-tests'.format(tdir=testdir), + run.Raw('&&'), + 'git', 'reset', '--hard', sha1, + ], + ) + try: + yield + finally: + log.info('Removing s3-tests...') + testdir = teuthology.get_testdir(ctx) + for client in config: + ctx.cluster.only(client).run( + args=[ + 'rm', + '-rf', + '{tdir}/s3-tests'.format(tdir=testdir), + ], + ) + + +def _config_user(s3tests_conf, section, user): + """ + Configure users for this section by stashing away keys, ids, and + email addresses. + """ + s3tests_conf[section].setdefault('user_id', user) + s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user)) + s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user)) + s3tests_conf[section].setdefault('access_key', ''.join(random.choice(string.ascii_uppercase) for i in range(20))) + s3tests_conf[section].setdefault('secret_key', base64.b64encode(os.urandom(40)).decode('ascii')) + +@contextlib.contextmanager +def create_users(ctx, config): + """ + Create a default s3 user. + """ + assert isinstance(config, dict) + log.info('Creating rgw users...') + testdir = teuthology.get_testdir(ctx) + users = {'s3': 'foo'} + for client in config['clients']: + s3tests_conf = config['s3tests_conf'][client] + s3tests_conf.setdefault('roundtrip', {}) + s3tests_conf['roundtrip'].setdefault('bucket', 'rttest-' + client + '-{random}-') + s3tests_conf['roundtrip'].setdefault('readers', 10) + s3tests_conf['roundtrip'].setdefault('writers', 3) + s3tests_conf['roundtrip'].setdefault('duration', 300) + s3tests_conf['roundtrip'].setdefault('files', {}) + rtconf = s3tests_conf['roundtrip'] + rtconf['files'].setdefault('num', 10) + rtconf['files'].setdefault('size', 2000) + rtconf['files'].setdefault('stddev', 500) + for section, user in [('s3', 'foo')]: + _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client)) + ctx.cluster.only(client).run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'radosgw-admin', + '-n', client, + 'user', 'create', + '--uid', s3tests_conf[section]['user_id'], + '--display-name', s3tests_conf[section]['display_name'], + '--access-key', s3tests_conf[section]['access_key'], + '--secret', s3tests_conf[section]['secret_key'], + '--email', s3tests_conf[section]['email'], + ], + ) + try: + yield + finally: + for client in config['clients']: + for user in users.values(): + uid = '{user}.{client}'.format(user=user, client=client) + ctx.cluster.only(client).run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'radosgw-admin', + '-n', client, + 'user', 'rm', + '--uid', uid, + '--purge-data', + ], + ) + +@contextlib.contextmanager +def configure(ctx, config): + """ + Configure the s3-tests. This includes the running of the + bootstrap code and the updating of local conf files. 
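+
+    The merged 's3' and 'roundtrip' sections are written to
+    {testdir}/archive/s3roundtrip.{client}.config.yaml and passed to
+    s3tests-test-roundtrip on stdin by run_tests().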
+ """ + assert isinstance(config, dict) + log.info('Configuring s3-roundtrip-tests...') + testdir = teuthology.get_testdir(ctx) + for client, properties in config['clients'].items(): + s3tests_conf = config['s3tests_conf'][client] + if properties is not None and 'rgw_server' in properties: + host = None + for target, roles in zip(ctx.config['targets'].keys(), ctx.config['roles']): + log.info('roles: ' + str(roles)) + log.info('target: ' + str(target)) + if properties['rgw_server'] in roles: + _, host = split_user(target) + assert host is not None, "Invalid client specified as the rgw_server" + s3tests_conf['s3']['host'] = host + else: + s3tests_conf['s3']['host'] = 'localhost' + + def_conf = s3tests_conf['DEFAULT'] + s3tests_conf['s3'].setdefault('port', def_conf['port']) + s3tests_conf['s3'].setdefault('is_secure', def_conf['is_secure']) + + (remote,) = ctx.cluster.only(client).remotes.keys() + remote.run( + args=[ + 'cd', + '{tdir}/s3-tests'.format(tdir=testdir), + run.Raw('&&'), + './bootstrap', + ], + ) + conf = dict( + s3=s3tests_conf['s3'], + roundtrip=s3tests_conf['roundtrip'], + ) + teuthology.write_file( + remote=remote, + path='{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client), + data=yaml.safe_dump(conf, default_flow_style=False)) + yield + + +@contextlib.contextmanager +def run_tests(ctx, config): + """ + Run the s3 roundtrip after everything is set up. + + :param ctx: Context passed to task + :param config: specific configuration information + """ + assert isinstance(config, dict) + testdir = teuthology.get_testdir(ctx) + for client, client_config in config.items(): + (remote,) = ctx.cluster.only(client).remotes.keys() + conf = teuthology.get_file(remote, '{tdir}/archive/s3roundtrip.{client}.config.yaml'.format(tdir=testdir, client=client)) + args = [ + '{tdir}/s3-tests/virtualenv/bin/s3tests-test-roundtrip'.format(tdir=testdir), + ] + if client_config is not None and 'extra_args' in client_config: + args.extend(client_config['extra_args']) + + ctx.cluster.only(client).run( + args=args, + stdin=conf, + ) + yield + + +@contextlib.contextmanager +def task(ctx, config): + """ + Run the s3tests-test-roundtrip suite against rgw. + + To run all tests on all clients:: + + tasks: + - ceph: + - rgw: + - s3roundtrip: + force-branch: ceph-nautilus + + To restrict testing to particular clients:: + + tasks: + - ceph: + - rgw: [client.0] + - s3roundtrip: + client.0: + force-branch: ceph-nautilus + + To run against a server on client.1:: + + tasks: + - ceph: + - rgw: [client.1] + - s3roundtrip: + client.0: + force-branch: ceph-nautilus + rgw_server: client.1 + + To pass extra test arguments + + tasks: + - ceph: + - rgw: [client.0] + - s3roundtrip: + client.0: + force-branch: ceph-nautilus + roundtrip: + bucket: mybucket + readers: 10 + writers: 3 + duration: 600 + files: + num: 10 + size: 2000 + stddev: 500 + client.1: + ... 
+ + To override s3 configuration + + tasks: + - ceph: + - rgw: [client.0] + - s3roundtrip: + force-branch: ceph-nautilus + client.0: + s3: + user_id: myuserid + display_name: myname + email: my@email + access_key: myaccesskey + secret_key: mysecretkey + + """ + assert hasattr(ctx, 'rgw'), 's3roundtrip must run after the rgw task' + assert config is None or isinstance(config, list) \ + or isinstance(config, dict), \ + "task s3roundtrip only supports a list or dictionary for configuration" + all_clients = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + if config is None: + config = all_clients + if isinstance(config, list): + config = dict.fromkeys(config) + clients = config.keys() + + s3tests_conf = {} + for client in clients: + if config[client] is None: + config[client] = {} + config[client].setdefault('s3', {}) + config[client].setdefault('roundtrip', {}) + + endpoint = ctx.rgw.role_endpoints.get(client) + assert endpoint, 's3roundtrip: no rgw endpoint for {}'.format(client) + + s3tests_conf[client] = ({ + 'DEFAULT': + { + 'port' : endpoint.port, + 'is_secure' : endpoint.cert is not None, + }, + 'roundtrip' : config[client]['roundtrip'], + 's3' : config[client]['s3'], + }) + + with contextutil.nested( + lambda: download(ctx=ctx, config=config), + lambda: create_users(ctx=ctx, config=dict( + clients=clients, + s3tests_conf=s3tests_conf, + )), + lambda: configure(ctx=ctx, config=dict( + clients=config, + s3tests_conf=s3tests_conf, + )), + lambda: run_tests(ctx=ctx, config=config), + ): + pass + yield diff --git a/qa/tasks/s3tests.py b/qa/tasks/s3tests.py new file mode 100644 index 00000000..1b88ec74 --- /dev/null +++ b/qa/tasks/s3tests.py @@ -0,0 +1,424 @@ +""" +Run a set of s3 tests on rgw. +""" +from io import BytesIO +from configobj import ConfigObj +import base64 +import contextlib +import logging +import os +import random +import six +import string + +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.config import config as teuth_config +from teuthology.orchestra import run +from teuthology.orchestra.connection import split_user + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def download(ctx, config): + """ + Download the s3 tests from the git builder. + Remove downloaded s3 file upon exit. + + The context passed in should be identical to the context + passed in to the main task. + """ + assert isinstance(config, dict) + log.info('Downloading s3-tests...') + testdir = teuthology.get_testdir(ctx) + for (client, client_config) in config.items(): + s3tests_branch = client_config.get('force-branch', None) + if not s3tests_branch: + raise ValueError( + "Could not determine what branch to use for s3-tests. 
Please add 'force-branch: {s3-tests branch name}' to the .yaml config for this s3tests task.") + + log.info("Using branch '%s' for s3tests", s3tests_branch) + sha1 = client_config.get('sha1') + git_remote = client_config.get('git_remote', teuth_config.ceph_git_base_url) + ctx.cluster.only(client).run( + args=[ + 'git', 'clone', + '-b', s3tests_branch, + git_remote + 's3-tests.git', + '{tdir}/s3-tests'.format(tdir=testdir), + ], + ) + if sha1 is not None: + ctx.cluster.only(client).run( + args=[ + 'cd', '{tdir}/s3-tests'.format(tdir=testdir), + run.Raw('&&'), + 'git', 'reset', '--hard', sha1, + ], + ) + try: + yield + finally: + log.info('Removing s3-tests...') + testdir = teuthology.get_testdir(ctx) + for client in config: + ctx.cluster.only(client).run( + args=[ + 'rm', + '-rf', + '{tdir}/s3-tests'.format(tdir=testdir), + ], + ) + + +def _config_user(s3tests_conf, section, user): + """ + Configure users for this section by stashing away keys, ids, and + email addresses. + """ + s3tests_conf[section].setdefault('user_id', user) + s3tests_conf[section].setdefault('email', '{user}+test@test.test'.format(user=user)) + s3tests_conf[section].setdefault('display_name', 'Mr. {user}'.format(user=user)) + s3tests_conf[section].setdefault('access_key', + ''.join(random.choice(string.ascii_uppercase) for i in range(20))) + s3tests_conf[section].setdefault('secret_key', + six.ensure_str(base64.b64encode(os.urandom(40)))) + s3tests_conf[section].setdefault('totp_serial', + ''.join(random.choice(string.digits) for i in range(10))) + s3tests_conf[section].setdefault('totp_seed', + six.ensure_str(base64.b32encode(os.urandom(40)))) + s3tests_conf[section].setdefault('totp_seconds', '5') + + +@contextlib.contextmanager +def create_users(ctx, config): + """ + Create a main and an alternate s3 user. + """ + assert isinstance(config, dict) + log.info('Creating rgw users...') + testdir = teuthology.get_testdir(ctx) + users = {'s3 main': 'foo', 's3 alt': 'bar', 's3 tenant': 'testx$tenanteduser'} + for client in config['clients']: + s3tests_conf = config['s3tests_conf'][client] + s3tests_conf.setdefault('fixtures', {}) + s3tests_conf['fixtures'].setdefault('bucket prefix', 'test-' + client + '-{random}-') + for section, user in users.items(): + _config_user(s3tests_conf, section, '{user}.{client}'.format(user=user, client=client)) + log.debug('Creating user {user} on {host}'.format(user=s3tests_conf[section]['user_id'], host=client)) + cluster_name, daemon_type, client_id = teuthology.split_role(client) + client_with_id = daemon_type + '.' 
+ client_id + ctx.cluster.only(client).run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'radosgw-admin', + '-n', client_with_id, + 'user', 'create', + '--uid', s3tests_conf[section]['user_id'], + '--display-name', s3tests_conf[section]['display_name'], + '--access-key', s3tests_conf[section]['access_key'], + '--secret', s3tests_conf[section]['secret_key'], + '--email', s3tests_conf[section]['email'], + '--cluster', cluster_name, + ], + ) + ctx.cluster.only(client).run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'radosgw-admin', + '-n', client_with_id, + 'mfa', 'create', + '--uid', s3tests_conf[section]['user_id'], + '--totp-serial', s3tests_conf[section]['totp_serial'], + '--totp-seed', s3tests_conf[section]['totp_seed'], + '--totp-seconds', s3tests_conf[section]['totp_seconds'], + '--totp-window', '8', + '--totp-seed-type', 'base32', + '--cluster', cluster_name, + ], + ) + try: + yield + finally: + for client in config['clients']: + for user in users.values(): + uid = '{user}.{client}'.format(user=user, client=client) + cluster_name, daemon_type, client_id = teuthology.split_role(client) + client_with_id = daemon_type + '.' + client_id + ctx.cluster.only(client).run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'radosgw-admin', + '-n', client_with_id, + 'user', 'rm', + '--uid', uid, + '--purge-data', + '--cluster', cluster_name, + ], + ) + + +@contextlib.contextmanager +def configure(ctx, config): + """ + Configure the s3-tests. This includes the running of the + bootstrap code and the updating of local conf files. + """ + assert isinstance(config, dict) + log.info('Configuring s3-tests...') + testdir = teuthology.get_testdir(ctx) + for client, properties in config['clients'].items(): + s3tests_conf = config['s3tests_conf'][client] + if properties is not None and 'rgw_server' in properties: + host = None + for target, roles in zip(ctx.config['targets'].keys(), ctx.config['roles']): + log.info('roles: ' + str(roles)) + log.info('target: ' + str(target)) + if properties['rgw_server'] in roles: + _, host = split_user(target) + assert host is not None, "Invalid client specified as the rgw_server" + s3tests_conf['DEFAULT']['host'] = host + else: + s3tests_conf['DEFAULT']['host'] = 'localhost' + + if properties is not None and 'slow_backend' in properties: + s3tests_conf['fixtures']['slow backend'] = properties['slow_backend'] + + (remote,) = ctx.cluster.only(client).remotes.keys() + remote.run( + args=[ + 'cd', + '{tdir}/s3-tests'.format(tdir=testdir), + run.Raw('&&'), + './bootstrap', + ], + ) + conf_fp = BytesIO() + s3tests_conf.write(conf_fp) + teuthology.write_file( + remote=remote, + path='{tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client), + data=conf_fp.getvalue(), + ) + + log.info('Configuring boto...') + boto_src = os.path.join(os.path.dirname(__file__), 'boto.cfg.template') + for client, properties in config['clients'].items(): + with open(boto_src, 'rb') as f: + (remote,) = ctx.cluster.only(client).remotes.keys() + conf = six.ensure_str(f.read()).format( + idle_timeout=config.get('idle_timeout', 30) + ) + teuthology.write_file( + remote=remote, + path='{tdir}/boto.cfg'.format(tdir=testdir), + data=six.ensure_binary(conf), + ) + + try: + yield + + finally: + log.info('Cleaning up boto...') + for client, properties in config['clients'].items(): + (remote,) = 
ctx.cluster.only(client).remotes.keys() + remote.run( + args=[ + 'rm', + '{tdir}/boto.cfg'.format(tdir=testdir), + ], + ) + +@contextlib.contextmanager +def run_tests(ctx, config): + """ + Run the s3tests after everything is set up. + + :param ctx: Context passed to task + :param config: specific configuration information + """ + assert isinstance(config, dict) + testdir = teuthology.get_testdir(ctx) + # civetweb > 1.8 && beast parsers are strict on rfc2616 + attrs = ["!fails_on_rgw", "!lifecycle_expiration", "!fails_strict_rfc2616"] + for client, client_config in config.items(): + (remote,) = ctx.cluster.only(client).remotes.keys() + args = [ + 'S3TEST_CONF={tdir}/archive/s3-tests.{client}.conf'.format(tdir=testdir, client=client), + 'BOTO_CONFIG={tdir}/boto.cfg'.format(tdir=testdir) + ] + # the 'requests' library comes with its own ca bundle to verify ssl + # certificates - override that to use the system's ca bundle, which + # is where the ssl task installed this certificate + if remote.os.package_type == 'deb': + args += ['REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt'] + else: + args += ['REQUESTS_CA_BUNDLE=/etc/pki/tls/certs/ca-bundle.crt'] + args += [ + '{tdir}/s3-tests/virtualenv/bin/nosetests'.format(tdir=testdir), + '-w', + '{tdir}/s3-tests'.format(tdir=testdir), + '-v', + '-a', ','.join(attrs), + ] + if client_config is not None and 'extra_args' in client_config: + args.extend(client_config['extra_args']) + + remote.run( + args=args, + label="s3 tests against rgw" + ) + yield + +@contextlib.contextmanager +def scan_for_leaked_encryption_keys(ctx, config): + """ + Scan radosgw logs for the encryption keys used by s3tests to + verify that we're not leaking secrets. + + :param ctx: Context passed to task + :param config: specific configuration information + """ + assert isinstance(config, dict) + + try: + yield + finally: + # x-amz-server-side-encryption-customer-key + s3test_customer_key = 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=' + + log.debug('Scanning radosgw logs for leaked encryption keys...') + procs = list() + for client, client_config in config.items(): + if not client_config.get('scan_for_encryption_keys', True): + continue + cluster_name, daemon_type, client_id = teuthology.split_role(client) + client_with_cluster = '.'.join((cluster_name, daemon_type, client_id)) + (remote,) = ctx.cluster.only(client).remotes.keys() + proc = remote.run( + args=[ + 'grep', + '--binary-files=text', + s3test_customer_key, + '/var/log/ceph/rgw.{client}.log'.format(client=client_with_cluster), + ], + wait=False, + check_status=False, + ) + procs.append(proc) + + for proc in procs: + proc.wait() + if proc.returncode == 1: # 1 means no matches + continue + log.error('radosgw log is leaking encryption keys!') + raise Exception('radosgw log is leaking encryption keys') + +@contextlib.contextmanager +def task(ctx, config): + """ + Run the s3-tests suite against rgw. + + To run all tests on all clients:: + + tasks: + - ceph: + - rgw: + - s3tests: + + To restrict testing to particular clients:: + + tasks: + - ceph: + - rgw: [client.0] + - s3tests: + client.0: + force-branch: ceph-nautilus + + To run against a server on client.1 and increase the boto timeout to 10m:: + + tasks: + - ceph: + - rgw: [client.1] + - s3tests: + client.0: + force-branch: ceph-nautilus + rgw_server: client.1 + idle_timeout: 600 + + To pass extra arguments to nose (e.g. 
to run a certain test):: + + tasks: + - ceph: + - rgw: [client.0] + - s3tests: + client.0: + force-branch: ceph-nautilus + extra_args: ['test_s3:test_object_acl_grand_public_read'] + client.1: + force-branch: ceph-nautilus + extra_args: ['--exclude', 'test_100_continue'] + """ + assert hasattr(ctx, 'rgw'), 's3tests must run after the rgw task' + assert config is None or isinstance(config, list) \ + or isinstance(config, dict), \ + "task s3tests only supports a list or dictionary for configuration" + all_clients = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + if config is None: + config = all_clients + if isinstance(config, list): + config = dict.fromkeys(config) + clients = config.keys() + + overrides = ctx.config.get('overrides', {}) + # merge each client section, not the top level. + for client in config.keys(): + if not config[client]: + config[client] = {} + teuthology.deep_merge(config[client], overrides.get('s3tests', {})) + + log.debug('s3tests config is %s', config) + + s3tests_conf = {} + for client in clients: + endpoint = ctx.rgw.role_endpoints.get(client) + assert endpoint, 's3tests: no rgw endpoint for {}'.format(client) + + s3tests_conf[client] = ConfigObj( + indent_type='', + infile={ + 'DEFAULT': + { + 'port' : endpoint.port, + 'is_secure' : endpoint.cert is not None, + 'api_name' : 'default', + }, + 'fixtures' : {}, + 's3 main' : {}, + 's3 alt' : {}, + 's3 tenant': {}, + } + ) + + with contextutil.nested( + lambda: download(ctx=ctx, config=config), + lambda: create_users(ctx=ctx, config=dict( + clients=clients, + s3tests_conf=s3tests_conf, + )), + lambda: configure(ctx=ctx, config=dict( + clients=config, + s3tests_conf=s3tests_conf, + )), + lambda: run_tests(ctx=ctx, config=config), + lambda: scan_for_leaked_encryption_keys(ctx=ctx, config=config), + ): + pass + yield diff --git a/qa/tasks/samba.py b/qa/tasks/samba.py new file mode 100644 index 00000000..1dd62d86 --- /dev/null +++ b/qa/tasks/samba.py @@ -0,0 +1,247 @@ +""" +Samba +""" +import contextlib +import logging +import sys +import time + +import six + +from teuthology import misc as teuthology +from teuthology.orchestra import run +from teuthology.orchestra.daemon import DaemonGroup + +log = logging.getLogger(__name__) + + +def get_sambas(ctx, roles): + """ + Scan for roles that are samba. Yield the id of the the samba role + (samba.0, samba.1...) and the associated remote site + + :param ctx: Context + :param roles: roles for this test (extracted from yaml files) + """ + for role in roles: + assert isinstance(role, six.string_types) + PREFIX = 'samba.' + assert role.startswith(PREFIX) + id_ = role[len(PREFIX):] + (remote,) = ctx.cluster.only(role).remotes.keys() + yield (id_, remote) + + +@contextlib.contextmanager +def task(ctx, config): + """ + Setup samba smbd with ceph vfs module. This task assumes the samba + package has already been installed via the install task. + + The config is optional and defaults to starting samba on all nodes. + If a config is given, it is expected to be a list of + samba nodes to start smbd servers on. 
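
A minimal, self-contained sketch of the list-or-dict config normalization used by the s3tests task above and repeated by this samba task: a missing config expands to all matching roles, and a plain list becomes a dict with empty per-role settings so overrides can be merged in later. The role names below are hypothetical examples, not taken from a real job::

    # Standalone sketch of the shared config-normalization pattern.
    # Role names are hypothetical examples.
    def normalize_task_config(config, all_roles):
        if config is None:
            config = list(all_roles)
        if isinstance(config, list):
            config = dict.fromkeys(config)   # every role maps to None for now
        return config

    print(normalize_task_config(None, ['samba.0', 'samba.1']))
    # {'samba.0': None, 'samba.1': None}
    print(normalize_task_config(['samba.0'], ['samba.0', 'samba.1']))
    # {'samba.0': None}
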
+ + Example that starts smbd on all samba nodes:: + + tasks: + - install: + - install: + project: samba + extra_packages: ['samba'] + - ceph: + - samba: + - interactive: + + Example that starts smbd on just one of the samba nodes and cifs on the other:: + + tasks: + - samba: [samba.0] + - cifs: [samba.1] + + An optional backend can be specified, and requires a path which smbd will + use as the backend storage location: + + roles: + - [osd.0, osd.1, osd.2, mon.0, mon.1, mon.2, mds.a] + - [client.0, samba.0] + + tasks: + - ceph: + - ceph-fuse: [client.0] + - samba: + samba.0: + cephfuse: "{testdir}/mnt.0" + + This mounts ceph to {testdir}/mnt.0 using fuse, and starts smbd with + a UNC of //localhost/cephfuse. Access through that UNC will be on + the ceph fuse mount point. + + If no arguments are specified in the samba + role, the default behavior is to enable the ceph UNC //localhost/ceph + and use the ceph vfs module as the smbd backend. + + :param ctx: Context + :param config: Configuration + """ + log.info("Setting up smbd with ceph vfs...") + assert config is None or isinstance(config, list) or isinstance(config, dict), \ + "task samba got invalid config" + + if config is None: + config = dict(('samba.{id}'.format(id=id_), None) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'samba')) + elif isinstance(config, list): + config = dict((name, None) for name in config) + + samba_servers = list(get_sambas(ctx=ctx, roles=config.keys())) + + testdir = teuthology.get_testdir(ctx) + + if not hasattr(ctx, 'daemons'): + ctx.daemons = DaemonGroup() + + for id_, remote in samba_servers: + + rolestr = "samba.{id_}".format(id_=id_) + + confextras = """vfs objects = ceph + ceph:config_file = /etc/ceph/ceph.conf""" + + unc = "ceph" + backend = "/" + + if config[rolestr] is not None: + # verify that there's just one parameter in role + if len(config[rolestr]) != 1: + log.error("samba config for role samba.{id_} must have only one parameter".format(id_=id_)) + raise Exception('invalid config') + confextras = "" + (unc, backendstr) = config[rolestr].items()[0] + backend = backendstr.format(testdir=testdir) + + # on first samba role, set ownership and permissions of ceph root + # so that samba tests succeed + if config[rolestr] is None and id_ == samba_servers[0][0]: + remote.run( + args=[ + 'mkdir', '-p', '/tmp/cmnt', run.Raw('&&'), + 'sudo', 'ceph-fuse', '/tmp/cmnt', run.Raw('&&'), + 'sudo', 'chown', 'ubuntu:ubuntu', '/tmp/cmnt/', run.Raw('&&'), + 'sudo', 'chmod', '1777', '/tmp/cmnt/', run.Raw('&&'), + 'sudo', 'umount', '/tmp/cmnt/', run.Raw('&&'), + 'rm', '-rf', '/tmp/cmnt', + ], + ) + else: + remote.run( + args=[ + 'sudo', 'chown', 'ubuntu:ubuntu', backend, run.Raw('&&'), + 'sudo', 'chmod', '1777', backend, + ], + ) + + teuthology.sudo_write_file(remote, "/usr/local/samba/etc/smb.conf", """ +[global] + workgroup = WORKGROUP + netbios name = DOMAIN + +[{unc}] + path = {backend} + {extras} + writeable = yes + valid users = ubuntu +""".format(extras=confextras, unc=unc, backend=backend)) + + # create ubuntu user + remote.run( + args=[ + 'sudo', '/usr/local/samba/bin/smbpasswd', '-e', 'ubuntu', + run.Raw('||'), + 'printf', run.Raw('"ubuntu\nubuntu\n"'), + run.Raw('|'), + 'sudo', '/usr/local/samba/bin/smbpasswd', '-s', '-a', 'ubuntu' + ]) + + smbd_cmd = [ + 'sudo', + 'daemon-helper', + 'term', + 'nostdin', + '/usr/local/samba/sbin/smbd', + '-F', + ] + ctx.daemons.add_daemon(remote, 'smbd', id_, + args=smbd_cmd, + logger=log.getChild("smbd.{id_}".format(id_=id_)), + stdin=run.PIPE, + wait=False, + ) + 
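
To make the share naming above concrete: the single key/value pair allowed per samba role becomes the UNC share name and the backend path substituted into the smb.conf template. A small standalone sketch, using the cephfuse entry from the docstring and a hypothetical testdir value::

    # Sketch of the role-config -> (UNC, backend) mapping performed above.
    # The testdir value is a hypothetical example.
    role_config = {'cephfuse': '{testdir}/mnt.0'}
    testdir = '/home/ubuntu/cephtest'

    # Equivalent to: (unc, backendstr) = config[rolestr].items()[0]
    # (note that under Python 3 the items() view must be unpacked, as here,
    # or converted with list() before indexing)
    (unc, backendstr), = role_config.items()
    backend = backendstr.format(testdir=testdir)

    print(unc)      # cephfuse
    print(backend)  # /home/ubuntu/cephtest/mnt.0
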
+ # let smbd initialize, probably a better way... + seconds_to_sleep = 100 + log.info('Sleeping for %s seconds...' % seconds_to_sleep) + time.sleep(seconds_to_sleep) + log.info('Sleeping stopped...') + + try: + yield + finally: + log.info('Stopping smbd processes...') + exc_info = (None, None, None) + for d in ctx.daemons.iter_daemons_of_role('smbd'): + try: + d.stop() + except (run.CommandFailedError, + run.CommandCrashedError, + run.ConnectionLostError): + exc_info = sys.exc_info() + log.exception('Saw exception from %s.%s', d.role, d.id_) + if exc_info != (None, None, None): + six.reraise(exc_info[0], exc_info[1], exc_info[2]) + + for id_, remote in samba_servers: + remote.run( + args=[ + 'sudo', + 'rm', '-rf', + '/usr/local/samba/etc/smb.conf', + '/usr/local/samba/private/*', + '/usr/local/samba/var/run/', + '/usr/local/samba/var/locks', + '/usr/local/samba/var/lock', + ], + ) + # make sure daemons are gone + try: + remote.run( + args=[ + 'while', + 'sudo', 'killall', '-9', 'smbd', + run.Raw(';'), + 'do', 'sleep', '1', + run.Raw(';'), + 'done', + ], + ) + + remote.run( + args=[ + 'sudo', + 'lsof', + backend, + ], + check_status=False + ) + remote.run( + args=[ + 'sudo', + 'fuser', + '-M', + backend, + ], + check_status=False + ) + except Exception: + log.exception("Saw exception") + pass diff --git a/qa/tasks/scrub.py b/qa/tasks/scrub.py new file mode 100644 index 00000000..7cf304e8 --- /dev/null +++ b/qa/tasks/scrub.py @@ -0,0 +1,117 @@ +""" +Scrub osds +""" +import contextlib +import gevent +import logging +import random +import time + +import tasks.ceph_manager +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + Run scrub periodically. Randomly chooses an OSD to scrub. + + The config should be as follows: + + scrub: + frequency: + deep: + + example: + + tasks: + - ceph: + - scrub: + frequency: 30 + deep: 0 + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'scrub task only accepts a dict for configuration' + + log.info('Beginning scrub...') + + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') + while len(manager.get_osd_status()['up']) < num_osds: + time.sleep(10) + + scrub_proc = Scrubber( + manager, + config, + ) + try: + yield + finally: + log.info('joining scrub') + scrub_proc.do_join() + +class Scrubber: + """ + Scrubbing is actually performed during initialization + """ + def __init__(self, manager, config): + """ + Spawn scrubbing thread upon completion. 
+ """ + self.ceph_manager = manager + self.ceph_manager.wait_for_clean() + + osd_status = self.ceph_manager.get_osd_status() + self.osds = osd_status['up'] + + self.config = config + if self.config is None: + self.config = dict() + + else: + def tmp(x): + """Local display""" + print(x) + self.log = tmp + + self.stopping = False + + log.info("spawning thread") + + self.thread = gevent.spawn(self.do_scrub) + + def do_join(self): + """Scrubbing thread finished""" + self.stopping = True + self.thread.get() + + def do_scrub(self): + """Perform the scrub operation""" + frequency = self.config.get("frequency", 30) + deep = self.config.get("deep", 0) + + log.info("stopping %s" % self.stopping) + + while not self.stopping: + osd = str(random.choice(self.osds)) + + if deep: + cmd = 'deep-scrub' + else: + cmd = 'scrub' + + log.info('%sbing %s' % (cmd, osd)) + self.ceph_manager.raw_cluster_cmd('osd', cmd, osd) + + time.sleep(frequency) diff --git a/qa/tasks/scrub_test.py b/qa/tasks/scrub_test.py new file mode 100644 index 00000000..3d71708e --- /dev/null +++ b/qa/tasks/scrub_test.py @@ -0,0 +1,403 @@ +"""Scrub testing""" + +import contextlib +import json +import logging +import os +import time +import tempfile + +from tasks import ceph_manager +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + + +def wait_for_victim_pg(manager): + """Return a PG with some data and its acting set""" + # wait for some PG to have data that we can mess with + victim = None + while victim is None: + stats = manager.get_pg_stats() + for pg in stats: + size = pg['stat_sum']['num_bytes'] + if size > 0: + victim = pg['pgid'] + acting = pg['acting'] + return victim, acting + time.sleep(3) + + +def find_victim_object(ctx, pg, osd): + """Return a file to be fuzzed""" + (osd_remote,) = ctx.cluster.only('osd.%d' % osd).remotes.keys() + data_path = os.path.join( + '/var/lib/ceph/osd', + 'ceph-{id}'.format(id=osd), + 'fuse', + '{pg}_head'.format(pg=pg), + 'all', + ) + + # fuzz time + ls_out = osd_remote.sh('sudo ls %s' % data_path) + + # find an object file we can mess with (and not the pg info object) + osdfilename = next(line for line in ls_out.split('\n') + if not line.endswith('::::head#')) + assert osdfilename is not None + + # Get actual object name from osd stored filename + objname = osdfilename.split(':')[4] + return osd_remote, os.path.join(data_path, osdfilename), objname + + +def corrupt_file(osd_remote, path): + # put a single \0 at the beginning of the file + osd_remote.run( + args=['sudo', 'dd', + 'if=/dev/zero', + 'of=%s/data' % path, + 'bs=1', 'count=1', 'conv=notrunc'] + ) + + +def get_pgnum(pgid): + pos = pgid.find('.') + assert pos != -1 + return pgid[pos+1:] + + +def deep_scrub(manager, victim, pool): + # scrub, verify inconsistent + pgnum = get_pgnum(victim) + manager.do_pg_scrub(pool, pgnum, 'deep-scrub') + + stats = manager.get_single_pg_stats(victim) + inconsistent = stats['state'].find('+inconsistent') != -1 + assert inconsistent + + +def repair(manager, victim, pool): + # repair, verify no longer inconsistent + pgnum = get_pgnum(victim) + manager.do_pg_scrub(pool, pgnum, 'repair') + + stats = manager.get_single_pg_stats(victim) + inconsistent = stats['state'].find('+inconsistent') != -1 + assert not inconsistent + + +def test_repair_corrupted_obj(ctx, manager, pg, osd_remote, obj_path, pool): + corrupt_file(osd_remote, obj_path) + deep_scrub(manager, pg, pool) + repair(manager, pg, pool) + + +def test_repair_bad_omap(ctx, manager, pg, osd, objname): + # Test deep-scrub with 
various omap modifications + # Modify omap on specific osd + log.info('fuzzing omap of %s' % objname) + manager.osd_admin_socket(osd, ['rmomapkey', 'rbd', objname, 'key']) + manager.osd_admin_socket(osd, ['setomapval', 'rbd', objname, + 'badkey', 'badval']) + manager.osd_admin_socket(osd, ['setomapheader', 'rbd', objname, 'badhdr']) + + deep_scrub(manager, pg, 'rbd') + # please note, the repair here is errnomous, it rewrites the correct omap + # digest and data digest on the replicas with the corresponding digests + # from the primary osd which is hosting the victim object, see + # find_victim_object(). + # so we need to either put this test and the end of this task or + # undo the mess-up manually before the "repair()" that just ensures + # the cleanup is sane, otherwise the succeeding tests will fail. if they + # try set "badkey" in hope to get an "inconsistent" pg with a deep-scrub. + manager.osd_admin_socket(osd, ['setomapheader', 'rbd', objname, 'hdr']) + manager.osd_admin_socket(osd, ['rmomapkey', 'rbd', objname, 'badkey']) + manager.osd_admin_socket(osd, ['setomapval', 'rbd', objname, + 'key', 'val']) + repair(manager, pg, 'rbd') + + +class MessUp: + def __init__(self, manager, osd_remote, pool, osd_id, + obj_name, obj_path, omap_key, omap_val): + self.manager = manager + self.osd = osd_remote + self.pool = pool + self.osd_id = osd_id + self.obj = obj_name + self.path = obj_path + self.omap_key = omap_key + self.omap_val = omap_val + + @contextlib.contextmanager + def _test_with_file(self, messup_cmd, *checks): + temp = tempfile.mktemp() + backup_cmd = ['sudo', 'cp', os.path.join(self.path, 'data'), temp] + self.osd.run(args=backup_cmd) + self.osd.run(args=messup_cmd.split()) + yield checks + create_cmd = ['sudo', 'mkdir', self.path] + self.osd.run(args=create_cmd, check_status=False) + restore_cmd = ['sudo', 'cp', temp, os.path.join(self.path, 'data')] + self.osd.run(args=restore_cmd) + + def remove(self): + cmd = 'sudo rmdir {path}'.format(path=self.path) + return self._test_with_file(cmd, 'missing') + + def append(self): + cmd = 'sudo dd if=/dev/zero of={path}/data bs=1 count=1 ' \ + 'conv=notrunc oflag=append'.format(path=self.path) + return self._test_with_file(cmd, + 'data_digest_mismatch', + 'size_mismatch') + + def truncate(self): + cmd = 'sudo dd if=/dev/null of={path}/data'.format(path=self.path) + return self._test_with_file(cmd, + 'data_digest_mismatch', + 'size_mismatch') + + def change_obj(self): + cmd = 'sudo dd if=/dev/zero of={path}/data bs=1 count=1 ' \ + 'conv=notrunc'.format(path=self.path) + return self._test_with_file(cmd, + 'data_digest_mismatch') + + @contextlib.contextmanager + def rm_omap(self): + cmd = ['rmomapkey', self.pool, self.obj, self.omap_key] + self.manager.osd_admin_socket(self.osd_id, cmd) + yield ('omap_digest_mismatch',) + cmd = ['setomapval', self.pool, self.obj, + self.omap_key, self.omap_val] + self.manager.osd_admin_socket(self.osd_id, cmd) + + @contextlib.contextmanager + def add_omap(self): + cmd = ['setomapval', self.pool, self.obj, 'badkey', 'badval'] + self.manager.osd_admin_socket(self.osd_id, cmd) + yield ('omap_digest_mismatch',) + cmd = ['rmomapkey', self.pool, self.obj, 'badkey'] + self.manager.osd_admin_socket(self.osd_id, cmd) + + @contextlib.contextmanager + def change_omap(self): + cmd = ['setomapval', self.pool, self.obj, self.omap_key, 'badval'] + self.manager.osd_admin_socket(self.osd_id, cmd) + yield ('omap_digest_mismatch',) + cmd = ['setomapval', self.pool, self.obj, self.omap_key, self.omap_val] + 
self.manager.osd_admin_socket(self.osd_id, cmd) + + +class InconsistentObjChecker: + """Check the returned inconsistents/inconsistent info""" + + def __init__(self, osd, acting, obj_name): + self.osd = osd + self.acting = acting + self.obj = obj_name + assert self.osd in self.acting + + def basic_checks(self, inc): + assert inc['object']['name'] == self.obj + assert inc['object']['snap'] == "head" + assert len(inc['shards']) == len(self.acting), \ + "the number of returned shard does not match with the acting set" + + def run(self, check, inc): + func = getattr(self, check) + func(inc) + + def _check_errors(self, inc, err_name): + bad_found = False + good_found = False + for shard in inc['shards']: + log.info('shard = %r' % shard) + log.info('err = %s' % err_name) + assert 'osd' in shard + osd = shard['osd'] + err = err_name in shard['errors'] + if osd == self.osd: + assert bad_found is False, \ + "multiple entries found for the given OSD" + assert err is True, \ + "Didn't find '{err}' in errors".format(err=err_name) + bad_found = True + else: + assert osd in self.acting, "shard not in acting set" + assert err is False, \ + "Expected '{err}' in errors".format(err=err_name) + good_found = True + assert bad_found is True, \ + "Shard for osd.{osd} not found".format(osd=self.osd) + assert good_found is True, \ + "No other acting shards found" + + def _check_attrs(self, inc, attr_name): + bad_attr = None + good_attr = None + for shard in inc['shards']: + log.info('shard = %r' % shard) + log.info('attr = %s' % attr_name) + assert 'osd' in shard + osd = shard['osd'] + attr = shard.get(attr_name, False) + if osd == self.osd: + assert bad_attr is None, \ + "multiple entries found for the given OSD" + bad_attr = attr + else: + assert osd in self.acting, "shard not in acting set" + assert good_attr is None or good_attr == attr, \ + "multiple good attrs found" + good_attr = attr + assert bad_attr is not None, \ + "bad {attr} not found".format(attr=attr_name) + assert good_attr is not None, \ + "good {attr} not found".format(attr=attr_name) + assert good_attr != bad_attr, \ + "bad attr is identical to the good ones: " \ + "{0} == {1}".format(good_attr, bad_attr) + + def data_digest_mismatch(self, inc): + assert 'data_digest_mismatch' in inc['errors'] + self._check_attrs(inc, 'data_digest') + + def missing(self, inc): + assert 'missing' in inc['union_shard_errors'] + self._check_errors(inc, 'missing') + + def size_mismatch(self, inc): + assert 'size_mismatch' in inc['errors'] + self._check_attrs(inc, 'size') + + def omap_digest_mismatch(self, inc): + assert 'omap_digest_mismatch' in inc['errors'] + self._check_attrs(inc, 'omap_digest') + + +def test_list_inconsistent_obj(ctx, manager, osd_remote, pg, acting, osd_id, + obj_name, obj_path): + mon = manager.controller + pool = 'rbd' + omap_key = 'key' + omap_val = 'val' + manager.do_rados(mon, ['-p', pool, 'setomapval', obj_name, + omap_key, omap_val]) + # Update missing digests, requires "osd deep scrub update digest min age: 0" + pgnum = get_pgnum(pg) + manager.do_pg_scrub(pool, pgnum, 'deep-scrub') + + messup = MessUp(manager, osd_remote, pool, osd_id, obj_name, obj_path, + omap_key, omap_val) + for test in [messup.rm_omap, messup.add_omap, messup.change_omap, + messup.append, messup.truncate, messup.change_obj, + messup.remove]: + with test() as checks: + deep_scrub(manager, pg, pool) + cmd = 'rados list-inconsistent-pg {pool} ' \ + '--format=json'.format(pool=pool) + pgs = json.loads(mon.sh(cmd)) + assert pgs == [pg] + + cmd = 'rados 
list-inconsistent-obj {pg} ' \ + '--format=json'.format(pg=pg) + objs = json.loads(mon.sh(cmd)) + assert len(objs['inconsistents']) == 1 + + checker = InconsistentObjChecker(osd_id, acting, obj_name) + inc_obj = objs['inconsistents'][0] + log.info('inc = %r', inc_obj) + checker.basic_checks(inc_obj) + for check in checks: + checker.run(check, inc_obj) + + +def task(ctx, config): + """ + Test [deep] scrub + + tasks: + - chef: + - install: + - ceph: + log-whitelist: + - '!= data_digest' + - '!= omap_digest' + - '!= size' + - deep-scrub 0 missing, 1 inconsistent objects + - deep-scrub [0-9]+ errors + - repair 0 missing, 1 inconsistent objects + - repair [0-9]+ errors, [0-9]+ fixed + - shard [0-9]+ .* : missing + - deep-scrub 1 missing, 1 inconsistent objects + - does not match object info size + - attr name mistmatch + - deep-scrub 1 missing, 0 inconsistent objects + - failed to pick suitable auth object + - candidate size [0-9]+ info size [0-9]+ mismatch + conf: + osd: + osd deep scrub update digest min age: 0 + - scrub_test: + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'scrub_test task only accepts a dict for configuration' + first_mon = teuthology.get_first_mon(ctx, config) + (mon,) = ctx.cluster.only(first_mon).remotes.keys() + + num_osds = teuthology.num_instances_of_type(ctx.cluster, 'osd') + log.info('num_osds is %s' % num_osds) + + manager = ceph_manager.CephManager( + mon, + ctx=ctx, + logger=log.getChild('ceph_manager'), + ) + + while len(manager.get_osd_status()['up']) < num_osds: + time.sleep(10) + + for i in range(num_osds): + manager.raw_cluster_cmd('tell', 'osd.%d' % i, 'injectargs', + '--', '--osd-objectstore-fuse') + manager.flush_pg_stats(range(num_osds)) + manager.wait_for_clean() + + # write some data + p = manager.do_rados(mon, ['-p', 'rbd', 'bench', '--no-cleanup', '1', + 'write', '-b', '4096']) + log.info('err is %d' % p.exitstatus) + + # wait for some PG to have data that we can mess with + pg, acting = wait_for_victim_pg(manager) + osd = acting[0] + + osd_remote, obj_path, obj_name = find_victim_object(ctx, pg, osd) + manager.do_rados(mon, ['-p', 'rbd', 'setomapval', obj_name, 'key', 'val']) + log.info('err is %d' % p.exitstatus) + manager.do_rados(mon, ['-p', 'rbd', 'setomapheader', obj_name, 'hdr']) + log.info('err is %d' % p.exitstatus) + + # Update missing digests, requires "osd deep scrub update digest min age: 0" + pgnum = get_pgnum(pg) + manager.do_pg_scrub('rbd', pgnum, 'deep-scrub') + + log.info('messing with PG %s on osd %d' % (pg, osd)) + test_repair_corrupted_obj(ctx, manager, pg, osd_remote, obj_path, 'rbd') + test_repair_bad_omap(ctx, manager, pg, osd, obj_name) + test_list_inconsistent_obj(ctx, manager, osd_remote, pg, acting, osd, + obj_name, obj_path) + log.info('test successful!') + + # shut down fuse mount + for i in range(num_osds): + manager.raw_cluster_cmd('tell', 'osd.%d' % i, 'injectargs', + '--', '--no-osd-objectstore-fuse') + time.sleep(5) + log.info('done') diff --git a/qa/tasks/swift.py b/qa/tasks/swift.py new file mode 100644 index 00000000..f8758842 --- /dev/null +++ b/qa/tasks/swift.py @@ -0,0 +1,256 @@ +""" +Test Swift API +""" +from io import BytesIO +from configobj import ConfigObj +import base64 +import contextlib +import logging +import os + +from distutils.version import LooseVersion +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology.config import config as teuth_config +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + + 
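
Before the Swift helpers below, a note on the scrub_test checks above: InconsistentObjChecker only reads a handful of fields from the `rados list-inconsistent-obj` output. A minimal sketch of that record shape, with hypothetical values, showing the keys the basic_checks/_check_errors/_check_attrs helpers rely on::

    # Sketch of one entry from 'rados list-inconsistent-obj --format=json';
    # field names are the ones read by InconsistentObjChecker above, values
    # are hypothetical examples.
    import json

    sample = '''
    {
      "inconsistents": [
        {
          "object": {"name": "benchmark_data_0", "snap": "head"},
          "errors": ["data_digest_mismatch"],
          "union_shard_errors": [],
          "shards": [
            {"osd": 0, "errors": [], "data_digest": "0x2d4a11c2", "size": 4096},
            {"osd": 1, "errors": [], "data_digest": "0xdeadbeef", "size": 4096}
          ]
        }
      ]
    }
    '''
    inc = json.loads(sample)['inconsistents'][0]
    assert inc['object']['snap'] == 'head'
    assert 'data_digest_mismatch' in inc['errors']
    assert len(inc['shards']) == 2   # one entry per OSD in the acting set
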
+@contextlib.contextmanager +def download(ctx, config): + """ + Download the Swift API. + """ + testdir = teuthology.get_testdir(ctx) + assert isinstance(config, dict) + log.info('Downloading swift...') + for (client, cconf) in config.items(): + ctx.cluster.only(client).run( + args=[ + 'git', 'clone', + '-b', cconf.get('force-branch', 'ceph-nautilus'), + teuth_config.ceph_git_base_url + 'swift.git', + '{tdir}/swift'.format(tdir=testdir), + ], + ) + try: + yield + finally: + log.info('Removing swift...') + testdir = teuthology.get_testdir(ctx) + for (client, _) in config.items(): + ctx.cluster.only(client).run( + args=[ + 'rm', + '-rf', + '{tdir}/swift'.format(tdir=testdir), + ], + ) + +def _config_user(testswift_conf, account, user, suffix): + """ + Configure a swift user + + :param account: Swift account + :param user: User name + :param suffix: user name and email suffixes. + """ + testswift_conf['func_test'].setdefault('account{s}'.format(s=suffix), account) + testswift_conf['func_test'].setdefault('username{s}'.format(s=suffix), user) + testswift_conf['func_test'].setdefault('email{s}'.format(s=suffix), '{account}+test@test.test'.format(account=account)) + testswift_conf['func_test'].setdefault('display_name{s}'.format(s=suffix), 'Mr. {account} {user}'.format(account=account, user=user)) + testswift_conf['func_test'].setdefault('password{s}'.format(s=suffix), base64.b64encode(os.urandom(40)).decode('ascii')) + +@contextlib.contextmanager +def create_users(ctx, config): + """ + Create rgw users to interact with the swift interface. + """ + assert isinstance(config, dict) + log.info('Creating rgw users...') + testdir = teuthology.get_testdir(ctx) + users = {'': 'foo', '2': 'bar'} + for client, testswift_conf in config.items(): + cluster_name, daemon_type, client_id = teuthology.split_role(client) + for suffix, user in users.items(): + _config_user(testswift_conf, '{user}.{client}'.format(user=user, client=client), user, suffix) + ctx.cluster.only(client).run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'radosgw-admin', + '-n', client, + '--cluster', cluster_name, + 'user', 'create', + '--subuser', '{account}:{user}'.format(account=testswift_conf['func_test']['account{s}'.format(s=suffix)],user=user), + '--display-name', testswift_conf['func_test']['display_name{s}'.format(s=suffix)], + '--secret', testswift_conf['func_test']['password{s}'.format(s=suffix)], + '--email', testswift_conf['func_test']['email{s}'.format(s=suffix)], + '--key-type', 'swift', + '--access', 'full', + ], + ) + try: + yield + finally: + for client in config.keys(): + for user in users.values(): + uid = '{user}.{client}'.format(user=user, client=client) + cluster_name, daemon_type, client_id = teuthology.split_role(client) + ctx.cluster.only(client).run( + args=[ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'radosgw-admin', + '-n', client, + '--cluster', cluster_name, + 'user', 'rm', + '--uid', uid, + '--purge-data', + ], + ) + +@contextlib.contextmanager +def configure(ctx, config): + """ + Configure rgw and Swift + """ + assert isinstance(config, dict) + log.info('Configuring testswift...') + testdir = teuthology.get_testdir(ctx) + for client, testswift_conf in config.items(): + (remote,) = ctx.cluster.only(client).remotes.keys() + remote.run( + args=[ + 'cd', + '{tdir}/swift'.format(tdir=testdir), + run.Raw('&&'), + './bootstrap', + ], + ) + conf_fp = BytesIO() + testswift_conf.write(conf_fp) + 
teuthology.write_file( + remote=remote, + path='{tdir}/archive/testswift.{client}.conf'.format(tdir=testdir, client=client), + data=conf_fp.getvalue(), + ) + yield + + +@contextlib.contextmanager +def run_tests(ctx, config): + """ + Run an individual Swift test. + """ + assert isinstance(config, dict) + testdir = teuthology.get_testdir(ctx) + for client, client_config in config.items(): + args = [ + 'SWIFT_TEST_CONFIG_FILE={tdir}/archive/testswift.{client}.conf'.format(tdir=testdir, client=client), + '{tdir}/swift/virtualenv/bin/nosetests'.format(tdir=testdir), + '-w', + '{tdir}/swift/test/functional'.format(tdir=testdir), + '-v', + '-a', '!fails_on_rgw', + ] + if client_config is not None and 'extra_args' in client_config: + args.extend(client_config['extra_args']) + + ctx.cluster.only(client).run( + args=args, + ) + yield + +@contextlib.contextmanager +def task(ctx, config): + """ + Run the testswift suite against rgw. + + To run all tests on all clients:: + + tasks: + - ceph: + - rgw: + - testswift: + + To restrict testing to particular clients:: + + tasks: + - ceph: + - rgw: [client.0] + - testswift: [client.0] + + To run against a server on client.1:: + + tasks: + - ceph: + - rgw: [client.1] + - testswift: + client.0: + rgw_server: client.1 + + To pass extra arguments to nose (e.g. to run a certain test):: + + tasks: + - ceph: + - rgw: [client.0] + - testswift: + client.0: + extra_args: ['test.functional.tests:TestFileUTF8', '-m', 'testCopy'] + client.1: + extra_args: ['--exclude', 'TestFile'] + """ + assert hasattr(ctx, 'rgw'), 'swift must run after the rgw task' + assert config is None or isinstance(config, list) \ + or isinstance(config, dict), \ + "task testswift only supports a list or dictionary for configuration" + all_clients = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + if config is None: + config = all_clients + if isinstance(config, list): + config = dict.fromkeys(config) + + testswift_conf = {} + clients = [] + for client, client_config in config.items(): + # http://tracker.ceph.com/issues/40304 can't bootstrap on rhel 7.6+ + (remote,) = ctx.cluster.only(client).remotes.keys() + if remote.os.name == 'rhel' and LooseVersion(remote.os.version) >= LooseVersion('7.6'): + log.warning('Swift tests cannot run on rhel 7.6+, skipping client {}'.format(client)) + continue + + clients.append(client) + + server = client_config.get('rgw_server', client) + endpoint = ctx.rgw.role_endpoints.get(server) + assert endpoint, 'swift: no rgw endpoint for {}'.format(server) + + testswift_conf[client] = ConfigObj( + indent_type='', + infile={ + 'func_test': + { + 'auth_host' : endpoint.hostname, + 'auth_port' : endpoint.port, + 'auth_ssl' : 'yes' if endpoint.cert else 'no', + 'auth_prefix' : '/auth/', + }, + } + ) + # only take config for valid clients + config = {c: config[c] for c in clients} + + log.info('clients={c}'.format(c=config.keys())) + with contextutil.nested( + lambda: download(ctx=ctx, config=config), + lambda: create_users(ctx=ctx, config=testswift_conf), + lambda: configure(ctx=ctx, config=testswift_conf), + lambda: run_tests(ctx=ctx, config=config), + ): + pass + yield diff --git a/qa/tasks/systemd.py b/qa/tasks/systemd.py new file mode 100644 index 00000000..745f503c --- /dev/null +++ b/qa/tasks/systemd.py @@ -0,0 +1,135 @@ +""" +Systemd test +""" +import contextlib +import logging +import re +import time + +from teuthology.orchestra import run +from teuthology.misc import reconnect, get_first_mon, wait_until_healthy + +log 
= logging.getLogger(__name__) + +def _remote_service_status(remote, service): + status = remote.sh('sudo systemctl status %s' % service, + check_status=False) + return status + +@contextlib.contextmanager +def task(ctx, config): + """ + - tasks: + ceph-deploy: + systemd: + + Test ceph systemd services can start, stop and restart and + check for any failed services and report back errors + """ + for remote, roles in ctx.cluster.remotes.items(): + remote.run(args=['sudo', 'ps', '-eaf', run.Raw('|'), + 'grep', 'ceph']) + units = remote.sh('sudo systemctl list-units | grep ceph', + check_status=False) + log.info(units) + if units.find('failed'): + log.info("Ceph services in failed state") + + # test overall service stop and start using ceph.target + # ceph.target tests are meant for ceph systemd tests + # and not actual process testing using 'ps' + log.info("Stopping all Ceph services") + remote.run(args=['sudo', 'systemctl', 'stop', 'ceph.target']) + status = _remote_service_status(remote, 'ceph.target') + log.info(status) + log.info("Checking process status") + ps_eaf = remote.sh('sudo ps -eaf | grep ceph') + if ps_eaf.find('Active: inactive'): + log.info("Successfully stopped all ceph services") + else: + log.info("Failed to stop ceph services") + + log.info("Starting all Ceph services") + remote.run(args=['sudo', 'systemctl', 'start', 'ceph.target']) + status = _remote_service_status(remote, 'ceph.target') + log.info(status) + if status.find('Active: active'): + log.info("Successfully started all Ceph services") + else: + log.info("info", "Failed to start Ceph services") + ps_eaf = remote.sh('sudo ps -eaf | grep ceph') + log.info(ps_eaf) + time.sleep(4) + + # test individual services start stop + name = remote.shortname + mon_name = 'ceph-mon@' + name + '.service' + mds_name = 'ceph-mds@' + name + '.service' + mgr_name = 'ceph-mgr@' + name + '.service' + mon_role_name = 'mon.' + name + mds_role_name = 'mds.' + name + mgr_role_name = 'mgr.' 
+ name + m_osd = re.search('--id (\d+) --setuser ceph', r.stdout.getvalue()) + if m_osd: + osd_service = 'ceph-osd@{m}.service'.format(m=m_osd.group(1)) + remote.run(args=['sudo', 'systemctl', 'status', + osd_service]) + remote.run(args=['sudo', 'systemctl', 'stop', + osd_service]) + time.sleep(4) # immediate check will result in deactivating state + status = _remote_service_status(remote, osd_service) + log.info(status) + if status.find('Active: inactive'): + log.info("Successfully stopped single osd ceph service") + else: + log.info("Failed to stop ceph osd services") + remote.sh(['sudo', 'systemctl', 'start', osd_service]) + time.sleep(4) + if mon_role_name in roles: + remote.run(args=['sudo', 'systemctl', 'status', mon_name]) + remote.run(args=['sudo', 'systemctl', 'stop', mon_name]) + time.sleep(4) # immediate check will result in deactivating state + status = _remote_service_status(remote, mon_name) + if status.find('Active: inactive'): + log.info("Successfully stopped single mon ceph service") + else: + log.info("Failed to stop ceph mon service") + remote.run(args=['sudo', 'systemctl', 'start', mon_name]) + time.sleep(4) + if mgr_role_name in roles: + remote.run(args=['sudo', 'systemctl', 'status', mgr_name]) + remote.run(args=['sudo', 'systemctl', 'stop', mgr_name]) + time.sleep(4) # immediate check will result in deactivating state + status = _remote_service_status(remote, mgr_name) + if status.find('Active: inactive'): + log.info("Successfully stopped single ceph mgr service") + else: + log.info("Failed to stop ceph mgr service") + remote.run(args=['sudo', 'systemctl', 'start', mgr_name]) + time.sleep(4) + if mds_role_name in roles: + remote.run(args=['sudo', 'systemctl', 'status', mds_name]) + remote.run(args=['sudo', 'systemctl', 'stop', mds_name]) + time.sleep(4) # immediate check will result in deactivating state + status = _remote_service_status(remote, mds_name) + if status.find('Active: inactive'): + log.info("Successfully stopped single ceph mds service") + else: + log.info("Failed to stop ceph mds service") + remote.run(args=['sudo', 'systemctl', 'start', mds_name]) + time.sleep(4) + + # reboot all nodes and verify the systemd units restart + # workunit that runs would fail if any of the systemd unit doesnt start + ctx.cluster.run(args='sudo reboot', wait=False, check_status=False) + # avoid immediate reconnect + time.sleep(120) + reconnect(ctx, 480) # reconnect all nodes + # for debug info + ctx.cluster.run(args=['sudo', 'ps', '-eaf', run.Raw('|'), + 'grep', 'ceph']) + # wait for HEALTH_OK + mon = get_first_mon(ctx, config) + (mon_remote,) = ctx.cluster.only(mon).remotes.keys() + wait_until_healthy(ctx, mon_remote, use_sudo=True) + yield diff --git a/qa/tasks/tempest.py b/qa/tasks/tempest.py new file mode 100644 index 00000000..2fe49a7e --- /dev/null +++ b/qa/tasks/tempest.py @@ -0,0 +1,284 @@ +""" +Deploy and configure Tempest for Teuthology +""" +import contextlib +import logging + +from six.moves import configparser + +from teuthology import misc as teuthology +from teuthology import contextutil +from teuthology import packaging +from teuthology.exceptions import ConfigError +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + + +def get_tempest_dir(ctx): + return '{tdir}/tempest'.format(tdir=teuthology.get_testdir(ctx)) + +def run_in_tempest_dir(ctx, client, cmdargs, **kwargs): + ctx.cluster.only(client).run( + args=[ 'cd', get_tempest_dir(ctx), run.Raw('&&'), ] + cmdargs, + **kwargs + ) + +def run_in_tempest_rgw_dir(ctx, client, cmdargs, 
**kwargs): + ctx.cluster.only(client).run( + args=[ 'cd', get_tempest_dir(ctx) + '/rgw', run.Raw('&&'), ] + cmdargs, + **kwargs + ) + +def run_in_tempest_venv(ctx, client, cmdargs, **kwargs): + run_in_tempest_dir(ctx, client, + [ 'source', + '.tox/venv/bin/activate', + run.Raw('&&') + ] + cmdargs, **kwargs) + +@contextlib.contextmanager +def download(ctx, config): + """ + Download the Tempest from github. + Remove downloaded file upon exit. + + The context passed in should be identical to the context + passed in to the main task. + """ + assert isinstance(config, dict) + log.info('Downloading Tempest...') + for (client, cconf) in config.items(): + ctx.cluster.only(client).run( + args=[ + 'git', 'clone', + '-b', cconf.get('force-branch', 'master'), + 'https://github.com/openstack/tempest.git', + get_tempest_dir(ctx) + ], + ) + + sha1 = cconf.get('sha1') + if sha1 is not None: + run_in_tempest_dir(ctx, client, [ 'git', 'reset', '--hard', sha1 ]) + try: + yield + finally: + log.info('Removing Tempest...') + for client in config: + ctx.cluster.only(client).run( + args=[ 'rm', '-rf', get_tempest_dir(ctx) ], + ) + +def get_toxvenv_dir(ctx): + return ctx.tox.venv_path + +@contextlib.contextmanager +def install_python3(ctx, config): + assert isinstance(config, dict) + log.info('Installing Python3 for Tempest') + installed = [] + for (client, _) in config.items(): + (remote,) = ctx.cluster.only(client).remotes.keys() + try: + packaging.get_package_version(remote, 'python3') + except: + packaging.install_package('python3', remote) + installed.append(client) + try: + yield + finally: + log.info('Removing Python3 required by Tempest...') + for client in installed: + (remote,) = ctx.cluster.only(client).remotes.keys() + packaging.remove_package('python3', remote) + +@contextlib.contextmanager +def setup_venv(ctx, config): + """ + Setup the virtualenv for Tempest using tox. 
+ """ + assert isinstance(config, dict) + log.info('Setting up virtualenv for Tempest') + for (client, _) in config.items(): + run_in_tempest_dir(ctx, client, + [ '{tvdir}/bin/tox'.format(tvdir=get_toxvenv_dir(ctx)), + '-e', 'venv', '--notest' + ]) + yield + +def setup_logging(ctx, cpar): + cpar.set('DEFAULT', 'log_dir', teuthology.get_archive_dir(ctx)) + cpar.set('DEFAULT', 'log_file', 'tempest.log') + +def to_config(config, params, section, cpar): + for (k, v) in config[section].items(): + if isinstance(v, str): + v = v.format(**params) + elif isinstance(v, bool): + v = 'true' if v else 'false' + else: + v = str(v) + cpar.set(section, k, v) + +@contextlib.contextmanager +def configure_instance(ctx, config): + assert isinstance(config, dict) + log.info('Configuring Tempest') + + for (client, cconfig) in config.items(): + run_in_tempest_venv(ctx, client, + [ + 'tempest', + 'init', + '--workspace-path', + get_tempest_dir(ctx) + '/workspace.yaml', + 'rgw' + ]) + + # prepare the config file + tetcdir = '{tdir}/rgw/etc'.format(tdir=get_tempest_dir(ctx)) + (remote,) = ctx.cluster.only(client).remotes.keys() + local_conf = remote.get_file(tetcdir + '/tempest.conf.sample') + + # fill the params dictionary which allows to use templatized configs + keystone_role = cconfig.get('use-keystone-role', None) + if keystone_role is None \ + or keystone_role not in ctx.keystone.public_endpoints: + raise ConfigError('the use-keystone-role is misconfigured') + public_host, public_port = ctx.keystone.public_endpoints[keystone_role] + params = { + 'keystone_public_host': public_host, + 'keystone_public_port': str(public_port), + } + + cpar = configparser.ConfigParser() + cpar.read(local_conf) + setup_logging(ctx, cpar) + to_config(cconfig, params, 'auth', cpar) + to_config(cconfig, params, 'identity', cpar) + to_config(cconfig, params, 'object-storage', cpar) + to_config(cconfig, params, 'object-storage-feature-enabled', cpar) + cpar.write(open(local_conf, 'w+')) + + remote.put_file(local_conf, tetcdir + '/tempest.conf') + yield + +@contextlib.contextmanager +def run_tempest(ctx, config): + assert isinstance(config, dict) + log.info('Configuring Tempest') + + for (client, cconf) in config.items(): + blacklist = cconf.get('blacklist', []) + assert isinstance(blacklist, list) + run_in_tempest_venv(ctx, client, + [ + 'tempest', + 'run', + '--workspace-path', + get_tempest_dir(ctx) + '/workspace.yaml', + '--workspace', + 'rgw', + '--regex', + '(tempest.api.object_storage)' + + ''.join([ '(?!{blackitem})'.format(blackitem=blackitem) + for blackitem in blacklist]) + ]) + try: + yield + finally: + pass + + +@contextlib.contextmanager +def task(ctx, config): + """ + Deploy and run Tempest's object storage campaign + + Example of configuration: + + overrides: + ceph: + conf: + client: + rgw keystone admin token: ADMIN + rgw keystone accepted roles: admin,Member + rgw keystone implicit tenants: true + rgw keystone accepted admin roles: admin + rgw swift enforce content length: true + rgw swift account in url: true + rgw swift versioning enabled: true + tasks: + # typically, the task should be preceded with install, ceph, tox, + # keystone and rgw. Tox and Keystone are specific requirements + # of tempest.py. + - rgw: + # it's important to match the prefix with the endpoint's URL + # in Keystone. Additionally, if we want to test /info and its + # accompanying stuff, the whole Swift API must be put in root + # of the whole URL hierarchy (read: frontend_prefix == /swift). 
+ frontend_prefix: /swift + client.0: + use-keystone-role: client.0 + - tempest: + client.0: + force-branch: master + use-keystone-role: client.0 + auth: + admin_username: admin + admin_project_name: admin + admin_password: ADMIN + admin_domain_name: Default + identity: + uri: http://{keystone_public_host}:{keystone_public_port}/v2.0/ + uri_v3: http://{keystone_public_host}:{keystone_public_port}/v3/ + admin_role: admin + object-storage: + reseller_admin_role: admin + object-storage-feature-enabled: + container_sync: false + discoverability: false + blacklist: + # please strip half of these items after merging PRs #15369 + # and #12704 + - .*test_list_containers_reverse_order.* + - .*test_list_container_contents_with_end_marker.* + - .*test_delete_non_empty_container.* + - .*test_container_synchronization.* + - .*test_get_object_after_expiration_time.* + - .*test_create_object_with_transfer_encoding.* + """ + assert config is None or isinstance(config, list) \ + or isinstance(config, dict), \ + 'task tempest only supports a list or dictionary for configuration' + + if not ctx.tox: + raise ConfigError('tempest must run after the tox task') + if not ctx.keystone: + raise ConfigError('tempest must run after the keystone task') + + all_clients = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + if config is None: + config = all_clients + if isinstance(config, list): + config = dict.fromkeys(config) + + overrides = ctx.config.get('overrides', {}) + # merge each client section, not the top level. + for client in config.keys(): + if not config[client]: + config[client] = {} + teuthology.deep_merge(config[client], overrides.get('keystone', {})) + + log.debug('Tempest config is %s', config) + + with contextutil.nested( + lambda: download(ctx=ctx, config=config), + lambda: install_python3(ctx=ctx, config=config), + lambda: setup_venv(ctx=ctx, config=config), + lambda: configure_instance(ctx=ctx, config=config), + lambda: run_tempest(ctx=ctx, config=config), + ): + yield diff --git a/qa/tasks/tests/__init__.py b/qa/tasks/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/qa/tasks/tests/test_devstack.py b/qa/tasks/tests/test_devstack.py new file mode 100644 index 00000000..39b94a64 --- /dev/null +++ b/qa/tasks/tests/test_devstack.py @@ -0,0 +1,48 @@ +from textwrap import dedent + +from tasks import devstack + + +class TestDevstack(object): + def test_parse_os_table(self): + table_str = dedent(""" + +---------------------+--------------------------------------+ + | Property | Value | + +---------------------+--------------------------------------+ + | attachments | [] | + | availability_zone | nova | + | bootable | false | + | created_at | 2014-02-21T17:14:47.548361 | + | display_description | None | + | display_name | NAME | + | id | ffdbd1bb-60dc-4d95-acfe-88774c09ad3e | + | metadata | {} | + | size | 1 | + | snapshot_id | None | + | source_volid | None | + | status | creating | + | volume_type | None | + +---------------------+--------------------------------------+ + """).strip() + expected = { + 'Property': 'Value', + 'attachments': '[]', + 'availability_zone': 'nova', + 'bootable': 'false', + 'created_at': '2014-02-21T17:14:47.548361', + 'display_description': 'None', + 'display_name': 'NAME', + 'id': 'ffdbd1bb-60dc-4d95-acfe-88774c09ad3e', + 'metadata': '{}', + 'size': '1', + 'snapshot_id': 'None', + 'source_volid': 'None', + 'status': 'creating', + 'volume_type': 'None'} + + vol_info = devstack.parse_os_table(table_str) + 
assert vol_info == expected + + + + diff --git a/qa/tasks/tests/test_radosgw_admin.py b/qa/tasks/tests/test_radosgw_admin.py new file mode 100644 index 00000000..2ed0ebd5 --- /dev/null +++ b/qa/tasks/tests/test_radosgw_admin.py @@ -0,0 +1,35 @@ +import six +if six.PY3: + from unittest.mock import Mock +else: + from mock import Mock + +from tasks import radosgw_admin + +acl_with_version = """fooFoofooFooFULL_CONTROL +""" # noqa + + +acl_without_version = """fooFoofooFooFULL_CONTROL +""" # noqa + + +class TestGetAcl(object): + + def setup(self): + self.key = Mock() + + def test_removes_xml_version(self): + self.key.get_xml_acl = Mock(return_value=acl_with_version) + result = radosgw_admin.get_acl(self.key) + assert result.startswith(' 0 + snap = random.choice(snaps) + log.info("Removing snap %s" % (snap,)) + for pool in pools: + manager.remove_pool_snap(pool, str(snap)) + snaps.remove(snap) + def add_snap(snap): + log.info("Adding snap %s" % (snap,)) + for pool in pools: + manager.add_pool_snap(pool, str(snap)) + snaps.append(snap) + index = 0 + while not stopping: + index += 1 + time.sleep(period) + if len(snaps) <= min_snaps: + add_snap(index) + elif len(snaps) >= max_snaps: + remove_snap() + else: + random.choice([lambda: add_snap(index), remove_snap])() + log.info("Stopping") + thread = gevent.spawn(do_thrash) + yield + stopping = True + thread.join() + diff --git a/qa/tasks/thrashosds-health.yaml b/qa/tasks/thrashosds-health.yaml new file mode 100644 index 00000000..914f6e25 --- /dev/null +++ b/qa/tasks/thrashosds-health.yaml @@ -0,0 +1,15 @@ +overrides: + ceph: + log-whitelist: + - overall HEALTH_ + - \(OSDMAP_FLAGS\) + - \(OSD_ + - \(PG_ + - \(POOL_ + - \(CACHE_POOL_ + - \(SMALLER_PGP_NUM\) + - \(OBJECT_ + - \(SLOW_OPS\) + - \(REQUEST_SLOW\) + - \(TOO_FEW_PGS\) + - slow request diff --git a/qa/tasks/thrashosds.py b/qa/tasks/thrashosds.py new file mode 100644 index 00000000..253663f8 --- /dev/null +++ b/qa/tasks/thrashosds.py @@ -0,0 +1,219 @@ +""" +Thrash -- Simulate random osd failures. +""" +import contextlib +import logging +from tasks import ceph_manager +from teuthology import misc as teuthology + + +log = logging.getLogger(__name__) + +@contextlib.contextmanager +def task(ctx, config): + """ + "Thrash" the OSDs by randomly marking them out/down (and then back + in) until the task is ended. This loops, and every op_delay + seconds it randomly chooses to add or remove an OSD (even odds) + unless there are fewer than min_out OSDs out of the cluster, or + more than min_in OSDs in the cluster. + + All commands are run on mon0 and it stops when __exit__ is called. + + The config is optional, and is a dict containing some or all of: + + cluster: (default 'ceph') the name of the cluster to thrash + + min_in: (default 4) the minimum number of OSDs to keep in the + cluster + + min_out: (default 0) the minimum number of OSDs to keep out of the + cluster + + op_delay: (5) the length of time to sleep between changing an + OSD's status + + min_dead: (0) minimum number of osds to leave down/dead. + + max_dead: (0) maximum number of osds to leave down/dead before waiting + for clean. This should probably be num_replicas - 1. + + clean_interval: (60) the approximate length of time to loop before + waiting until the cluster goes clean. (In reality this is used + to probabilistically choose when to wait, and the method used + makes it closer to -- but not identical to -- the half-life.) 
+ + scrub_interval: (-1) the approximate length of time to loop before + waiting until a scrub is performed while cleaning. (In reality + this is used to probabilistically choose when to wait, and it + only applies to the cases where cleaning is being performed). + -1 is used to indicate that no scrubbing will be done. + + chance_down: (0.4) the probability that the thrasher will mark an + OSD down rather than marking it out. (The thrasher will not + consider that OSD out of the cluster, since presently an OSD + wrongly marked down will mark itself back up again.) This value + can be either an integer (eg, 75) or a float probability (eg + 0.75). + + chance_test_min_size: (0) chance to run test_pool_min_size, + which: + - kills all but one osd + - waits + - kills that osd + - revives all other osds + - verifies that the osds fully recover + + timeout: (360) the number of seconds to wait for the cluster + to become clean after each cluster change. If this doesn't + happen within the timeout, an exception will be raised. + + revive_timeout: (150) number of seconds to wait for an osd asok to + appear after attempting to revive the osd + + thrash_primary_affinity: (true) randomly adjust primary-affinity + + chance_pgnum_grow: (0) chance to increase a pool's size + chance_pgpnum_fix: (0) chance to adjust pgpnum to pg for a pool + pool_grow_by: (10) amount to increase pgnum by + chance_pgnum_shrink: (0) chance to decrease a pool's size + pool_shrink_by: (10) amount to decrease pgnum by + max_pgs_per_pool_osd: (1200) don't expand pools past this size per osd + + pause_short: (3) duration of short pause + pause_long: (80) duration of long pause + pause_check_after: (50) assert osd down after this long + chance_inject_pause_short: (1) chance of injecting short stall + chance_inject_pause_long: (0) chance of injecting long stall + + clean_wait: (0) duration to wait before resuming thrashing once clean + + sighup_delay: (0.1) duration to delay between sending signal.SIGHUP to a + random live osd + + powercycle: (false) whether to power cycle the node instead + of just the osd process. Note that this assumes that a single + osd is the only important process on the node. + + bdev_inject_crash: (0) seconds to delay while inducing a synthetic crash. + the delay lets the BlockDevice "accept" more aio operations but blocks + any flush, and then eventually crashes (losing some or all ios). If 0, + no bdev failure injection is enabled. + + bdev_inject_crash_probability: (.5) probability of doing a bdev failure + injection crash vs a normal OSD kill. 
+ + chance_test_backfill_full: (0) chance to simulate full disks stopping + backfill + + chance_test_map_discontinuity: (0) chance to test map discontinuity + map_discontinuity_sleep_time: (40) time to wait for map trims + + ceph_objectstore_tool: (true) whether to export/import a pg while an osd is down + chance_move_pg: (1.0) chance of moving a pg if more than 1 osd is down (default 100%) + + optrack_toggle_delay: (2.0) duration to delay between toggling op tracker + enablement to all osds + + dump_ops_enable: (true) continuously dump ops on all live osds + + noscrub_toggle_delay: (2.0) duration to delay between toggling noscrub + + disable_objectstore_tool_tests: (false) disable ceph_objectstore_tool based + tests + + chance_thrash_cluster_full: .05 + + chance_thrash_pg_upmap: 1.0 + chance_thrash_pg_upmap_items: 1.0 + + aggressive_pg_num_changes: (true) whether we should bypass the careful throttling of pg_num and pgp_num changes in mgr's adjust_pgs() controller + + example: + + tasks: + - ceph: + - thrashosds: + cluster: ceph + chance_down: 10 + op_delay: 3 + min_in: 1 + timeout: 600 + - interactive: + """ + if config is None: + config = {} + assert isinstance(config, dict), \ + 'thrashosds task only accepts a dict for configuration' + # add default value for sighup_delay + config['sighup_delay'] = config.get('sighup_delay', 0.1) + # add default value for optrack_toggle_delay + config['optrack_toggle_delay'] = config.get('optrack_toggle_delay', 2.0) + # add default value for dump_ops_enable + config['dump_ops_enable'] = config.get('dump_ops_enable', "true") + # add default value for noscrub_toggle_delay + config['noscrub_toggle_delay'] = config.get('noscrub_toggle_delay', 2.0) + # add default value for random_eio + config['random_eio'] = config.get('random_eio', 0.0) + aggro = config.get('aggressive_pg_num_changes', True) + + log.info("config is {config}".format(config=str(config))) + + overrides = ctx.config.get('overrides', {}) + log.info("overrides is {overrides}".format(overrides=str(overrides))) + teuthology.deep_merge(config, overrides.get('thrashosds', {})) + cluster = config.get('cluster', 'ceph') + + log.info("config is {config}".format(config=str(config))) + + if 'powercycle' in config: + + # sync everyone first to avoid collateral damage to / etc. 
+ log.info('Doing preliminary sync to avoid collateral damage...') + ctx.cluster.run(args=['sync']) + + if 'ipmi_user' in ctx.teuthology_config: + for remote in ctx.cluster.remotes.keys(): + log.debug('checking console status of %s' % remote.shortname) + if not remote.console.check_status(): + log.warning('Failed to get console status for %s', + remote.shortname) + + # check that all osd remotes have a valid console + osds = ctx.cluster.only(teuthology.is_type('osd', cluster)) + for remote in osds.remotes.keys(): + if not remote.console.has_ipmi_credentials: + raise Exception( + 'IPMI console required for powercycling, ' + 'but not available on osd role: {r}'.format( + r=remote.name)) + + cluster_manager = ctx.managers[cluster] + for f in ['powercycle', 'bdev_inject_crash']: + if config.get(f): + cluster_manager.config[f] = config.get(f) + + if aggro: + cluster_manager.raw_cluster_cmd( + 'config', 'set', 'mgr', + 'mgr_debug_aggressive_pg_num_changes', + 'true') + + log.info('Beginning thrashosds...') + thrash_proc = ceph_manager.Thrasher( + cluster_manager, + config, + logger=log.getChild('thrasher') + ) + try: + yield + finally: + log.info('joining thrashosds') + thrash_proc.do_join() + cluster_manager.wait_for_all_osds_up() + cluster_manager.flush_all_pg_stats() + cluster_manager.wait_for_recovery(config.get('timeout', 360)) + if aggro: + cluster_manager.raw_cluster_cmd( + 'config', 'rm', 'mgr', + 'mgr_debug_aggressive_pg_num_changes') diff --git a/qa/tasks/tox.py b/qa/tasks/tox.py new file mode 100644 index 00000000..36c226d0 --- /dev/null +++ b/qa/tasks/tox.py @@ -0,0 +1,50 @@ +import argparse +import contextlib +import logging + +from teuthology import misc as teuthology +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + + +def get_toxvenv_dir(ctx): + return '{tdir}/tox-venv'.format(tdir=teuthology.get_testdir(ctx)) + +@contextlib.contextmanager +def task(ctx, config): + """ + Deploy tox from pip. It's a dependency for both Keystone and Tempest. + """ + assert config is None or isinstance(config, list) \ + or isinstance(config, dict), \ + "task tox only supports a list or dictionary for configuration" + all_clients = ['client.{id}'.format(id=id_) + for id_ in teuthology.all_roles_of_type(ctx.cluster, 'client')] + if config is None: + config = all_clients + if isinstance(config, list): + config = dict.fromkeys(config) + + log.info('Deploying tox from pip...') + for (client, _) in config.items(): + # yup, we have to deploy tox first. The packaged one, available + # on Sepia's Ubuntu machines, is outdated for Keystone/Tempest. 
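+        # In effect (sketch), each selected client ends up running:
+        #
+        #   virtualenv -p python3 {testdir}/tox-venv
+        #   source {testdir}/tox-venv/bin/activate && pip install tox==3.15.0
+        #
+        # where {testdir} is whatever teuthology.get_testdir(ctx) returns.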
+ tvdir = get_toxvenv_dir(ctx) + ctx.cluster.only(client).run(args=[ 'virtualenv', '-p', 'python3', tvdir ]) + ctx.cluster.only(client).run(args= + [ 'source', '{tvdir}/bin/activate'.format(tvdir=tvdir), + run.Raw('&&'), + 'pip', 'install', 'tox==3.15.0' + ]) + + # export the path Keystone and Tempest + ctx.tox = argparse.Namespace() + ctx.tox.venv_path = get_toxvenv_dir(ctx) + + try: + yield + finally: + for (client, _) in config.items(): + ctx.cluster.only(client).run( + args=[ 'rm', '-rf', get_toxvenv_dir(ctx) ]) diff --git a/qa/tasks/userdata_setup.yaml b/qa/tasks/userdata_setup.yaml new file mode 100644 index 00000000..7271925c --- /dev/null +++ b/qa/tasks/userdata_setup.yaml @@ -0,0 +1,25 @@ +#cloud-config-archive + +- type: text/cloud-config + content: | + output: + all: '| tee -a /var/log/cloud-init-output.log' + +# allow passwordless access for debugging +- | + #!/usr/bin/env bash + exec passwd -d ubuntu + +- | + #!/usr/bin/env bash + + # mount a NFS share for storing logs + apt-get update + apt-get -y install nfs-common + mkdir /mnt/log + # 10.0.2.2 is the host + mount -v -t nfs -o proto=tcp 10.0.2.2:{mnt_dir} /mnt/log + + # mount the iso image that has the test script + mkdir /mnt/cdrom + mount -t auto /dev/cdrom /mnt/cdrom diff --git a/qa/tasks/userdata_teardown.yaml b/qa/tasks/userdata_teardown.yaml new file mode 100644 index 00000000..731d769f --- /dev/null +++ b/qa/tasks/userdata_teardown.yaml @@ -0,0 +1,11 @@ +- | + #!/usr/bin/env bash + cp /var/log/cloud-init-output.log /mnt/log + +- | + #!/usr/bin/env bash + umount /mnt/log + +- | + #!/usr/bin/env bash + shutdown -h -P now diff --git a/qa/tasks/util/__init__.py b/qa/tasks/util/__init__.py new file mode 100644 index 00000000..5b8575ed --- /dev/null +++ b/qa/tasks/util/__init__.py @@ -0,0 +1,26 @@ +from teuthology import misc + +def get_remote(ctx, cluster, service_type, service_id): + """ + Get the Remote for the host where a particular role runs. + + :param cluster: name of the cluster the service is part of + :param service_type: e.g. 'mds', 'osd', 'client' + :param service_id: The third part of a role, e.g. 
'0' for + the role 'ceph.client.0' + :return: a Remote instance for the host where the + requested role is placed + """ + def _is_instance(role): + role_tuple = misc.split_role(role) + return role_tuple == (cluster, service_type, str(service_id)) + try: + (remote,) = ctx.cluster.only(_is_instance).remotes.keys() + except ValueError: + raise KeyError("Service {0}.{1}.{2} not found".format(cluster, + service_type, + service_id)) + return remote + +def get_remote_for_role(ctx, role): + return get_remote(ctx, *misc.split_role(role)) diff --git a/qa/tasks/util/rados.py b/qa/tasks/util/rados.py new file mode 100644 index 00000000..a0c54ce4 --- /dev/null +++ b/qa/tasks/util/rados.py @@ -0,0 +1,87 @@ +import logging + +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +def rados(ctx, remote, cmd, wait=True, check_status=False): + testdir = teuthology.get_testdir(ctx) + log.info("rados %s" % ' '.join(cmd)) + pre = [ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir), + 'rados', + ]; + pre.extend(cmd) + proc = remote.run( + args=pre, + check_status=check_status, + wait=wait, + ) + if wait: + return proc.exitstatus + else: + return proc + +def create_ec_pool(remote, name, profile_name, pgnum, profile={}, cluster_name="ceph", application=None): + remote.run(args=['sudo', 'ceph'] + + cmd_erasure_code_profile(profile_name, profile) + ['--cluster', cluster_name]) + remote.run(args=[ + 'sudo', 'ceph', 'osd', 'pool', 'create', name, + str(pgnum), str(pgnum), 'erasure', profile_name, '--cluster', cluster_name + ]) + if application: + remote.run(args=[ + 'sudo', 'ceph', 'osd', 'pool', 'application', 'enable', name, application, '--cluster', cluster_name + ], check_status=False) # may fail as EINVAL when run in jewel upgrade test + +def create_replicated_pool(remote, name, pgnum, cluster_name="ceph", application=None): + remote.run(args=[ + 'sudo', 'ceph', 'osd', 'pool', 'create', name, str(pgnum), str(pgnum), '--cluster', cluster_name + ]) + if application: + remote.run(args=[ + 'sudo', 'ceph', 'osd', 'pool', 'application', 'enable', name, application, '--cluster', cluster_name + ], check_status=False) + +def create_cache_pool(remote, base_name, cache_name, pgnum, size, cluster_name="ceph"): + remote.run(args=[ + 'sudo', 'ceph', 'osd', 'pool', 'create', cache_name, str(pgnum), '--cluster', cluster_name + ]) + remote.run(args=[ + 'sudo', 'ceph', 'osd', 'tier', 'add-cache', base_name, cache_name, + str(size), '--cluster', cluster_name + ]) + +def cmd_erasure_code_profile(profile_name, profile): + """ + Return the shell command to run to create the erasure code profile + described by the profile parameter. + + :param profile_name: a string matching [A-Za-z0-9-_.]+ + :param profile: a map whose semantic depends on the erasure code plugin + :returns: a shell command as an array suitable for Remote.run + + If profile is {}, it is replaced with + + { 'k': '2', 'm': '1', 'crush-failure-domain': 'osd'} + + for backward compatibility. In previous versions of teuthology, + these values were hardcoded as function arguments and some yaml + files were designed with these implicit values. The teuthology + code should not know anything about the erasure code profile + content or semantic. The valid values and parameters are outside + its scope. 
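+    For example (hypothetical values), profile_name="myprofile" with
+    profile={'k': '4', 'm': '2'} returns
+
+        ['osd', 'erasure-code-profile', 'set', 'myprofile', 'k=4', 'm=2']
+
+    which callers such as create_ec_pool above prepend with 'sudo ceph'.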
+ """ + + if profile == {}: + profile = { + 'k': '2', + 'm': '1', + 'crush-failure-domain': 'osd' + } + return [ + 'osd', 'erasure-code-profile', 'set', + profile_name + ] + [ str(key) + '=' + str(value) for key, value in profile.items() ] diff --git a/qa/tasks/util/rgw.py b/qa/tasks/util/rgw.py new file mode 100644 index 00000000..3229f0a5 --- /dev/null +++ b/qa/tasks/util/rgw.py @@ -0,0 +1,94 @@ +import logging +import json +import time + +from six import StringIO + +from teuthology import misc as teuthology + +log = logging.getLogger(__name__) + +def rgwadmin(ctx, client, cmd, stdin=StringIO(), check_status=False, + format='json', decode=True, log_level=logging.DEBUG): + log.info('rgwadmin: {client} : {cmd}'.format(client=client,cmd=cmd)) + testdir = teuthology.get_testdir(ctx) + cluster_name, daemon_type, client_id = teuthology.split_role(client) + client_with_id = daemon_type + '.' + client_id + pre = [ + 'adjust-ulimits', + 'ceph-coverage'.format(tdir=testdir), + '{tdir}/archive/coverage'.format(tdir=testdir), + 'radosgw-admin'.format(tdir=testdir), + '--log-to-stderr', + '--format', format, + '-n', client_with_id, + '--cluster', cluster_name, + ] + pre.extend(cmd) + log.log(log_level, 'rgwadmin: cmd=%s' % pre) + (remote,) = ctx.cluster.only(client).remotes.keys() + proc = remote.run( + args=pre, + check_status=check_status, + stdout=StringIO(), + stderr=StringIO(), + stdin=stdin, + ) + r = proc.exitstatus + out = proc.stdout.getvalue() + if not decode: + return (r, out) + j = None + if not r and out != '': + try: + j = json.loads(out) + log.log(log_level, ' json result: %s' % j) + except ValueError: + j = out + log.log(log_level, ' raw result: %s' % j) + return (r, j) + +def get_user_summary(out, user): + """Extract the summary for a given user""" + user_summary = None + for summary in out['summary']: + if summary.get('user') == user: + user_summary = summary + + if not user_summary: + raise AssertionError('No summary info found for user: %s' % user) + + return user_summary + +def get_user_successful_ops(out, user): + summary = out['summary'] + if len(summary) == 0: + return 0 + return get_user_summary(out, user)['total']['successful_ops'] + +def wait_for_radosgw(url, remote): + """ poll the given url until it starts accepting connections + + add_daemon() doesn't wait until radosgw finishes startup, so this is used + to avoid racing with later tasks that expect radosgw to be up and listening + """ + # TODO: use '--retry-connrefused --retry 8' when teuthology is running on + # Centos 8 and other OS's with an updated version of curl + curl_cmd = ['curl', + url] + exit_status = 0 + num_retries = 8 + for seconds in range(num_retries): + proc = remote.run( + args=curl_cmd, + check_status=False, + stdout=StringIO(), + stderr=StringIO(), + stdin=StringIO(), + ) + exit_status = proc.exitstatus + if exit_status == 0: + break + time.sleep(2**seconds) + + assert exit_status == 0 diff --git a/qa/tasks/util/test/__init__.py b/qa/tasks/util/test/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/qa/tasks/util/test/test_rados.py b/qa/tasks/util/test/test_rados.py new file mode 100644 index 00000000..a8f4cb02 --- /dev/null +++ b/qa/tasks/util/test/test_rados.py @@ -0,0 +1,40 @@ +# +# The MIT License +# +# Copyright (C) 2014 Cloudwatt +# +# Author: Loic Dachary +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation +# files (the "Software"), to deal in the Software without +# restriction, including 
without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +from tasks.util import rados + +class TestRados(object): + + def test_cmd_erasure_code_profile(self): + name = 'NAME' + cmd = rados.cmd_erasure_code_profile(name, {}) + assert 'k=2' in cmd + assert name in cmd + cmd = rados.cmd_erasure_code_profile(name, { 'k': '88' }) + assert 'k=88' in cmd + assert name in cmd diff --git a/qa/tasks/util/workunit.py b/qa/tasks/util/workunit.py new file mode 100644 index 00000000..1f5623af --- /dev/null +++ b/qa/tasks/util/workunit.py @@ -0,0 +1,78 @@ +import copy + +from teuthology import misc +from teuthology.orchestra import run + +class Refspec: + def __init__(self, refspec): + self.refspec = refspec + + def __str__(self): + return self.refspec + + def _clone(self, git_url, clonedir, opts=None): + if opts is None: + opts = [] + return (['rm', '-rf', clonedir] + + [run.Raw('&&')] + + ['git', 'clone'] + opts + + [git_url, clonedir]) + + def _cd(self, clonedir): + return ['cd', clonedir] + + def _checkout(self): + return ['git', 'checkout', self.refspec] + + def clone(self, git_url, clonedir): + return (self._clone(git_url, clonedir) + + [run.Raw('&&')] + + self._cd(clonedir) + + [run.Raw('&&')] + + self._checkout()) + + +class Branch(Refspec): + def __init__(self, tag): + Refspec.__init__(self, tag) + + def clone(self, git_url, clonedir): + opts = ['--depth', '1', + '--branch', self.refspec] + return (self._clone(git_url, clonedir, opts) + + [run.Raw('&&')] + + self._cd(clonedir)) + + +class Head(Refspec): + def __init__(self): + Refspec.__init__(self, 'HEAD') + + def clone(self, git_url, clonedir): + opts = ['--depth', '1'] + return (self._clone(git_url, clonedir, opts) + + [run.Raw('&&')] + + self._cd(clonedir)) + + +def get_refspec_after_overrides(config, overrides): + # mimic the behavior of the "install" task, where the "overrides" are + # actually the defaults of that task. in other words, if none of "sha1", + # "tag", or "branch" is specified by a "workunit" tasks, we will update + # it with the information in the "workunit" sub-task nested in "overrides". 
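+    # Worked example (hypothetical values): with
+    #     config    = {'branch': 'wip-foo', 'clients': {...}}
+    #     overrides = {'workunit': {'sha1': 'abc123'}}
+    # the explicit 'branch' wins -- the refspec keys are stripped from the
+    # overrides before merging -- and the result is Branch('wip-foo').  If the
+    # config named no sha1/tag/branch at all, the overridden 'sha1' would be
+    # merged in and the result would be Refspec('abc123').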
+ overrides = copy.deepcopy(overrides.get('workunit', {})) + refspecs = {'suite_sha1': Refspec, 'suite_branch': Branch, + 'sha1': Refspec, 'tag': Refspec, 'branch': Branch} + if any(map(lambda i: i in config, refspecs.keys())): + for i in refspecs.keys(): + overrides.pop(i, None) + misc.deep_merge(config, overrides) + + for spec, cls in refspecs.items(): + refspec = config.get(spec) + if refspec: + refspec = cls(refspec) + break + if refspec is None: + refspec = Head() + return refspec diff --git a/qa/tasks/vstart_runner.py b/qa/tasks/vstart_runner.py new file mode 100644 index 00000000..cfbaad78 --- /dev/null +++ b/qa/tasks/vstart_runner.py @@ -0,0 +1,1169 @@ +""" +vstart_runner: override Filesystem and Mount interfaces to run a CephFSTestCase against a vstart +ceph instance instead of a packaged/installed cluster. Use this to turn around test cases +quickly during development. + +Simple usage (assuming teuthology and ceph checked out in ~/git): + + # Activate the teuthology virtualenv + source ~/git/teuthology/virtualenv/bin/activate + # Go into your ceph build directory + cd ~/git/ceph/build + # Invoke a test using this script + python ~/git/ceph/qa/tasks/vstart_runner.py --create tasks.cephfs.test_data_scan + +Alternative usage: + + # Alternatively, if you use different paths, specify them as follows: + LD_LIBRARY_PATH=`pwd`/lib PYTHONPATH=~/git/teuthology:~/git/ceph/qa:`pwd`/../src/pybind:`pwd`/lib/cython_modules/lib.2 python ~/git/ceph/qa/tasks/vstart_runner.py + + # If you wish to drop to a python shell on failures, use --interactive: + python ~/git/ceph/qa/tasks/vstart_runner.py --interactive + + # If you wish to run a named test case, pass it as an argument: + python ~/git/ceph/qa/tasks/vstart_runner.py tasks.cephfs.test_data_scan + + # Also, you can create the cluster once and then run named test cases against it: + python ~/git/ceph/qa/tasks/vstart_runner.py --create-cluster-only + python ~/git/ceph/qa/tasks/vstart_runner.py tasks.mgr.dashboard.test_health + python ~/git/ceph/qa/tasks/vstart_runner.py tasks.mgr.dashboard.test_rgw + +""" + +from io import BytesIO +from io import StringIO +from collections import defaultdict +import getpass +import signal +import tempfile +import threading +import datetime +import shutil +import re +import os +import time +import sys +import errno +from unittest import suite, loader +import unittest +import platform +from teuthology import misc +from teuthology.orchestra.run import Raw, quote +from teuthology.orchestra.daemon import DaemonGroup +from teuthology.config import config as teuth_config +import six +import logging +try: + import urllib3 + urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) +except: + pass + +log = logging.getLogger(__name__) + +handler = logging.FileHandler("./vstart_runner.log") +formatter = logging.Formatter( + fmt=u'%(asctime)s.%(msecs)03d %(levelname)s:%(name)s:%(message)s', + datefmt='%Y-%m-%dT%H:%M:%S') +handler.setFormatter(formatter) +log.addHandler(handler) +log.setLevel(logging.INFO) + + +def respawn_in_path(lib_path, python_paths): + execv_cmd = ['python'] + if platform.system() == "Darwin": + lib_path_var = "DYLD_LIBRARY_PATH" + else: + lib_path_var = "LD_LIBRARY_PATH" + + py_binary = os.environ.get("PYTHON", "python") + + if lib_path_var in os.environ: + if lib_path not in os.environ[lib_path_var]: + os.environ[lib_path_var] += ':' + lib_path + os.execvp(py_binary, execv_cmd + sys.argv) + else: + os.environ[lib_path_var] = lib_path + os.execvp(py_binary, execv_cmd + sys.argv) + + for p in 
python_paths: + sys.path.insert(0, p) + + +# Let's use some sensible defaults +if os.path.exists("./CMakeCache.txt") and os.path.exists("./bin"): + + # A list of candidate paths for each package we need + guesses = [ + ["~/git/teuthology", "~/scm/teuthology", "~/teuthology"], + ["lib/cython_modules/lib.2"], + ["../src/pybind"], + ] + + python_paths = [] + + # Up one level so that "tasks.foo.bar" imports work + python_paths.append(os.path.abspath( + os.path.join(os.path.dirname(os.path.realpath(__file__)), "..") + )) + + for package_guesses in guesses: + for g in package_guesses: + g_exp = os.path.abspath(os.path.expanduser(g)) + if os.path.exists(g_exp): + python_paths.append(g_exp) + + ld_path = os.path.join(os.getcwd(), "lib/") + print("Using guessed paths {0} {1}".format(ld_path, python_paths)) + respawn_in_path(ld_path, python_paths) + + +try: + from teuthology.exceptions import CommandFailedError + from tasks.ceph_manager import CephManager + from tasks.cephfs.fuse_mount import FuseMount + from tasks.cephfs.filesystem import Filesystem, MDSCluster, CephCluster + from tasks.mgr.mgr_test_case import MgrCluster + from teuthology.contextutil import MaxWhileTries + from teuthology.task import interactive +except ImportError: + sys.stderr.write("***\nError importing packages, have you activated your teuthology virtualenv " + "and set PYTHONPATH to point to teuthology and ceph-qa-suite?\n***\n\n") + raise + +# Must import after teuthology because of gevent monkey patching +import subprocess + +if os.path.exists("./CMakeCache.txt"): + # Running in build dir of a cmake build + BIN_PREFIX = "./bin/" + SRC_PREFIX = "../src" +else: + # Running in src/ of an autotools build + BIN_PREFIX = "./" + SRC_PREFIX = "./" + + +class LocalRemoteProcess(object): + def __init__(self, args, subproc, check_status, stdout, stderr): + self.args = args + self.subproc = subproc + self.stdout = stdout + self.stderr = stderr + # this variable is meant for instance of this class named fuse_daemon. + # child process of the command launched with sudo must be killed, + # since killing parent process alone has no impact on the child + # process. 
+ self.fuse_pid = -1 + + self.check_status = check_status + self.exitstatus = self.returncode = None + + def wait(self): + if self.finished: + # Avoid calling communicate() on a dead process because it'll + # give you stick about std* already being closed + if self.check_status and self.exitstatus != 0: + raise CommandFailedError(self.args, self.exitstatus) + else: + return + + out, err = self.subproc.communicate() + if isinstance(self.stdout, StringIO): + self.stdout.write(out.decode(errors='ignore')) + elif self.stdout is None: + pass + else: + self.stdout.write(out) + if isinstance(self.stderr, StringIO): + self.stderr.write(err.decode(errors='ignore')) + elif self.stderr is None: + pass + else: + self.stderr.write(err) + + self.exitstatus = self.returncode = self.subproc.returncode + + if self.exitstatus != 0: + sys.stderr.write(six.ensure_str(out)) + sys.stderr.write(six.ensure_str(err)) + + if self.check_status and self.exitstatus != 0: + raise CommandFailedError(self.args, self.exitstatus) + + @property + def finished(self): + if self.exitstatus is not None: + return True + + if self.subproc.poll() is not None: + out, err = self.subproc.communicate() + if isinstance(self.stdout, StringIO): + self.stdout.write(out.decode(errors='ignore')) + elif self.stdout is None: + pass + else: + self.stdout.write(out) + if isinstance(self.stderr, StringIO): + self.stderr.write(err.decode(errors='ignore')) + elif self.stderr is None: + pass + else: + self.stderr.write(err) + self.exitstatus = self.returncode = self.subproc.returncode + return True + else: + return False + + def kill(self): + log.debug("kill ") + if self.subproc.pid and not self.finished: + log.debug("kill: killing pid {0} ({1})".format( + self.subproc.pid, self.args)) + safe_kill(self.subproc.pid) + else: + log.debug("kill: already terminated ({0})".format(self.args)) + + @property + def stdin(self): + class FakeStdIn(object): + def __init__(self, mount_daemon): + self.mount_daemon = mount_daemon + + def close(self): + self.mount_daemon.kill() + + return FakeStdIn(self) + + +class LocalRemote(object): + """ + Amusingly named class to present the teuthology RemoteProcess interface when we are really + running things locally for vstart + + Run this inside your src/ dir! + """ + + def __init__(self): + self.name = "local" + self.hostname = "localhost" + self.user = getpass.getuser() + + def get_file(self, path, sudo, dest_dir): + tmpfile = tempfile.NamedTemporaryFile(delete=False).name + shutil.copy(path, tmpfile) + return tmpfile + + def put_file(self, src, dst, sudo=False): + shutil.copy(src, dst) + + def run(self, args, check_status=True, wait=True, + stdout=None, stderr=None, cwd=None, stdin=None, + logger=None, label=None, env=None, timeout=None, omit_sudo=True): + try: + if args[args.index('sudo') + 1] in ['-u', 'passwd', 'chown']: + omit_sudo = False + except ValueError: + pass + + # We don't need no stinkin' sudo + if omit_sudo: + args = [a for a in args if a != "sudo"] + + # We have to use shell=True if any run.Raw was present, e.g. && + shell = any([a for a in args if isinstance(a, Raw)]) + + # Filter out helper tools that don't exist in a vstart environment + args = [a for a in args if a not in ( + 'adjust-ulimits', 'ceph-coverage', 'timeout')] + + # Adjust binary path prefix if given a bare program name + if "/" not in args[0]: + # If they asked for a bare binary name, and it exists + # in our built tree, use the one there. 
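+            # For example (sketch): in a cmake build dir BIN_PREFIX is "./bin/",
+            # so args=['ceph', 'osd', 'dump'] becomes ['./bin/ceph', 'osd', 'dump']
+            # provided ./bin/ceph exists; anything not found under the build tree
+            # is left untouched.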
+ local_bin = os.path.join(BIN_PREFIX, args[0]) + if os.path.exists(local_bin): + args = [local_bin] + args[1:] + else: + log.debug("'{0}' is not a binary in the Ceph build dir".format( + args[0] + )) + + log.debug("Running {0}".format(args)) + + if shell: + subproc = subprocess.Popen(quote(args), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE, + cwd=cwd, + shell=True) + else: + # Sanity check that we've got a list of strings + for arg in args: + if not isinstance(arg, six.string_types): + raise RuntimeError("Oops, can't handle arg {0} type {1}".format( + arg, arg.__class__ + )) + + subproc = subprocess.Popen(args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE, + cwd=cwd, + env=env) + + if stdin: + # Hack: writing to stdin is not deadlock-safe, but it "always" works + # as long as the input buffer is "small" + if isinstance(stdin, str): + subproc.stdin.write(stdin.encode()) + else: + subproc.stdin.write(stdin) + + proc = LocalRemoteProcess( + args, subproc, check_status, + stdout, stderr + ) + + if wait: + proc.wait() + + return proc + + # XXX: for compatibility keep this method same teuthology.orchestra.remote.sh + def sh(self, script, **kwargs): + """ + Shortcut for run method. + + Usage: + my_name = remote.sh('whoami') + remote_date = remote.sh('date') + """ + if 'stdout' not in kwargs: + kwargs['stdout'] = StringIO() + if 'args' not in kwargs: + kwargs['args'] = script + proc = self.run(**kwargs) + return proc.stdout.getvalue() + + +class LocalDaemon(object): + def __init__(self, daemon_type, daemon_id): + self.daemon_type = daemon_type + self.daemon_id = daemon_id + self.controller = LocalRemote() + self.proc = None + + @property + def remote(self): + return LocalRemote() + + def running(self): + return self._get_pid() is not None + + def check_status(self): + if self.proc: + return self.proc.poll() + + def _get_pid(self): + """ + Return PID as an integer or None if not found + """ + ps_txt = self.controller.run(args=["ps", "ww", "-u"+str(os.getuid())], + stdout=StringIO()).\ + stdout.getvalue().strip() + lines = ps_txt.split("\n")[1:] + + for line in lines: + if line.find("ceph-{0} -i {1}".format(self.daemon_type, self.daemon_id)) != -1: + log.debug("Found ps line for daemon: {0}".format(line)) + return int(line.split()[0]) + log.debug("No match for {0} {1}: {2}".format( + self.daemon_type, self.daemon_id, ps_txt + )) + return None + + def wait(self, timeout): + waited = 0 + while self._get_pid() is not None: + if waited > timeout: + raise MaxWhileTries("Timed out waiting for daemon {0}.{1}".format(self.daemon_type, self.daemon_id)) + time.sleep(1) + waited += 1 + + def stop(self, timeout=300): + if not self.running(): + log.error('tried to stop a non-running daemon') + return + + pid = self._get_pid() + log.debug("Killing PID {0} for {1}.{2}".format(pid, self.daemon_type, self.daemon_id)) + os.kill(pid, signal.SIGTERM) + + waited = 0 + while pid is not None: + new_pid = self._get_pid() + if new_pid is not None and new_pid != pid: + log.debug("Killing new PID {0}".format(new_pid)) + pid = new_pid + os.kill(pid, signal.SIGTERM) + + if new_pid is None: + break + else: + if waited > timeout: + raise MaxWhileTries( + "Timed out waiting for daemon {0}.{1}".format( + self.daemon_type, self.daemon_id)) + time.sleep(1) + waited += 1 + + self.wait(timeout=timeout) + + def restart(self): + if self._get_pid() is not None: + self.stop() + + self.proc = self.controller.run([os.path.join(BIN_PREFIX, "./ceph-{0}".format(self.daemon_type)), 
"-i", self.daemon_id]) + + def signal(self, sig, silent=False): + if not self.running(): + raise RuntimeError("Can't send signal to non-running daemon") + + os.kill(self._get_pid(), sig) + if not silent: + log.debug("Sent signal {0} to {1}.{2}".format(sig, self.daemon_type, self.daemon_id)) + + +def safe_kill(pid): + """ + os.kill annoyingly raises exception if process already dead. Ignore it. + """ + try: + return os.kill(pid, signal.SIGKILL) + except OSError as e: + if e.errno == errno.ESRCH: + # Raced with process termination + pass + else: + raise + + +class LocalFuseMount(FuseMount): + def __init__(self, ctx, test_dir, client_id): + super(LocalFuseMount, self).__init__(ctx, None, test_dir, client_id, LocalRemote()) + + @property + def config_path(self): + return "./ceph.conf" + + def get_keyring_path(self): + # This is going to end up in a config file, so use an absolute path + # to avoid assumptions about daemons' pwd + return os.path.abspath("./client.{0}.keyring".format(self.client_id)) + + def run_shell(self, args, wait=True, check_status=True, omit_sudo=True): + # FIXME maybe should add a pwd arg to teuthology.orchestra so that + # the "cd foo && bar" shenanigans isn't needed to begin with and + # then we wouldn't have to special case this + return self.client_remote.run(args, wait=wait, cwd=self.mountpoint, + check_status=check_status, + omit_sudo=omit_sudo) + + def setupfs(self, name=None): + if name is None and self.fs is not None: + # Previous mount existed, reuse the old name + name = self.fs.name + self.fs = LocalFilesystem(self.ctx, name=name) + log.debug('Wait for MDS to reach steady state...') + self.fs.wait_for_daemons() + log.debug('Ready to start {}...'.format(type(self).__name__)) + + @property + def _prefix(self): + return BIN_PREFIX + + def _asok_path(self): + # In teuthology, the asok is named after the PID of the ceph-fuse process, because it's + # run foreground. When running it daemonized however, the asok is named after + # the PID of the launching process, not the long running ceph-fuse process. Therefore + # we need to give an exact path here as the logic for checking /proc/ for which + # asok is alive does not work. + + # Load the asok path from ceph.conf as vstart.sh now puts admin sockets + # in a tmpdir. All of the paths are the same, so no need to select + # based off of the service type. 
+ d = "./out" + with open(self.config_path) as f: + for line in f: + asok_conf = re.search("^\s*admin\s+socket\s*=\s*(.*?)[^/]+$", line) + if asok_conf: + d = asok_conf.groups(1)[0] + break + path = "{0}/client.{1}.{2}.asok".format(d, self.client_id, self.fuse_daemon.subproc.pid) + log.info("I think my launching pid was {0}".format(self.fuse_daemon.subproc.pid)) + return path + + def mount(self, mount_path=None, mount_fs_name=None, mountpoint=None): + if mountpoint is not None: + self.mountpoint = mountpoint + self.setupfs(name=mount_fs_name) + + self.client_remote.run(args=['mkdir', '-p', self.mountpoint]) + + def list_connections(): + self.client_remote.run( + args=["mount", "-t", "fusectl", "/sys/fs/fuse/connections", "/sys/fs/fuse/connections"], + check_status=False + ) + + p = self.client_remote.run(args=["ls", "/sys/fs/fuse/connections"], + check_status=False, stdout=StringIO()) + if p.exitstatus != 0: + log.warning("ls conns failed with {0}, assuming none".format(p.exitstatus)) + return [] + + ls_str = p.stdout.getvalue().strip() + if ls_str: + return [int(n) for n in ls_str.split("\n")] + else: + return [] + + # Before starting ceph-fuse process, note the contents of + # /sys/fs/fuse/connections + pre_mount_conns = list_connections() + log.debug("Pre-mount connections: {0}".format(pre_mount_conns)) + + prefix = [os.path.join(BIN_PREFIX, "ceph-fuse")] + if os.getuid() != 0: + prefix += ["--client_die_on_failed_dentry_invalidate=false"] + + if mount_path is not None: + prefix += ["--client_mountpoint={0}".format(mount_path)] + + if mount_fs_name is not None: + prefix += ["--client_mds_namespace={0}".format(mount_fs_name)] + + self.fuse_daemon = self.client_remote.run(args= + prefix + [ + "-f", + "--name", + "client.{0}".format(self.client_id), + self.mountpoint + ], wait=False) + + log.debug("Mounting client.{0} with pid {1}".format(self.client_id, self.fuse_daemon.subproc.pid)) + + # Wait for the connection reference to appear in /sys + waited = 0 + post_mount_conns = list_connections() + while len(post_mount_conns) <= len(pre_mount_conns): + if self.fuse_daemon.finished: + # Did mount fail? Raise the CommandFailedError instead of + # hitting the "failed to populate /sys/" timeout + self.fuse_daemon.wait() + time.sleep(1) + waited += 1 + if waited > 30: + raise RuntimeError("Fuse mount failed to populate /sys/ after {0} seconds".format( + waited + )) + post_mount_conns = list_connections() + + log.debug("Post-mount connections: {0}".format(post_mount_conns)) + + # Record our fuse connection number so that we can use it when + # forcing an unmount + new_conns = list(set(post_mount_conns) - set(pre_mount_conns)) + if len(new_conns) == 0: + raise RuntimeError("New fuse connection directory not found ({0})".format(new_conns)) + elif len(new_conns) > 1: + raise RuntimeError("Unexpectedly numerous fuse connections {0}".format(new_conns)) + else: + self._fuse_conn = new_conns[0] + + self.gather_mount_info() + + self.mounted = True + + def _run_python(self, pyscript, py_version='python'): + """ + Override this to remove the daemon-helper prefix that is used otherwise + to make the process killable. 
+ """ + return self.client_remote.run(args=[py_version, '-c', pyscript], + wait=False, stdout=StringIO()) + +class LocalCephManager(CephManager): + def __init__(self): + # Deliberately skip parent init, only inheriting from it to get + # util methods like osd_dump that sit on top of raw_cluster_cmd + self.controller = LocalRemote() + + # A minority of CephManager fns actually bother locking for when + # certain teuthology tests want to run tasks in parallel + self.lock = threading.RLock() + + self.log = lambda x: log.debug(x) + + # Don't bother constructing a map of pools: it should be empty + # at test cluster start, and in any case it would be out of date + # in no time. The attribute needs to exist for some of the CephManager + # methods to work though. + self.pools = {} + + def find_remote(self, daemon_type, daemon_id): + """ + daemon_type like 'mds', 'osd' + daemon_id like 'a', '0' + """ + return LocalRemote() + + def run_ceph_w(self, watch_channel=None): + """ + :param watch_channel: Specifies the channel to be watched. + This can be 'cluster', 'audit', ... + :type watch_channel: str + """ + args = [os.path.join(BIN_PREFIX, "ceph"), "-w"] + if watch_channel is not None: + args.append("--watch-channel") + args.append(watch_channel) + proc = self.controller.run(args=args, wait=False, stdout=StringIO()) + return proc + + def raw_cluster_cmd(self, *args, **kwargs): + """ + args like ["osd", "dump"} + return stdout string + """ + proc = self.controller.run(args=[os.path.join(BIN_PREFIX, "ceph")] +\ + list(args), **kwargs, stdout=StringIO()) + return proc.stdout.getvalue() + + def raw_cluster_cmd_result(self, *args, **kwargs): + """ + like raw_cluster_cmd but don't check status, just return rc + """ + kwargs['check_status'] = False + proc = self.controller.run([os.path.join(BIN_PREFIX, "ceph")] + list(args), **kwargs) + return proc.exitstatus + + def admin_socket(self, daemon_type, daemon_id, command, check_status=True, + timeout=None, stdout=None): + if stdout is None: + stdout = StringIO() + + return self.controller.run( + args=[os.path.join(BIN_PREFIX, "ceph"), "daemon", + "{0}.{1}".format(daemon_type, daemon_id)] + command, + check_status=check_status, timeout=timeout, stdout=stdout) + + +class LocalCephCluster(CephCluster): + def __init__(self, ctx): + # Deliberately skip calling parent constructor + self._ctx = ctx + self.mon_manager = LocalCephManager() + self._conf = defaultdict(dict) + + @property + def admin_remote(self): + return LocalRemote() + + def get_config(self, key, service_type=None): + if service_type is None: + service_type = 'mon' + + # FIXME hardcoded vstart service IDs + service_id = { + 'mon': 'a', + 'mds': 'a', + 'osd': '0' + }[service_type] + + return self.json_asok(['config', 'get', key], service_type, service_id)[key] + + def _write_conf(self): + # In teuthology, we have the honour of writing the entire ceph.conf, but + # in vstart land it has mostly already been written and we need to carefully + # append to it. 
+ conf_path = "./ceph.conf" + banner = "\n#LOCAL_TEST\n" + existing_str = open(conf_path).read() + + if banner in existing_str: + existing_str = existing_str[0:existing_str.find(banner)] + + existing_str += banner + + for subsys, kvs in self._conf.items(): + existing_str += "\n[{0}]\n".format(subsys) + for key, val in kvs.items(): + # Comment out existing instance if it exists + log.debug("Searching for existing instance {0}/{1}".format( + key, subsys + )) + existing_section = re.search("^\[{0}\]$([\n]|[^\[])+".format( + subsys + ), existing_str, re.MULTILINE) + + if existing_section: + section_str = existing_str[existing_section.start():existing_section.end()] + existing_val = re.search("^\s*[^#]({0}) =".format(key), section_str, re.MULTILINE) + if existing_val: + start = existing_section.start() + existing_val.start(1) + log.debug("Found string to replace at {0}".format( + start + )) + existing_str = existing_str[0:start] + "#" + existing_str[start:] + + existing_str += "{0} = {1}\n".format(key, val) + + open(conf_path, "w").write(existing_str) + + def set_ceph_conf(self, subsys, key, value): + self._conf[subsys][key] = value + self._write_conf() + + def clear_ceph_conf(self, subsys, key): + del self._conf[subsys][key] + self._write_conf() + + +class LocalMDSCluster(LocalCephCluster, MDSCluster): + def __init__(self, ctx): + super(LocalMDSCluster, self).__init__(ctx) + + self.mds_ids = ctx.daemons.daemons['ceph.mds'].keys() + self.mds_daemons = dict([(id_, LocalDaemon("mds", id_)) for id_ in self.mds_ids]) + + def clear_firewall(self): + # FIXME: unimplemented + pass + + def newfs(self, name='cephfs', create=True): + return LocalFilesystem(self._ctx, name=name, create=create) + + +class LocalMgrCluster(LocalCephCluster, MgrCluster): + def __init__(self, ctx): + super(LocalMgrCluster, self).__init__(ctx) + + self.mgr_ids = ctx.daemons.daemons['ceph.mgr'].keys() + self.mgr_daemons = dict([(id_, LocalDaemon("mgr", id_)) for id_ in self.mgr_ids]) + + +class LocalFilesystem(Filesystem, LocalMDSCluster): + def __init__(self, ctx, fscid=None, name='cephfs', create=False): + # Deliberately skip calling parent constructor + self._ctx = ctx + + self.id = None + self.name = None + self.ec_profile = None + self.metadata_pool_name = None + self.metadata_overlay = False + self.data_pool_name = None + self.data_pools = None + self.fs_config = None + + # Hack: cheeky inspection of ceph.conf to see what MDSs exist + self.mds_ids = set() + for line in open("ceph.conf").readlines(): + match = re.match("^\[mds\.(.+)\]$", line) + if match: + self.mds_ids.add(match.group(1)) + + if not self.mds_ids: + raise RuntimeError("No MDSs found in ceph.conf!") + + self.mds_ids = list(self.mds_ids) + + log.debug("Discovered MDS IDs: {0}".format(self.mds_ids)) + + self.mon_manager = LocalCephManager() + + self.mds_daemons = dict([(id_, LocalDaemon("mds", id_)) for id_ in self.mds_ids]) + + self.client_remote = LocalRemote() + + self._conf = defaultdict(dict) + + if name is not None: + if fscid is not None: + raise RuntimeError("cannot specify fscid when creating fs") + if create and not self.legacy_configured(): + self.create() + else: + if fscid is not None: + self.id = fscid + self.getinfo(refresh=True) + + # Stash a reference to the first created filesystem on ctx, so + # that if someone drops to the interactive shell they can easily + # poke our methods. 
+ if not hasattr(self._ctx, "filesystem"): + self._ctx.filesystem = self + + @property + def _prefix(self): + return BIN_PREFIX + + def set_clients_block(self, blocked, mds_id=None): + raise NotImplementedError() + + +class InteractiveFailureResult(unittest.TextTestResult): + """ + Specialization that implements interactive-on-error style + behavior. + """ + def addFailure(self, test, err): + super(InteractiveFailureResult, self).addFailure(test, err) + log.error(self._exc_info_to_string(err, test)) + log.error("Failure in test '{0}', going interactive".format( + self.getDescription(test) + )) + interactive.task(ctx=None, config=None) + + def addError(self, test, err): + super(InteractiveFailureResult, self).addError(test, err) + log.error(self._exc_info_to_string(err, test)) + log.error("Error in test '{0}', going interactive".format( + self.getDescription(test) + )) + interactive.task(ctx=None, config=None) + + +def enumerate_methods(s): + log.debug("e: {0}".format(s)) + for t in s._tests: + if isinstance(t, suite.BaseTestSuite): + for sub in enumerate_methods(t): + yield sub + else: + yield s, t + + +def load_tests(modules, loader): + if modules: + log.debug("Executing modules: {0}".format(modules)) + module_suites = [] + for mod_name in modules: + # Test names like cephfs.test_auto_repair + module_suites.append(loader.loadTestsFromName(mod_name)) + log.debug("Loaded: {0}".format(list(module_suites))) + return suite.TestSuite(module_suites) + else: + log.debug("Executing all cephfs tests") + return loader.discover( + os.path.join(os.path.dirname(os.path.abspath(__file__)), "cephfs") + ) + + +def scan_tests(modules): + overall_suite = load_tests(modules, loader.TestLoader()) + + max_required_mds = 0 + max_required_clients = 0 + max_required_mgr = 0 + require_memstore = False + + for suite_, case in enumerate_methods(overall_suite): + max_required_mds = max(max_required_mds, + getattr(case, "MDSS_REQUIRED", 0)) + max_required_clients = max(max_required_clients, + getattr(case, "CLIENTS_REQUIRED", 0)) + max_required_mgr = max(max_required_mgr, + getattr(case, "MGRS_REQUIRED", 0)) + require_memstore = getattr(case, "REQUIRE_MEMSTORE", False) \ + or require_memstore + + return max_required_mds, max_required_clients, \ + max_required_mgr, require_memstore + + +class LocalCluster(object): + def __init__(self, rolename="placeholder"): + self.remotes = { + LocalRemote(): [rolename] + } + + def only(self, requested): + return self.__class__(rolename=requested) + + +class LocalContext(object): + def __init__(self): + self.config = {} + self.teuthology_config = teuth_config + self.cluster = LocalCluster() + self.daemons = DaemonGroup() + + # Shove some LocalDaemons into the ctx.daemons DaemonGroup instance so that any + # tests that want to look these up via ctx can do so. + # Inspect ceph.conf to see what roles exist + for conf_line in open("ceph.conf").readlines(): + for svc_type in ["mon", "osd", "mds", "mgr"]: + prefixed_type = "ceph." 
+ svc_type + if prefixed_type not in self.daemons.daemons: + self.daemons.daemons[prefixed_type] = {} + match = re.match("^\[{0}\.(.+)\]$".format(svc_type), conf_line) + if match: + svc_id = match.group(1) + self.daemons.daemons[prefixed_type][svc_id] = LocalDaemon(svc_type, svc_id) + + def __del__(self): + shutil.rmtree(self.teuthology_config['test_path']) + +def exec_test(): + # Parse arguments + interactive_on_error = False + create_cluster = False + create_cluster_only = False + ignore_missing_binaries = False + opt_verbose = True + + args = sys.argv[1:] + flags = [a for a in args if a.startswith("-")] + modules = [a for a in args if not a.startswith("-")] + for f in flags: + if f == "--interactive": + interactive_on_error = True + elif f == "--create": + create_cluster = True + elif f == "--create-cluster-only": + create_cluster_only = True + elif f == "--ignore-missing-binaries": + ignore_missing_binaries = True + elif '--no-verbose' == f: + opt_verbose = False + else: + log.error("Unknown option '{0}'".format(f)) + sys.exit(-1) + + # Help developers by stopping up-front if their tree isn't built enough for all the + # tools that the tests might want to use (add more here if needed) + require_binaries = ["ceph-dencoder", "cephfs-journal-tool", "cephfs-data-scan", + "cephfs-table-tool", "ceph-fuse", "rados"] + missing_binaries = [b for b in require_binaries if not os.path.exists(os.path.join(BIN_PREFIX, b))] + if missing_binaries and not ignore_missing_binaries: + log.error("Some ceph binaries missing, please build them: {0}".format(" ".join(missing_binaries))) + sys.exit(-1) + + max_required_mds, max_required_clients, \ + max_required_mgr, require_memstore = scan_tests(modules) + + remote = LocalRemote() + + # Tolerate no MDSs or clients running at start + ps_txt = remote.run(args=["ps", "-u"+str(os.getuid())], + stdout=StringIO()).stdout.getvalue().strip() + lines = ps_txt.split("\n")[1:] + for line in lines: + if 'ceph-fuse' in line or 'ceph-mds' in line: + pid = int(line.split()[0]) + log.warning("Killing stray process {0}".format(line)) + os.kill(pid, signal.SIGKILL) + + # Fire up the Ceph cluster if the user requested it + if create_cluster or create_cluster_only: + log.info("Creating cluster with {0} MDS daemons".format( + max_required_mds)) + remote.run([os.path.join(SRC_PREFIX, "stop.sh")], check_status=False) + remote.run(["rm", "-rf", "./out"]) + remote.run(["rm", "-rf", "./dev"]) + vstart_env = os.environ.copy() + vstart_env["FS"] = "0" + vstart_env["MDS"] = max_required_mds.__str__() + vstart_env["OSD"] = "4" + vstart_env["MGR"] = max(max_required_mgr, 1).__str__() + + args = [ + os.path.join(SRC_PREFIX, "vstart.sh"), + "-n", + "--nolockdep", + ] + if require_memstore: + args.append("--memstore") + + if opt_verbose: + args.append("-d") + + remote.run(args, env=vstart_env) + + # Wait for OSD to come up so that subsequent injectargs etc will + # definitely succeed + LocalCephCluster(LocalContext()).mon_manager.wait_for_all_osds_up(timeout=30) + + if create_cluster_only: + return + + # List of client mounts, sufficient to run the selected tests + clients = [i.__str__() for i in range(0, max_required_clients)] + + test_dir = tempfile.mkdtemp() + teuth_config['test_path'] = test_dir + + ctx = LocalContext() + ceph_cluster = LocalCephCluster(ctx) + mds_cluster = LocalMDSCluster(ctx) + mgr_cluster = LocalMgrCluster(ctx) + + # Construct Mount classes + mounts = [] + for client_id in clients: + # Populate client keyring (it sucks to use client.admin for test clients + # because 
it's awkward to find the logs later) + client_name = "client.{0}".format(client_id) + + if client_name not in open("./keyring").read(): + p = remote.run(args=[os.path.join(BIN_PREFIX, "ceph"), "auth", "get-or-create", client_name, + "osd", "allow rw", + "mds", "allow", + "mon", "allow r"], stdout=StringIO()) + + open("./keyring", "at").write(p.stdout.getvalue()) + + mount = LocalFuseMount(ctx, test_dir, client_id) + mounts.append(mount) + if mount.is_mounted(): + log.warning("unmounting {0}".format(mount.mountpoint)) + mount.umount_wait() + else: + if os.path.exists(mount.mountpoint): + os.rmdir(mount.mountpoint) + + from tasks.cephfs_test_runner import DecoratingLoader + + class LogStream(object): + def __init__(self): + self.buffer = "" + + def write(self, data): + self.buffer += data + if "\n" in self.buffer: + lines = self.buffer.split("\n") + for line in lines[:-1]: + pass + # sys.stderr.write(line + "\n") + log.info(line) + self.buffer = lines[-1] + + def flush(self): + pass + + decorating_loader = DecoratingLoader({ + "ctx": ctx, + "mounts": mounts, + "ceph_cluster": ceph_cluster, + "mds_cluster": mds_cluster, + "mgr_cluster": mgr_cluster, + }) + + # For the benefit of polling tests like test_full -- in teuthology land we set this + # in a .yaml, here it's just a hardcoded thing for the developer's pleasure. + remote.run(args=[os.path.join(BIN_PREFIX, "ceph"), "tell", "osd.*", "injectargs", "--osd-mon-report-interval", "5"]) + ceph_cluster.set_ceph_conf("osd", "osd_mon_report_interval", "5") + + # Vstart defaults to two segments, which very easily gets a "behind on trimming" health warning + # from normal IO latency. Increase it for running teests. + ceph_cluster.set_ceph_conf("mds", "mds log max segments", "10") + + # Make sure the filesystem created in tests has uid/gid that will let us talk to + # it after mounting it (without having to go root). Set in 'global' not just 'mds' + # so that cephfs-data-scan will pick it up too. + ceph_cluster.set_ceph_conf("global", "mds root ino uid", "%s" % os.getuid()) + ceph_cluster.set_ceph_conf("global", "mds root ino gid", "%s" % os.getgid()) + + # Monkeypatch get_package_version to avoid having to work out what kind of distro we're on + def _get_package_version(remote, pkg_name): + # Used in cephfs tests to find fuse version. Your development workstation *does* have >=2.9, right? 
+ return "2.9" + + import teuthology.packaging + teuthology.packaging.get_package_version = _get_package_version + + overall_suite = load_tests(modules, decorating_loader) + + # Filter out tests that don't lend themselves to interactive running, + victims = [] + for case, method in enumerate_methods(overall_suite): + fn = getattr(method, method._testMethodName) + + drop_test = False + + if hasattr(fn, 'is_for_teuthology') and getattr(fn, 'is_for_teuthology') is True: + drop_test = True + log.warning("Dropping test because long running: ".format(method.id())) + + if getattr(fn, "needs_trimming", False) is True: + drop_test = (os.getuid() != 0) + log.warning("Dropping test because client trim unavailable: ".format(method.id())) + + if drop_test: + # Don't drop the test if it was explicitly requested in arguments + is_named = False + for named in modules: + if named.endswith(method.id()): + is_named = True + break + + if not is_named: + victims.append((case, method)) + + log.debug("Disabling {0} tests because of is_for_teuthology or needs_trimming".format(len(victims))) + for s, method in victims: + s._tests.remove(method) + + if interactive_on_error: + result_class = InteractiveFailureResult + else: + result_class = unittest.TextTestResult + fail_on_skip = False + + class LoggingResult(result_class): + def startTest(self, test): + log.info("Starting test: {0}".format(self.getDescription(test))) + test.started_at = datetime.datetime.utcnow() + return super(LoggingResult, self).startTest(test) + + def stopTest(self, test): + log.info("Stopped test: {0} in {1}s".format( + self.getDescription(test), + (datetime.datetime.utcnow() - test.started_at).total_seconds() + )) + + def addSkip(self, test, reason): + if fail_on_skip: + # Don't just call addFailure because that requires a traceback + self.failures.append((test, reason)) + else: + super(LoggingResult, self).addSkip(test, reason) + + # Execute! + result = unittest.TextTestRunner( + stream=LogStream(), + resultclass=LoggingResult, + verbosity=2, + failfast=True).run(overall_suite) + + if not result.wasSuccessful(): + result.printErrors() # duplicate output at end for convenience + + bad_tests = [] + for test, error in result.errors: + bad_tests.append(str(test)) + for test, failure in result.failures: + bad_tests.append(str(test)) + + sys.exit(-1) + else: + sys.exit(0) + + +if __name__ == "__main__": + exec_test() diff --git a/qa/tasks/watch_notify_same_primary.py b/qa/tasks/watch_notify_same_primary.py new file mode 100644 index 00000000..7c034961 --- /dev/null +++ b/qa/tasks/watch_notify_same_primary.py @@ -0,0 +1,130 @@ + +""" +watch_notify_same_primary task +""" +from six import StringIO +import contextlib +import logging + +import six + +from teuthology.orchestra import run +from teuthology.contextutil import safe_while + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def task(ctx, config): + """ + Run watch_notify_same_primary + + The config should be as follows: + + watch_notify_same_primary: + clients: [client list] + + The client list should contain 1 client + + The test requires 3 osds. + + example: + + tasks: + - ceph: + - watch_notify_same_primary: + clients: [client.0] + - interactive: + """ + log.info('Beginning watch_notify_same_primary...') + assert isinstance(config, dict), \ + "please list clients to run on" + + clients = config.get('clients', ['client.0']) + assert len(clients) == 1 + role = clients[0] + assert isinstance(role, six.string_types) + PREFIX = 'client.' 
+ assert role.startswith(PREFIX) + (remote,) = ctx.cluster.only(role).remotes.keys() + manager = ctx.managers['ceph'] + manager.raw_cluster_cmd('osd', 'set', 'noout') + + pool = manager.create_pool_with_unique_name() + def obj(n): return "foo-{num}".format(num=n) + def start_watch(n): + remote.run( + args = [ + "rados", + "-p", pool, + "put", + obj(n), + "/etc/resolv.conf"], + logger=log.getChild('watch.{id}'.format(id=n))) + proc = remote.run( + args = [ + "rados", + "-p", pool, + "watch", + obj(n)], + stdin=run.PIPE, + stdout=StringIO(), + stderr=StringIO(), + wait=False) + return proc + + num = 20 + + watches = [start_watch(i) for i in range(num)] + + # wait for them all to register + for i in range(num): + with safe_while() as proceed: + while proceed(): + lines = remote.sh( + ["rados", "-p", pool, "listwatchers", obj(i)]) + num_watchers = lines.count('watcher=') + log.info('i see %d watchers for %s', num_watchers, obj(i)) + if num_watchers >= 1: + break + + def notify(n, msg): + remote.run( + args = [ + "rados", + "-p", pool, + "notify", + obj(n), + msg], + logger=log.getChild('notify.{id}'.format(id=n))) + + [notify(n, 'notify1') for n in range(len(watches))] + + manager.kill_osd(0) + manager.mark_down_osd(0) + + [notify(n, 'notify2') for n in range(len(watches))] + + try: + yield + finally: + log.info('joining watch_notify_stress') + for watch in watches: + watch.stdin.write("\n") + + run.wait(watches) + + for watch in watches: + lines = watch.stdout.getvalue().split("\n") + got1 = False + got2 = False + for l in lines: + if 'notify1' in l: + got1 = True + if 'notify2' in l: + got2 = True + log.info(lines) + assert got1 and got2 + + manager.revive_osd(0) + manager.remove_pool(pool) diff --git a/qa/tasks/watch_notify_stress.py b/qa/tasks/watch_notify_stress.py new file mode 100644 index 00000000..e5e38049 --- /dev/null +++ b/qa/tasks/watch_notify_stress.py @@ -0,0 +1,70 @@ +""" +test_stress_watch task +""" +import contextlib +import logging + +import six +from teuthology.orchestra import run +from teuthology.task import proc_thrasher + +log = logging.getLogger(__name__) + + +@contextlib.contextmanager +def task(ctx, config): + """ + Run test_stress_watch + + The config should be as follows: + + test_stress_watch: + clients: [client list] + + example: + + tasks: + - ceph: + - test_stress_watch: + clients: [client.0] + - interactive: + """ + log.info('Beginning test_stress_watch...') + assert isinstance(config, dict), \ + "please list clients to run on" + testwatch = {} + + remotes = [] + + for role in config.get('clients', ['client.0']): + assert isinstance(role, six.string_types) + PREFIX = 'client.' 
+ assert role.startswith(PREFIX) + id_ = role[len(PREFIX):] + (remote,) = ctx.cluster.only(role).remotes.keys() + remotes.append(remote) + + args =['CEPH_CLIENT_ID={id_}'.format(id_=id_), + 'CEPH_ARGS="{flags}"'.format(flags=config.get('flags', '')), + 'daemon-helper', + 'kill', + 'multi_stress_watch foo foo' + ] + + log.info("args are %s" % (args,)) + + proc = proc_thrasher.ProcThrasher({}, remote, + args=[run.Raw(i) for i in args], + logger=log.getChild('testwatch.{id}'.format(id=id_)), + stdin=run.PIPE, + wait=False + ) + proc.start() + testwatch[id_] = proc + + try: + yield + finally: + log.info('joining watch_notify_stress') + for i in testwatch.values(): + i.join() diff --git a/qa/tasks/workunit.py b/qa/tasks/workunit.py new file mode 100644 index 00000000..5a767038 --- /dev/null +++ b/qa/tasks/workunit.py @@ -0,0 +1,423 @@ +""" +Workunit task -- Run ceph on sets of specific clients +""" +import logging +import pipes +import os +import re + +import six + +from tasks.util import get_remote_for_role +from tasks.util.workunit import get_refspec_after_overrides + +from teuthology import misc +from teuthology.config import config as teuth_config +from teuthology.orchestra.run import CommandFailedError +from teuthology.parallel import parallel +from teuthology.orchestra import run + +log = logging.getLogger(__name__) + +def task(ctx, config): + """ + Run ceph on all workunits found under the specified path. + + For example:: + + tasks: + - ceph: + - ceph-fuse: [client.0] + - workunit: + clients: + client.0: [direct_io, xattrs.sh] + client.1: [snaps] + branch: foo + + You can also run a list of workunits on all clients: + tasks: + - ceph: + - ceph-fuse: + - workunit: + tag: v0.47 + clients: + all: [direct_io, xattrs.sh, snaps] + + If you have an "all" section it will run all the workunits + on each client simultaneously, AFTER running any workunits specified + for individual clients. (This prevents unintended simultaneous runs.) + + To customize tests, you can specify environment variables as a dict. 
You + can also specify a time limit for each work unit (defaults to 3h): + + tasks: + - ceph: + - ceph-fuse: + - workunit: + sha1: 9b28948635b17165d17c1cf83d4a870bd138ddf6 + clients: + all: [snaps] + env: + FOO: bar + BAZ: quux + timeout: 3h + + This task supports roles that include a ceph cluster, e.g.:: + + tasks: + - ceph: + - workunit: + clients: + backup.client.0: [foo] + client.1: [bar] # cluster is implicitly 'ceph' + + You can also specify an alternative top-level dir to 'qa/workunits', like + 'qa/standalone', with:: + + tasks: + - install: + - workunit: + basedir: qa/standalone + clients: + client.0: + - test-ceph-helpers.sh + + :param ctx: Context + :param config: Configuration + """ + assert isinstance(config, dict) + assert isinstance(config.get('clients'), dict), \ + 'configuration must contain a dictionary of clients' + + overrides = ctx.config.get('overrides', {}) + refspec = get_refspec_after_overrides(config, overrides) + timeout = config.get('timeout', '3h') + cleanup = config.get('cleanup', True) + + log.info('Pulling workunits from ref %s', refspec) + + created_mountpoint = {} + + if config.get('env') is not None: + assert isinstance(config['env'], dict), 'env must be a dictionary' + clients = config['clients'] + + # Create scratch dirs for any non-all workunits + log.info('Making a separate scratch dir for every client...') + for role in clients.keys(): + assert isinstance(role, six.string_types) + if role == "all": + continue + + assert 'client' in role + created_mnt_dir = _make_scratch_dir(ctx, role, config.get('subdir')) + created_mountpoint[role] = created_mnt_dir + + # Execute any non-all workunits + log.info("timeout={}".format(timeout)) + log.info("cleanup={}".format(cleanup)) + with parallel() as p: + for role, tests in clients.items(): + if role != "all": + p.spawn(_run_tests, ctx, refspec, role, tests, + config.get('env'), + basedir=config.get('basedir','qa/workunits'), + timeout=timeout,cleanup=cleanup) + + if cleanup: + # Clean up dirs from any non-all workunits + for role, created in created_mountpoint.items(): + _delete_dir(ctx, role, created) + + # Execute any 'all' workunits + if 'all' in clients: + all_tasks = clients["all"] + _spawn_on_all_clients(ctx, refspec, all_tasks, config.get('env'), + config.get('basedir', 'qa/workunits'), + config.get('subdir'), timeout=timeout, + cleanup=cleanup) + + +def _client_mountpoint(ctx, cluster, id_): + """ + Returns the path to the expected mountpoint for workunits running + on some kind of filesystem. + """ + # for compatibility with tasks like ceph-fuse that aren't cluster-aware yet, + # only include the cluster name in the dir if the cluster is not 'ceph' + if cluster == 'ceph': + dir_ = 'mnt.{0}'.format(id_) + else: + dir_ = 'mnt.{0}.{1}'.format(cluster, id_) + return os.path.join(misc.get_testdir(ctx), dir_) + + +def _delete_dir(ctx, role, created_mountpoint): + """ + Delete file used by this role, and delete the directory that this + role appeared in. + + :param ctx: Context + :param role: "role.#" where # is used for the role id. 
+ """ + cluster, _, id_ = misc.split_role(role) + remote = get_remote_for_role(ctx, role) + mnt = _client_mountpoint(ctx, cluster, id_) + client = os.path.join(mnt, 'client.{id}'.format(id=id_)) + + # Remove the directory inside the mount where the workunit ran + remote.run( + args=[ + 'sudo', + 'rm', + '-rf', + '--', + client, + ], + ) + log.info("Deleted dir {dir}".format(dir=client)) + + # If the mount was an artificially created dir, delete that too + if created_mountpoint: + remote.run( + args=[ + 'rmdir', + '--', + mnt, + ], + ) + log.info("Deleted artificial mount point {dir}".format(dir=client)) + + +def _make_scratch_dir(ctx, role, subdir): + """ + Make scratch directories for this role. This also makes the mount + point if that directory does not exist. + + :param ctx: Context + :param role: "role.#" where # is used for the role id. + :param subdir: use this subdir (False if not used) + """ + created_mountpoint = False + cluster, _, id_ = misc.split_role(role) + remote = get_remote_for_role(ctx, role) + dir_owner = remote.user + mnt = _client_mountpoint(ctx, cluster, id_) + # if neither kclient nor ceph-fuse are required for a workunit, + # mnt may not exist. Stat and create the directory if it doesn't. + try: + remote.run( + args=[ + 'stat', + '--', + mnt, + ], + ) + log.info('Did not need to create dir {dir}'.format(dir=mnt)) + except CommandFailedError: + remote.run( + args=[ + 'mkdir', + '--', + mnt, + ], + ) + log.info('Created dir {dir}'.format(dir=mnt)) + created_mountpoint = True + + if not subdir: + subdir = 'client.{id}'.format(id=id_) + + if created_mountpoint: + remote.run( + args=[ + 'cd', + '--', + mnt, + run.Raw('&&'), + 'mkdir', + '--', + subdir, + ], + ) + else: + remote.run( + args=[ + # cd first so this will fail if the mount point does + # not exist; pure install -d will silently do the + # wrong thing + 'cd', + '--', + mnt, + run.Raw('&&'), + 'sudo', + 'install', + '-d', + '-m', '0755', + '--owner={user}'.format(user=dir_owner), + '--', + subdir, + ], + ) + + return created_mountpoint + + +def _spawn_on_all_clients(ctx, refspec, tests, env, basedir, subdir, timeout=None, cleanup=True): + """ + Make a scratch directory for each client in the cluster, and then for each + test spawn _run_tests() for each role. + + See run_tests() for parameter documentation. + """ + is_client = misc.is_type('client') + client_remotes = {} + created_mountpoint = {} + for remote, roles_for_host in ctx.cluster.remotes.items(): + for role in roles_for_host: + if is_client(role): + client_remotes[role] = remote + created_mountpoint[role] = _make_scratch_dir(ctx, role, subdir) + + for unit in tests: + with parallel() as p: + for role, remote in client_remotes.items(): + p.spawn(_run_tests, ctx, refspec, role, [unit], env, + basedir, + subdir, + timeout=timeout) + + # cleanup the generated client directories + if cleanup: + for role, _ in client_remotes.items(): + _delete_dir(ctx, role, created_mountpoint[role]) + + +def _run_tests(ctx, refspec, role, tests, env, basedir, + subdir=None, timeout=None, cleanup=True): + """ + Run the individual test. Create a scratch directory and then extract the + workunits from git. Make the executables, and then run the tests. + Clean up (remove files created) after the tests are finished. + + :param ctx: Context + :param refspec: branch, sha1, or version tag used to identify this + build + :param tests: specific tests specified. + :param env: environment set in yaml file. Could be None. + :param subdir: subdirectory set in yaml file. 
Could be None + :param timeout: If present, use the 'timeout' command on the remote host + to limit execution time. Must be specified by a number + followed by 's' for seconds, 'm' for minutes, 'h' for + hours, or 'd' for days. If '0' or anything that evaluates + to False is passed, the 'timeout' command is not used. + """ + testdir = misc.get_testdir(ctx) + assert isinstance(role, six.string_types) + cluster, type_, id_ = misc.split_role(role) + assert type_ == 'client' + remote = get_remote_for_role(ctx, role) + mnt = _client_mountpoint(ctx, cluster, id_) + # subdir so we can remove and recreate this a lot without sudo + if subdir is None: + scratch_tmp = os.path.join(mnt, 'client.{id}'.format(id=id_), 'tmp') + else: + scratch_tmp = os.path.join(mnt, subdir) + clonedir = '{tdir}/clone.{role}'.format(tdir=testdir, role=role) + srcdir = '{cdir}/{basedir}'.format(cdir=clonedir, + basedir=basedir) + + git_url = teuth_config.get_ceph_qa_suite_git_url() + # if we are running an upgrade test, and ceph-ci does not have branches like + # `jewel`, so should use ceph.git as an alternative. + try: + remote.run(logger=log.getChild(role), + args=refspec.clone(git_url, clonedir)) + except CommandFailedError: + if git_url.endswith('/ceph-ci.git'): + alt_git_url = git_url.replace('/ceph-ci.git', '/ceph.git') + elif git_url.endswith('/ceph-ci'): + alt_git_url = re.sub(r'/ceph-ci$', '/ceph.git', git_url) + else: + raise + log.info( + "failed to check out '%s' from %s; will also try in %s", + refspec, + git_url, + alt_git_url, + ) + remote.run(logger=log.getChild(role), + args=refspec.clone(alt_git_url, clonedir)) + remote.run( + logger=log.getChild(role), + args=[ + 'cd', '--', srcdir, + run.Raw('&&'), + 'if', 'test', '-e', 'Makefile', run.Raw(';'), 'then', 'make', run.Raw(';'), 'fi', + run.Raw('&&'), + 'find', '-executable', '-type', 'f', '-printf', r'%P\0'.format(srcdir=srcdir), + run.Raw('>{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role)), + ], + ) + + workunits_file = '{tdir}/workunits.list.{role}'.format(tdir=testdir, role=role) + workunits = sorted(six.ensure_str(misc.get_file(remote, workunits_file)).split('\0')) + assert workunits + + try: + assert isinstance(tests, list) + for spec in tests: + log.info('Running workunits matching %s on %s...', spec, role) + prefix = '{spec}/'.format(spec=spec) + to_run = [w for w in workunits if w == spec or w.startswith(prefix)] + if not to_run: + raise RuntimeError('Spec did not match any workunits: {spec!r}'.format(spec=spec)) + for workunit in to_run: + log.info('Running workunit %s...', workunit) + args = [ + 'mkdir', '-p', '--', scratch_tmp, + run.Raw('&&'), + 'cd', '--', scratch_tmp, + run.Raw('&&'), + run.Raw('CEPH_CLI_TEST_DUP_COMMAND=1'), + run.Raw('CEPH_REF={ref}'.format(ref=refspec)), + run.Raw('TESTDIR="{tdir}"'.format(tdir=testdir)), + run.Raw('CEPH_ARGS="--cluster {0}"'.format(cluster)), + run.Raw('CEPH_ID="{id}"'.format(id=id_)), + run.Raw('PATH=$PATH:/usr/sbin'), + run.Raw('CEPH_BASE={dir}'.format(dir=clonedir)), + run.Raw('CEPH_ROOT={dir}'.format(dir=clonedir)), + ] + if env is not None: + for var, val in env.items(): + quoted_val = pipes.quote(val) + env_arg = '{var}={val}'.format(var=var, val=quoted_val) + args.append(run.Raw(env_arg)) + args.extend([ + 'adjust-ulimits', + 'ceph-coverage', + '{tdir}/archive/coverage'.format(tdir=testdir)]) + if timeout and timeout != '0': + args.extend(['timeout', timeout]) + args.extend([ + '{srcdir}/{workunit}'.format( + srcdir=srcdir, + workunit=workunit, + ), + ]) + remote.run( + 
logger=log.getChild(role), + args=args, + label="workunit test {workunit}".format(workunit=workunit) + ) + if cleanup: + args=['sudo', 'rm', '-rf', '--', scratch_tmp] + remote.run(logger=log.getChild(role), args=args, timeout=(60*60)) + finally: + log.info('Stopping %s on %s...', tests, role) + args=['sudo', 'rm', '-rf', '--', workunits_file, clonedir] + # N.B. don't cleanup scratch_tmp! If the mount is broken then rm will hang. + remote.run( + logger=log.getChild(role), + args=args, + ) diff --git a/qa/timezone/eastern.yaml b/qa/timezone/eastern.yaml new file mode 100644 index 00000000..019c761e --- /dev/null +++ b/qa/timezone/eastern.yaml @@ -0,0 +1,4 @@ +tasks: +- exec: + all: + - echo America/New_York | sudo tee /etc/timezone diff --git a/qa/timezone/pacific.yaml b/qa/timezone/pacific.yaml new file mode 100644 index 00000000..6944aa6d --- /dev/null +++ b/qa/timezone/pacific.yaml @@ -0,0 +1,4 @@ +tasks: +- exec: + all: + - echo America/Los_Angeles | sudo tee /etc/timezone diff --git a/qa/timezone/random.yaml b/qa/timezone/random.yaml new file mode 100644 index 00000000..1d48ce91 --- /dev/null +++ b/qa/timezone/random.yaml @@ -0,0 +1,5 @@ +tasks: +- exec: + all: + - echo America/Los_Angeles | sudo tee /etc/timezone + - [ $RANDOM -gt 32000 ] && echo America/New_York | sudo tee /etc/timezone diff --git a/qa/tox.ini b/qa/tox.ini new file mode 100644 index 00000000..5088e120 --- /dev/null +++ b/qa/tox.ini @@ -0,0 +1,15 @@ +[tox] +envlist = flake8-py2, flake8-py3 +skipsdist = True + +[testenv:flake8-py2] +basepython = python2 +deps= + flake8 +commands=flake8 --select=F,E9 --exclude=venv,.tox + +[testenv:flake8-py3] +basepython = python3 +deps= + flake8 +commands=flake8 --select=F,E9 --exclude=venv,.tox diff --git a/qa/valgrind.supp b/qa/valgrind.supp new file mode 100644 index 00000000..95d8d011 --- /dev/null +++ b/qa/valgrind.supp @@ -0,0 +1,627 @@ + +{ + + Memcheck:Free + fun:free + ... +} +{ + tcmalloc leak, observed on nautlius + Memcheck:Leak + ... + fun:_dl_init + ... +} +{ + older boost mersenne twister uses uninitialized memory for randomness + Memcheck:Cond + ... + fun:*Monitor::prepare_new_fingerprint* + ... +} +{ + older boost mersenne twister uses uninitialized memory for randomness + Memcheck:Value8 + ... + fun:*Monitor::prepare_new_fingerprint* + ... +} +{ + apparent TLS leak in eglibc + Memcheck:Leak + fun:calloc + fun:_dl_allocate_tls + fun:pthread_create* + ... +} +{ + osd: ignore ec plugin loading (FIXME SOMEDAY) + Memcheck:Leak + ... + fun:*ErasureCodePluginRegistry*load* + ... +} +{ + osd: ignore ec plugin factory (FIXME SOMEDAY) + Memcheck:Leak + ... + fun:*ErasureCodePluginRegistry*factory* + ... +} +{ + tcmalloc: libboost_thread-mt.so.1.53 is linked with tcmalloc + Memcheck:Param + msync(start) + obj:/usr/lib64/libpthread-2.17.so + obj:/usr/lib64/libunwind.so.8.0.1 + obj:/usr/lib64/libunwind.so.8.0.1 + obj:/usr/lib64/libunwind.so.8.0.1 + obj:/usr/lib64/libunwind.so.8.0.1 + ... + fun:*tcmalloc*ThreadCache* + ... 
+ obj:/usr/lib64/libboost_thread-mt.so.1.53.0 +} +{ + tcmalloc: msync heap allocation points to uninit bytes (centos 6.5) + Memcheck:Param + msync(start) + obj:/lib64/libpthread-2.12.so + obj:/usr/lib64/libunwind.so.8.0.1 + obj:/usr/lib64/libunwind.so.8.0.1 + obj:/usr/lib64/libunwind.so.8.0.1 + obj:/usr/lib64/libunwind.so.8.0.1 + fun:_ULx86_64_step + fun:_Z13GetStackTracePPvii + fun:_ZN8tcmalloc8PageHeap8GrowHeapEm + fun:_ZN8tcmalloc8PageHeap3NewEm +} +{ + tcmalloc: msync heap allocation points to unaddressible bytes (centos 6.5 #2) + Memcheck:Param + msync(start) + obj:/lib64/libpthread-2.12.so + obj:/usr/lib64/libunwind.so.7.0.0 + fun:_ULx86_64_step + fun:_Z13GetStackTracePPvii + fun:_ZN8tcmalloc8PageHeap8GrowHeapEm + fun:_ZN8tcmalloc8PageHeap3NewEm +} +{ + tcmalloc: msync heap allocation points to uninit bytes (rhel7) + Memcheck:Param + msync(start) + obj:/usr/lib64/libpthread-2.17.so + obj:/usr/lib64/libunwind.so.8.0.1 + obj:/usr/lib64/libunwind.so.8.0.1 + obj:/usr/lib64/libunwind.so.8.0.1 + obj:/usr/lib64/libunwind.so.8.0.1 + fun:_ULx86_64_step + fun:_Z13GetStackTracePPvii + fun:_ZN8tcmalloc8PageHeap8GrowHeapEm + fun:_ZN8tcmalloc8PageHeap3NewEm +} +{ + tcmalloc: msync heap allocation points to uninit bytes (rhel7 #2) + Memcheck:Param + msync(start) + obj:/usr/lib64/libpthread-2.17.so + obj:/usr/lib64/libunwind.so.8.0.1 + obj:/usr/lib64/libunwind.so.8.0.1 + obj:/usr/lib64/libunwind.so.8.0.1 + obj:/usr/lib64/libunwind.so.8.0.1 + fun:_ULx86_64_step + obj:/usr/lib64/libtcmalloc.so.4.2.6 + fun:_Z13GetStackTracePPvii + fun:_ZN8tcmalloc8PageHeap8GrowHeapEm + fun:_ZN8tcmalloc8PageHeap3NewEm +} +{ + tcmalloc: msync heap allocation points to uninit bytes (wheezy) + Memcheck:Param + msync(start) + obj:/lib/x86_64-linux-gnu/libpthread-2.13.so + obj:/usr/lib/libunwind.so.7.0.0 + fun:_ULx86_64_step + fun:_Z13GetStackTracePPvii + fun:_ZN8tcmalloc8PageHeap8GrowHeapEm + fun:_ZN8tcmalloc8PageHeap3NewEm +} +{ + tcmalloc: msync heap allocation points to uninit bytes (precise) + Memcheck:Param + msync(start) + obj:/lib/x86_64-linux-gnu/libpthread-2.15.so + obj:/usr/lib/libunwind.so.7.0.0 + fun:_ULx86_64_step + fun:_Z13GetStackTracePPvii + fun:_ZN8tcmalloc8PageHeap8GrowHeapEm + fun:_ZN8tcmalloc8PageHeap3NewEm + obj:/usr/lib/libtcmalloc.so.0.1.0 +} +{ + tcmalloc: msync heap allocation points to uninit bytes (trusty) + Memcheck:Param + msync(start) + obj:/lib/x86_64-linux-gnu/libpthread-2.19.so + obj:/usr/lib/x86_64-linux-gnu/libunwind.so.8.0.1 + obj:/usr/lib/x86_64-linux-gnu/libunwind.so.8.0.1 + obj:/usr/lib/x86_64-linux-gnu/libunwind.so.8.0.1 + obj:/usr/lib/x86_64-linux-gnu/libunwind.so.8.0.1 + fun:_ULx86_64_step + fun:_Z13GetStackTracePPvii + fun:_ZN8tcmalloc8PageHeap8GrowHeapEm + fun:_ZN8tcmalloc8PageHeap3NewEm +} +{ + tcmalloc: msync heap allocation points to uninit bytes 2 (trusty) + Memcheck:Param + msync(start) + fun:__msync_nocancel + obj:/usr/lib/x86_64-linux-gnu/libunwind.so.8.0.1 + obj:/usr/lib/x86_64-linux-gnu/libunwind.so.8.0.1 + obj:/usr/lib/x86_64-linux-gnu/libunwind.so.8.0.1 + obj:/usr/lib/x86_64-linux-gnu/libunwind.so.8.0.1 + fun:_ULx86_64_step + fun:_Z13GetStackTracePPvii + fun:_ZN8tcmalloc8PageHeap8GrowHeapEm + fun:_ZN8tcmalloc8PageHeap3NewEm + fun:_ZN8tcmalloc15CentralFreeList8PopulateEv + fun:_ZN8tcmalloc15CentralFreeList18FetchFromSpansSafeEv + fun:_ZN8tcmalloc15CentralFreeList11RemoveRangeEPPvS2_i +} +{ + tcmalloc: msync (xenial) + Memcheck:Param + msync(start) + fun:__msync_nocancel + obj:/usr/lib/x86_64-linux-gnu/libunwind.so.8.0.1 + obj:/usr/lib/x86_64-linux-gnu/libunwind.so.8.0.1 
+ obj:/usr/lib/x86_64-linux-gnu/libunwind.so.8.0.1 + obj:/usr/lib/x86_64-linux-gnu/libunwind.so.8.0.1 + obj:*tcmalloc* + fun:*GetStackTrace* +} +{ + tcmalloc: string + Memcheck:Leak + ... + obj:*tcmalloc* + fun:call_init* + ... +} +{ + ceph global: deliberate onexit leak + Memcheck:Leak + ... + fun:*set_flush_on_exit* + ... +} +{ + libleveldb: ignore all static leveldb leaks + Memcheck:Leak + ... + fun:*leveldb* + ... +} +{ + libleveldb: ignore all dynamic libleveldb leaks + Memcheck:Leak + ... + obj:*libleveldb.so* + ... +} +{ + libcurl: ignore libcurl leaks + Memcheck:Leak + ... + fun:*curl_global_init +} +{ + ignore gnutls leaks + Memcheck:Leak + ... + fun:gnutls_global_init +} +{ + ignore libfcgi leak; OS_LibShutdown has no callers! + Memcheck:Leak + ... + fun:OS_LibInit + fun:FCGX_Init +} +{ + ignore libnss3 leaks + Memcheck:Leak + ... + obj:*libnss3* + ... +} +{ + strptime suckage + Memcheck:Cond + fun:__GI___strncasecmp_l + fun:__strptime_internal + ... +} +{ + strptime suckage 2 + Memcheck:Value8 + fun:__GI___strncasecmp_l + fun:__strptime_internal + ... +} +{ + strptime suckage 3 + Memcheck:Addr8 + fun:__GI___strncasecmp_l + fun:__strptime_internal + ... +} +{ + inet_ntop does something lame on local stack + Memcheck:Value8 + ... + fun:inet_ntop + ... +} +{ + inet_ntop does something lame on local stack + Memcheck:Addr8 + ... + fun:inet_ntop + ... +} +{ + dl-lookup.c thing .. Invalid write of size 8 + Memcheck:Value8 + fun:do_lookup_x + ... + fun:_dl_lookup_symbol_x + ... +} +{ + dl-lookup.c thing .. Invalid write of size 8 + Memcheck:Addr8 + fun:do_lookup_x + ... + fun:_dl_lookup_symbol_x + ... +} +{ + weird thing from libc + Memcheck:Leak + ... + fun:*sub_I_comparator* + fun:__libc_csu_init + ... +} +{ + libfuse leak + Memcheck:Leak + ... + fun:fuse_parse_cmdline + ... +} +{ + boost thread leaks on exit + Memcheck:Leak + ... + fun:*boost*detail* + ... + fun:exit +} +{ + lttng appears to not clean up state + Memcheck:Leak + ... + fun:lttng_ust_baddr_statedump_init + fun:lttng_ust_init + fun:call_init.part.0 + ... +} +{ + fun:PK11_CreateContextBySymKey race + Helgrind:Race + obj:/usr/*lib*/libfreebl*3.so + ... + obj:/usr/*lib*/libsoftokn3.so + ... + obj:/usr/*lib*/libnss3.so + fun:PK11_CreateContextBySymKey + ... +} +{ + thread init race + Helgrind:Race + fun:mempcpy + fun:_dl_allocate_tls_init + ... + fun:pthread_create@* + ... +} +{ + thread_local memory is falsely detected (https://svn.boost.org/trac/boost/ticket/3296) + Memcheck:Leak + ... + fun:*boost*detail*get_once_per_thread_epoch* + fun:*boost*call_once* + fun:*boost*detail*get_current_thread_data* + ... +} +{ + rocksdb thread local singletons + Memcheck:Leak + ... + fun:rocksdb::Env::Default() + ... +} +{ + rocksdb column thread local leaks + Memcheck:Leak + ... + fun:rocksdb::ThreadLocalPtr::StaticMeta::SetHandler* + fun:rocksdb::ColumnFamilyData::ColumnFamilyData* + ... +} +{ + rocksdb thread crap + Memcheck:Leak + ... + fun:*ThreadLocalPtr* + ... +} +{ + rocksdb singleton Env leak, blech + Memcheck:Leak + ... + fun:CreateThreadStatusUpdater + fun:PosixEnv + ... +} +{ + rocksdb::Env::Default() + Memcheck:Leak + ... + fun:*rocksdb*Env*Default* + ... +} +{ + rocksdb BGThreadWrapper + Memcheck:Leak + ... + fun:*BGThreadWrapper* + ... +} +{ + libstdc++ leak on xenial + Memcheck:Leak + fun:malloc + ... + fun:call_init.part.0 + fun:call_init + fun:_dl_init + ... +} +{ + strange leak of std::string memory from md_config_t seen in radosgw + Memcheck:Leak + ... 
+ fun:_ZNSs4_Rep9_S_createEmmRKSaIcE + fun:_ZNSs12_S_constructIPKcEEPcT_S3_RKSaIcESt20forward_iterator_tag + ... + fun:_ZN11md_config_tC1Ev + fun:_ZN11CephContextC1Eji + ... +} +{ + python does not reset the member field when dealloc an object + Memcheck:Leak + match-leak-kinds: all + ... + fun:Py_InitializeEx + ... +} +{ + statically allocated python types don't get members freed + Memcheck:Leak + match-leak-kinds: all + ... + fun:PyType_Ready + ... +} +{ + manually constructed python module members don't get freed + Memcheck:Leak + match-leak-kinds: all + ... + fun:Py_InitModule4_64 + ... +} +{ + manually constructed python module members don't get freed + Memcheck:Leak + match-leak-kinds: all + ... + fun:PyModule_AddObject + ... +} +{ + python subinterpreters may not clean up properly + Memcheck:Leak + match-leak-kinds: all + ... + fun:Py_NewInterpreter + ... +} +{ + python should be able to take care of itself + Memcheck:Leak + match-leak-kinds: all + ... + fun:PyEval_EvalCode +} +{ + python should be able to take care of itself + Memcheck:Leak + match-leak-kinds: all + ... + fun:PyImport_ImportModuleLevel +} +{ + python-owned threads may not full clean up after themselves + Memcheck:Leak + match-leak-kinds: all + ... + fun:PyEval_CallObjectWithKeywords +} +{ + python should be able to take care of itself + Memcheck:Leak + match-leak-kinds: all + ... + fun:PyEval_EvalFrameEx + ... + obj:/usr/lib64/libpython2.7.so.1.0 +} +{ + python should be able to take care of itself + Memcheck:Leak + match-leak-kinds: all + ... + fun:PyObject_Call +} + +{ + rados cython constants + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:PyObject_Malloc + fun:PyCode_New + fun:__Pyx_InitCachedConstants + fun:initrados + fun:_PyImport_LoadDynamicModule + ... + fun:PyImport_ImportModuleLevel + ... + fun:PyObject_Call + fun:PyEval_CallObjectWithKeywords + fun:PyEval_EvalFrameEx +} + +{ + rbd cython constants + Memcheck:Leak + match-leak-kinds: definite + fun:malloc + fun:PyObject_Malloc + fun:PyCode_New + fun:__Pyx_InitCachedConstants + fun:initrbd + fun:_PyImport_LoadDynamicModule + ... + fun:PyImport_ImportModuleLevel + ... + fun:PyObject_Call + fun:PyEval_CallObjectWithKeywords + fun:PyEval_EvalFrameEx +} + +{ + dlopen() with -lceph-common https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=700899 + Memcheck:Leak + match-leak-kinds: reachable + fun:*alloc + ... + fun:_dlerror_run + fun:dlopen@@GLIBC_2.2.5 +} + +{ + ethdev_init_log thing + Memcheck:Leak + match-leak-kinds: reachable + ... + fun:ethdev_init_log + ... +} + +{ + rte_log_init() in DPDK fails to reset strdup()'ed string at exit + Memcheck:Leak + match-leak-kinds: reachable + fun:*alloc + ... + fun:rte_log_init + fun:__libc_csu_init +} + +{ + libc_csu_init (strdup, rte_log_register, etc.) + Memcheck:Leak + match-leak-kinds: reachable + ... + fun:__libc_csu_init + ... +} + +{ + Boost.Thread fails to call tls_destructor() when the thread exists + Memcheck:Leak + match-leak-kinds: reachable + ... + fun:_Znwm + ... + fun:*boost*detail*set_tss_data* + ... +} + +{ + ignore *all* ceph-mgr python crap. this is overkill, but better than nothing + Memcheck:Leak + match-leak-kinds: all + ... + fun:Py* + ... +} + +{ + something in glibc + Memcheck:Leak + match-leak-kinds: all + ... + fun:strdup + fun:__trans_list_add + ... + fun:_dl_init + ... +} + +# "Conditional jump or move depends on uninitialised value(s)" in OpenSSL +# while using aes-128-gcm with AES-NI enabled. Not observed while running +# with `OPENSSL_ia32cap="~0x200000200000000"`. 
+{ + uninitialised gcm.Xi in aes-128-gcm with AES-NI for msgr, part 1 + Memcheck:Cond + ... + fun:EVP_DecryptFinal_ex + fun:_ZN4ceph6crypto6onwire25AES128GCM_OnWireRxHandler34authenticated_decrypt_update_finalEONS_6buffer7v14_2_04listEj + fun:_ZN10ProtocolV231handle_read_frame_epilogue_mainEOSt10unique_ptrIN4ceph6buffer7v14_2_08ptr_nodeENS4_8disposerEEi + fun:_ZN10ProtocolV216run_continuationER2CtIS_E + ... + fun:_ZN15AsyncConnection7processEv + fun:_ZN11EventCenter14process_eventsEjPNSt6chrono8durationImSt5ratioILl1ELl1000000000EEEE + ... +} + +{ + uninitialised gcm.Xi in aes-128-gcm with AES-NI for msgr, part 2 + Memcheck:Cond + fun:_ZN4ceph6crypto6onwire25AES128GCM_OnWireRxHandler34authenticated_decrypt_update_finalEONS_6buffer7v14_2_04listEj + fun:_ZN10ProtocolV231handle_read_frame_epilogue_mainEOSt10unique_ptrIN4ceph6buffer7v14_2_08ptr_nodeENS4_8disposerEEi + fun:_ZN10ProtocolV216run_continuationER2CtIS_E + ... + fun:_ZN11EventCenter14process_eventsEjPNSt6chrono8durationImSt5ratioILl1ELl1000000000EEEE + ... +} diff --git a/qa/workunits/Makefile b/qa/workunits/Makefile new file mode 100644 index 00000000..f75f5dfd --- /dev/null +++ b/qa/workunits/Makefile @@ -0,0 +1,4 @@ +DIRS = direct_io fs + +all: + for d in $(DIRS) ; do ( cd $$d ; $(MAKE) all ) ; done diff --git a/qa/workunits/caps/mon_commands.sh b/qa/workunits/caps/mon_commands.sh new file mode 100755 index 00000000..5b5bce62 --- /dev/null +++ b/qa/workunits/caps/mon_commands.sh @@ -0,0 +1,25 @@ +#!/bin/sh -ex + +ceph-authtool --create-keyring k --gen-key -p --name client.xx +ceph auth add -i k client.xx mon "allow command foo; allow command bar *; allow command baz ...; allow command foo add * mon allow\\ rwx osd allow\\ *" + +( ceph -k k -n client.xx foo || true ) | grep 'unrecog' +( ceph -k k -n client.xx foo ooo || true ) | grep 'Access denied' +( ceph -k k -n client.xx fo || true ) | grep 'Access denied' +( ceph -k k -n client.xx fooo || true ) | grep 'Access denied' + +( ceph -k k -n client.xx bar || true ) | grep 'Access denied' +( ceph -k k -n client.xx bar a || true ) | grep 'unrecog' +( ceph -k k -n client.xx bar a b c || true ) | grep 'Access denied' +( ceph -k k -n client.xx ba || true ) | grep 'Access denied' +( ceph -k k -n client.xx barr || true ) | grep 'Access denied' + +( ceph -k k -n client.xx baz || true ) | grep -v 'Access denied' +( ceph -k k -n client.xx baz a || true ) | grep -v 'Access denied' +( ceph -k k -n client.xx baz a b || true ) | grep -v 'Access denied' + +( ceph -k k -n client.xx foo add osd.1 -i k mon 'allow rwx' osd 'allow *' || true ) | grep 'unrecog' +( ceph -k k -n client.xx foo add osd a b c -i k mon 'allow rwx' osd 'allow *' || true ) | grep 'Access denied' +( ceph -k k -n client.xx foo add osd a b c -i k mon 'allow *' || true ) | grep 'Access denied' + +echo OK \ No newline at end of file diff --git a/qa/workunits/ceph-helpers-root.sh b/qa/workunits/ceph-helpers-root.sh new file mode 100755 index 00000000..dc81b2b3 --- /dev/null +++ b/qa/workunits/ceph-helpers-root.sh @@ -0,0 +1,126 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2015 Red Hat +# +# Author: Loic Dachary +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU Library Public License for more details. +# + +####################################################################### + +function distro_id() { + source /etc/os-release + echo $ID +} + +function distro_version() { + source /etc/os-release + echo $VERSION +} + +function install() { + for package in "$@" ; do + install_one $package + done +} + +function install_one() { + case $(distro_id) in + ubuntu|debian|devuan) + sudo env DEBIAN_FRONTEND=noninteractive apt-get install -y "$@" + ;; + centos|fedora|rhel) + sudo yum install -y "$@" + ;; + opensuse*|suse|sles) + sudo zypper --non-interactive install "$@" + ;; + *) + echo "$(distro_id) is unknown, $@ will have to be installed manually." + ;; + esac +} + +function install_cmake3_on_centos7 { + source /etc/os-release + local MAJOR_VERSION="$(echo $VERSION_ID | cut -d. -f1)" + sudo yum-config-manager --add-repo https://dl.fedoraproject.org/pub/epel/$MAJOR_VERSION/x86_64/ + sudo yum install --nogpgcheck -y epel-release + sudo rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-$MAJOR_VERSION + sudo yum install -y cmake3 +} + +function install_pkg_on_ubuntu { + local project=$1 + shift + local sha1=$1 + shift + local codename=$1 + shift + local force=$1 + shift + local pkgs=$@ + local missing_pkgs + if [ $force = "force" ]; then + missing_pkgs="$@" + else + for pkg in $pkgs; do + if ! dpkg -s $pkg &> /dev/null; then + missing_pkgs+=" $pkg" + fi + done + fi + if test -n "$missing_pkgs"; then + local shaman_url="https://shaman.ceph.com/api/repos/${project}/master/${sha1}/ubuntu/${codename}/repo" + sudo curl --silent --location $shaman_url --output /etc/apt/sources.list.d/$project.list + sudo env DEBIAN_FRONTEND=noninteractive apt-get update -y -o Acquire::Languages=none -o Acquire::Translation=none || true + sudo env DEBIAN_FRONTEND=noninteractive apt-get install --allow-unauthenticated -y $missing_pkgs + fi +} + +####################################################################### + +function control_osd() { + local action=$1 + local id=$2 + + sudo systemctl $action ceph-osd@$id + + return 0 +} + +####################################################################### + +function pool_read_write() { + local size=${1:-1} + local dir=/tmp + local timeout=360 + local test_pool=test_pool + + ceph osd pool delete $test_pool $test_pool --yes-i-really-really-mean-it || return 1 + ceph osd pool create $test_pool 4 || return 1 + ceph osd pool set $test_pool size $size || return 1 + ceph osd pool set $test_pool min_size $size || return 1 + ceph osd pool application enable $test_pool rados + + echo FOO > $dir/BAR + timeout $timeout rados --pool $test_pool put BAR $dir/BAR || return 1 + timeout $timeout rados --pool $test_pool get BAR $dir/BAR.copy || return 1 + diff $dir/BAR $dir/BAR.copy || return 1 + ceph osd pool delete $test_pool $test_pool --yes-i-really-really-mean-it || return 1 +} + +####################################################################### + +set -x + +"$@" diff --git a/qa/workunits/ceph-tests/ceph-admin-commands.sh b/qa/workunits/ceph-tests/ceph-admin-commands.sh new file mode 100755 index 00000000..4a9f0a66 --- /dev/null +++ b/qa/workunits/ceph-tests/ceph-admin-commands.sh @@ -0,0 +1,10 @@ +#!/bin/sh -ex + +ceph -s +rados lspools +rbd ls +# check that the monitors work +ceph osd set nodown +ceph osd unset nodown + +exit 0 diff --git a/qa/workunits/cephtool/test.sh b/qa/workunits/cephtool/test.sh new file mode 100755 index 00000000..1929d296 --- /dev/null +++ b/qa/workunits/cephtool/test.sh @@ -0,0 +1,2920 @@ 
+#!/usr/bin/env bash +# -*- mode:shell-script; tab-width:8; sh-basic-offset:2; indent-tabs-mode:t -*- +# vim: ts=8 sw=8 ft=bash smarttab +set -x + +source $(dirname $0)/../../standalone/ceph-helpers.sh + +set -e +set -o functrace +PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: ' +SUDO=${SUDO:-sudo} +export CEPH_DEV=1 + +function get_admin_socket() +{ + local client=$1 + + if test -n "$CEPH_ASOK_DIR"; + then + echo $(get_asok_dir)/$client.asok + else + local cluster=$(echo $CEPH_ARGS | sed -r 's/.*--cluster[[:blank:]]*([[:alnum:]]*).*/\1/') + echo "/var/run/ceph/$cluster-$client.asok" + fi +} + +function check_no_osd_down() +{ + ! ceph osd dump | grep ' down ' +} + +function wait_no_osd_down() +{ + max_run=300 + for i in $(seq 1 $max_run) ; do + if ! check_no_osd_down ; then + echo "waiting for osd(s) to come back up ($i/$max_run)" + sleep 1 + else + break + fi + done + check_no_osd_down +} + +function expect_false() +{ + set -x + if "$@"; then return 1; else return 0; fi +} + +function expect_true() +{ + set -x + if ! "$@"; then return 1; else return 0; fi +} + +TEMP_DIR=$(mktemp -d ${TMPDIR-/tmp}/cephtool.XXX) +trap "rm -fr $TEMP_DIR" 0 + +TMPFILE=$(mktemp $TEMP_DIR/test_invalid.XXX) + +# +# retry_eagain max cmd args ... +# +# retry cmd args ... if it exits on error and its output contains the +# string EAGAIN, at most $max times +# +function retry_eagain() +{ + local max=$1 + shift + local status + local tmpfile=$TEMP_DIR/retry_eagain.$$ + local count + for count in $(seq 1 $max) ; do + status=0 + "$@" > $tmpfile 2>&1 || status=$? + if test $status = 0 || + ! grep --quiet EAGAIN $tmpfile ; then + break + fi + sleep 1 + done + if test $count = $max ; then + echo retried with non zero exit status, $max times: "$@" >&2 + fi + cat $tmpfile + rm $tmpfile + return $status +} + +# +# map_enxio_to_eagain cmd arg ... +# +# add EAGAIN to the output of cmd arg ... if the output contains +# ENXIO. +# +function map_enxio_to_eagain() +{ + local status=0 + local tmpfile=$TEMP_DIR/map_enxio_to_eagain.$$ + + "$@" > $tmpfile 2>&1 || status=$? + if test $status != 0 && + grep --quiet ENXIO $tmpfile ; then + echo "EAGAIN added by $0::map_enxio_to_eagain" >> $tmpfile + fi + cat $tmpfile + rm $tmpfile + return $status +} + +function check_response() +{ + expected_string=$1 + retcode=$2 + expected_retcode=$3 + if [ "$expected_retcode" -a $retcode != $expected_retcode ] ; then + echo "return code invalid: got $retcode, expected $expected_retcode" >&2 + exit 1 + fi + + if ! grep --quiet -- "$expected_string" $TMPFILE ; then + echo "Didn't find $expected_string in output" >&2 + cat $TMPFILE >&2 + exit 1 + fi +} + +function get_config_value_or_die() +{ + local target config_opt raw val + + target=$1 + config_opt=$2 + + raw="`$SUDO ceph daemon $target config get $config_opt 2>/dev/null`" + if [[ $? 
-ne 0 ]]; then + echo "error obtaining config opt '$config_opt' from '$target': $raw" + exit 1 + fi + + raw=`echo $raw | sed -e 's/[{} "]//g'` + val=`echo $raw | cut -f2 -d:` + + echo "$val" + return 0 +} + +function expect_config_value() +{ + local target config_opt expected_val val + target=$1 + config_opt=$2 + expected_val=$3 + + val=$(get_config_value_or_die $target $config_opt) + + if [[ "$val" != "$expected_val" ]]; then + echo "expected '$expected_val', got '$val'" + exit 1 + fi +} + +function ceph_watch_start() +{ + local whatch_opt=--watch + + if [ -n "$1" ]; then + whatch_opt=--watch-$1 + if [ -n "$2" ]; then + whatch_opt+=" --watch-channel $2" + fi + fi + + CEPH_WATCH_FILE=${TEMP_DIR}/CEPH_WATCH_$$ + ceph $whatch_opt > $CEPH_WATCH_FILE & + CEPH_WATCH_PID=$! + + # wait until the "ceph" client is connected and receiving + # log messages from monitor + for i in `seq 3`; do + grep -q "cluster" $CEPH_WATCH_FILE && break + sleep 1 + done +} + +function ceph_watch_wait() +{ + local regexp=$1 + local timeout=30 + + if [ -n "$2" ]; then + timeout=$2 + fi + + for i in `seq ${timeout}`; do + grep -q "$regexp" $CEPH_WATCH_FILE && break + sleep 1 + done + + kill $CEPH_WATCH_PID + + if ! grep "$regexp" $CEPH_WATCH_FILE; then + echo "pattern ${regexp} not found in watch file. Full watch file content:" >&2 + cat $CEPH_WATCH_FILE >&2 + return 1 + fi +} + +function test_mon_injectargs() +{ + ceph tell osd.0 injectargs --no-osd_enable_op_tracker + ceph tell osd.0 config get osd_enable_op_tracker | grep false + ceph tell osd.0 injectargs '--osd_enable_op_tracker --osd_op_history_duration 500' + ceph tell osd.0 config get osd_enable_op_tracker | grep true + ceph tell osd.0 config get osd_op_history_duration | grep 500 + ceph tell osd.0 injectargs --no-osd_enable_op_tracker + ceph tell osd.0 config get osd_enable_op_tracker | grep false + ceph tell osd.0 injectargs -- --osd_enable_op_tracker + ceph tell osd.0 config get osd_enable_op_tracker | grep true + ceph tell osd.0 injectargs -- '--osd_enable_op_tracker --osd_op_history_duration 600' + ceph tell osd.0 config get osd_enable_op_tracker | grep true + ceph tell osd.0 config get osd_op_history_duration | grep 600 + + ceph tell osd.0 injectargs -- '--osd_deep_scrub_interval 2419200' + ceph tell osd.0 config get osd_deep_scrub_interval | grep 2419200 + + ceph tell osd.0 injectargs -- '--mon_probe_timeout 2' + ceph tell osd.0 config get mon_probe_timeout | grep 2 + + ceph tell osd.0 injectargs -- '--mon-lease 6' + ceph tell osd.0 config get mon_lease | grep 6 + + # osd-scrub-auto-repair-num-errors is an OPT_U32, so -1 is not a valid setting + expect_false ceph tell osd.0 injectargs --osd-scrub-auto-repair-num-errors -1 >& $TMPFILE || return 1 + check_response "Error EINVAL: Parse error setting osd_scrub_auto_repair_num_errors to '-1' using injectargs" + + expect_failure $TEMP_DIR "Option --osd_op_history_duration requires an argument" \ + ceph tell osd.0 injectargs -- '--osd_op_history_duration' + +} + +function test_mon_injectargs_SI() +{ + # Test SI units during injectargs and 'config set' + # We only aim at testing the units are parsed accordingly + # and don't intend to test whether the options being set + # actually expect SI units to be passed. + # Keep in mind that all integer based options that are not based on bytes + # (i.e., INT, LONG, U32, U64) will accept SI unit modifiers and be parsed to + # base 10. 
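+  # For example, with base-10 parsing "10K" is read as 10 * 1000 = 10000 and
+  # "1G" as 10^9 = 1000000000, which is what the checks below assert.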
+ initial_value=$(get_config_value_or_die "mon.a" "mon_pg_warn_min_objects") + $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10 + expect_config_value "mon.a" "mon_pg_warn_min_objects" 10 + $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10K + expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000 + $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 1G + expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000 + $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects 10F > $TMPFILE || true + check_response "'10F': (22) Invalid argument" + # now test with injectargs + ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10' + expect_config_value "mon.a" "mon_pg_warn_min_objects" 10 + ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10K' + expect_config_value "mon.a" "mon_pg_warn_min_objects" 10000 + ceph tell mon.a injectargs '--mon_pg_warn_min_objects 1G' + expect_config_value "mon.a" "mon_pg_warn_min_objects" 1000000000 + expect_false ceph tell mon.a injectargs '--mon_pg_warn_min_objects 10F' + expect_false ceph tell mon.a injectargs '--mon_globalid_prealloc -1' + $SUDO ceph daemon mon.a config set mon_pg_warn_min_objects $initial_value +} + +function test_mon_injectargs_IEC() +{ + # Test IEC units during injectargs and 'config set' + # We only aim at testing the units are parsed accordingly + # and don't intend to test whether the options being set + # actually expect IEC units to be passed. + # Keep in mind that all integer based options that are based on bytes + # (i.e., INT, LONG, U32, U64) will accept IEC unit modifiers, as well as SI + # unit modifiers (for backwards compatibility and convenience) and be parsed + # to base 2. + initial_value=$(get_config_value_or_die "mon.a" "mon_data_size_warn") + $SUDO ceph daemon mon.a config set mon_data_size_warn 15000000000 + expect_config_value "mon.a" "mon_data_size_warn" 15000000000 + $SUDO ceph daemon mon.a config set mon_data_size_warn 15G + expect_config_value "mon.a" "mon_data_size_warn" 16106127360 + $SUDO ceph daemon mon.a config set mon_data_size_warn 16Gi + expect_config_value "mon.a" "mon_data_size_warn" 17179869184 + $SUDO ceph daemon mon.a config set mon_data_size_warn 10F > $TMPFILE || true + check_response "'10F': (22) Invalid argument" + # now test with injectargs + ceph tell mon.a injectargs '--mon_data_size_warn 15000000000' + expect_config_value "mon.a" "mon_data_size_warn" 15000000000 + ceph tell mon.a injectargs '--mon_data_size_warn 15G' + expect_config_value "mon.a" "mon_data_size_warn" 16106127360 + ceph tell mon.a injectargs '--mon_data_size_warn 16Gi' + expect_config_value "mon.a" "mon_data_size_warn" 17179869184 + expect_false ceph tell mon.a injectargs '--mon_data_size_warn 10F' + $SUDO ceph daemon mon.a config set mon_data_size_warn $initial_value +} + +function test_tiering_agent() +{ + local slow=slow_eviction + local fast=fast_eviction + ceph osd pool create $slow 1 1 + ceph osd pool application enable $slow rados + ceph osd pool create $fast 1 1 + ceph osd tier add $slow $fast + ceph osd tier cache-mode $fast writeback + ceph osd tier set-overlay $slow $fast + ceph osd pool set $fast hit_set_type bloom + rados -p $slow put obj1 /etc/group + ceph osd pool set $fast target_max_objects 1 + ceph osd pool set $fast hit_set_count 1 + ceph osd pool set $fast hit_set_period 5 + # wait for the object to be evicted from the cache + local evicted + evicted=false + for i in `seq 1 300` ; do + if ! 
rados -p $fast ls | grep obj1 ; then + evicted=true + break + fi + sleep 1 + done + $evicted # assert + # the object is proxy read and promoted to the cache + rados -p $slow get obj1 - >/dev/null + # wait for the promoted object to be evicted again + evicted=false + for i in `seq 1 300` ; do + if ! rados -p $fast ls | grep obj1 ; then + evicted=true + break + fi + sleep 1 + done + $evicted # assert + ceph osd tier remove-overlay $slow + ceph osd tier remove $slow $fast + ceph osd pool delete $fast $fast --yes-i-really-really-mean-it + ceph osd pool delete $slow $slow --yes-i-really-really-mean-it +} + +function test_tiering_1() +{ + # tiering + ceph osd pool create slow 2 + ceph osd pool application enable slow rados + ceph osd pool create slow2 2 + ceph osd pool application enable slow2 rados + ceph osd pool create cache 2 + ceph osd pool create cache2 2 + ceph osd tier add slow cache + ceph osd tier add slow cache2 + expect_false ceph osd tier add slow2 cache + # test some state transitions + ceph osd tier cache-mode cache writeback + expect_false ceph osd tier cache-mode cache forward + ceph osd tier cache-mode cache forward --yes-i-really-mean-it + expect_false ceph osd tier cache-mode cache readonly + ceph osd tier cache-mode cache readonly --yes-i-really-mean-it + expect_false ceph osd tier cache-mode cache forward + ceph osd tier cache-mode cache forward --yes-i-really-mean-it + ceph osd tier cache-mode cache none + ceph osd tier cache-mode cache writeback + ceph osd tier cache-mode cache proxy + ceph osd tier cache-mode cache writeback + expect_false ceph osd tier cache-mode cache none + expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it + # test with dirty objects in the tier pool + # tier pool currently set to 'writeback' + rados -p cache put /etc/passwd /etc/passwd + flush_pg_stats + # 1 dirty object in pool 'cache' + ceph osd tier cache-mode cache proxy + expect_false ceph osd tier cache-mode cache none + expect_false ceph osd tier cache-mode cache readonly --yes-i-really-mean-it + ceph osd tier cache-mode cache writeback + # remove object from tier pool + rados -p cache rm /etc/passwd + rados -p cache cache-flush-evict-all + flush_pg_stats + # no dirty objects in pool 'cache' + ceph osd tier cache-mode cache proxy + ceph osd tier cache-mode cache none + ceph osd tier cache-mode cache readonly --yes-i-really-mean-it + TRIES=0 + while ! ceph osd pool set cache pg_num 3 --yes-i-really-mean-it 2>$TMPFILE + do + grep 'currently creating pgs' $TMPFILE + TRIES=$(( $TRIES + 1 )) + test $TRIES -ne 60 + sleep 3 + done + expect_false ceph osd pool set cache pg_num 4 + ceph osd tier cache-mode cache none + ceph osd tier set-overlay slow cache + expect_false ceph osd tier set-overlay slow cache2 + expect_false ceph osd tier remove slow cache + ceph osd tier remove-overlay slow + ceph osd tier set-overlay slow cache2 + ceph osd tier remove-overlay slow + ceph osd tier remove slow cache + ceph osd tier add slow2 cache + expect_false ceph osd tier set-overlay slow cache + ceph osd tier set-overlay slow2 cache + ceph osd tier remove-overlay slow2 + ceph osd tier remove slow2 cache + ceph osd tier remove slow cache2 + + # make sure a non-empty pool fails + rados -p cache2 put /etc/passwd /etc/passwd + while ! 
ceph df | grep cache2 | grep ' 1 ' ; do + echo waiting for pg stats to flush + sleep 2 + done + expect_false ceph osd tier add slow cache2 + ceph osd tier add slow cache2 --force-nonempty + ceph osd tier remove slow cache2 + + ceph osd pool ls | grep cache2 + ceph osd pool ls -f json-pretty | grep cache2 + ceph osd pool ls detail | grep cache2 + ceph osd pool ls detail -f json-pretty | grep cache2 + + ceph osd pool delete slow slow --yes-i-really-really-mean-it + ceph osd pool delete slow2 slow2 --yes-i-really-really-mean-it + ceph osd pool delete cache cache --yes-i-really-really-mean-it + ceph osd pool delete cache2 cache2 --yes-i-really-really-mean-it +} + +function test_tiering_2() +{ + # make sure we can't clobber snapshot state + ceph osd pool create snap_base 2 + ceph osd pool application enable snap_base rados + ceph osd pool create snap_cache 2 + ceph osd pool mksnap snap_cache snapname + expect_false ceph osd tier add snap_base snap_cache + ceph osd pool delete snap_base snap_base --yes-i-really-really-mean-it + ceph osd pool delete snap_cache snap_cache --yes-i-really-really-mean-it +} + +function test_tiering_3() +{ + # make sure we can't create snapshot on tier + ceph osd pool create basex 2 + ceph osd pool application enable basex rados + ceph osd pool create cachex 2 + ceph osd tier add basex cachex + expect_false ceph osd pool mksnap cache snapname + ceph osd tier remove basex cachex + ceph osd pool delete basex basex --yes-i-really-really-mean-it + ceph osd pool delete cachex cachex --yes-i-really-really-mean-it +} + +function test_tiering_4() +{ + # make sure we can't create an ec pool tier + ceph osd pool create eccache 2 2 erasure + expect_false ceph osd set-require-min-compat-client bobtail + ceph osd pool create repbase 2 + ceph osd pool application enable repbase rados + expect_false ceph osd tier add repbase eccache + ceph osd pool delete repbase repbase --yes-i-really-really-mean-it + ceph osd pool delete eccache eccache --yes-i-really-really-mean-it +} + +function test_tiering_5() +{ + # convenient add-cache command + ceph osd pool create slow 2 + ceph osd pool application enable slow rados + ceph osd pool create cache3 2 + ceph osd tier add-cache slow cache3 1024000 + ceph osd dump | grep cache3 | grep bloom | grep 'false_positive_probability: 0.05' | grep 'target_bytes 1024000' | grep '1200s x4' + ceph osd tier remove slow cache3 2> $TMPFILE || true + check_response "EBUSY: tier pool 'cache3' is the overlay for 'slow'; please remove-overlay first" + ceph osd tier remove-overlay slow + ceph osd tier remove slow cache3 + ceph osd pool ls | grep cache3 + ceph osd pool delete cache3 cache3 --yes-i-really-really-mean-it + ! 
ceph osd pool ls | grep cache3 || exit 1 + ceph osd pool delete slow slow --yes-i-really-really-mean-it +} + +function test_tiering_6() +{ + # check add-cache whether work + ceph osd pool create datapool 2 + ceph osd pool application enable datapool rados + ceph osd pool create cachepool 2 + ceph osd tier add-cache datapool cachepool 1024000 + ceph osd tier cache-mode cachepool writeback + rados -p datapool put object /etc/passwd + rados -p cachepool stat object + rados -p cachepool cache-flush object + rados -p datapool stat object + ceph osd tier remove-overlay datapool + ceph osd tier remove datapool cachepool + ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it + ceph osd pool delete datapool datapool --yes-i-really-really-mean-it +} + +function test_tiering_7() +{ + # protection against pool removal when used as tiers + ceph osd pool create datapool 2 + ceph osd pool application enable datapool rados + ceph osd pool create cachepool 2 + ceph osd tier add-cache datapool cachepool 1024000 + ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it 2> $TMPFILE || true + check_response "EBUSY: pool 'cachepool' is a tier of 'datapool'" + ceph osd pool delete datapool datapool --yes-i-really-really-mean-it 2> $TMPFILE || true + check_response "EBUSY: pool 'datapool' has tiers cachepool" + ceph osd tier remove-overlay datapool + ceph osd tier remove datapool cachepool + ceph osd pool delete cachepool cachepool --yes-i-really-really-mean-it + ceph osd pool delete datapool datapool --yes-i-really-really-mean-it +} + +function test_tiering_8() +{ + ## check health check + ceph osd set notieragent + ceph osd pool create datapool 2 + ceph osd pool application enable datapool rados + ceph osd pool create cache4 2 + ceph osd tier add-cache datapool cache4 1024000 + ceph osd tier cache-mode cache4 writeback + tmpfile=$(mktemp|grep tmp) + dd if=/dev/zero of=$tmpfile bs=4K count=1 + ceph osd pool set cache4 target_max_objects 200 + ceph osd pool set cache4 target_max_bytes 1000000 + rados -p cache4 put foo1 $tmpfile + rados -p cache4 put foo2 $tmpfile + rm -f $tmpfile + flush_pg_stats + ceph df | grep datapool | grep ' 2 ' + ceph osd tier remove-overlay datapool + ceph osd tier remove datapool cache4 + ceph osd pool delete cache4 cache4 --yes-i-really-really-mean-it + ceph osd pool delete datapool datapool --yes-i-really-really-mean-it + ceph osd unset notieragent +} + +function test_tiering_9() +{ + # make sure 'tier remove' behaves as we expect + # i.e., removing a tier from a pool that's not its base pool only + # results in a 'pool foo is now (or already was) not a tier of bar' + # + ceph osd pool create basepoolA 2 + ceph osd pool application enable basepoolA rados + ceph osd pool create basepoolB 2 + ceph osd pool application enable basepoolB rados + poolA_id=$(ceph osd dump | grep 'pool.*basepoolA' | awk '{print $2;}') + poolB_id=$(ceph osd dump | grep 'pool.*basepoolB' | awk '{print $2;}') + + ceph osd pool create cache5 2 + ceph osd pool create cache6 2 + ceph osd tier add basepoolA cache5 + ceph osd tier add basepoolB cache6 + ceph osd tier remove basepoolB cache5 2>&1 | grep 'not a tier of' + ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of[ \t]\+$poolA_id" + ceph osd tier remove basepoolA cache6 2>&1 | grep 'not a tier of' + ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of[ \t]\+$poolB_id" + + ceph osd tier remove basepoolA cache5 2>&1 | grep 'not a tier of' + ! 
ceph osd dump | grep "pool.*'cache5'" 2>&1 | grep "tier_of" || exit 1 + ceph osd tier remove basepoolB cache6 2>&1 | grep 'not a tier of' + ! ceph osd dump | grep "pool.*'cache6'" 2>&1 | grep "tier_of" || exit 1 + + ! ceph osd dump | grep "pool.*'basepoolA'" 2>&1 | grep "tiers" || exit 1 + ! ceph osd dump | grep "pool.*'basepoolB'" 2>&1 | grep "tiers" || exit 1 + + ceph osd pool delete cache6 cache6 --yes-i-really-really-mean-it + ceph osd pool delete cache5 cache5 --yes-i-really-really-mean-it + ceph osd pool delete basepoolB basepoolB --yes-i-really-really-mean-it + ceph osd pool delete basepoolA basepoolA --yes-i-really-really-mean-it +} + +function test_auth() +{ + expect_false ceph auth add client.xx mon 'invalid' osd "allow *" + expect_false ceph auth add client.xx mon 'allow *' osd "allow *" invalid "allow *" + ceph auth add client.xx mon 'allow *' osd "allow *" + ceph auth export client.xx >client.xx.keyring + ceph auth add client.xx -i client.xx.keyring + rm -f client.xx.keyring + ceph auth list | grep client.xx + ceph auth ls | grep client.xx + ceph auth get client.xx | grep caps | grep mon + ceph auth get client.xx | grep caps | grep osd + ceph auth get-key client.xx + ceph auth print-key client.xx + ceph auth print_key client.xx + ceph auth caps client.xx osd "allow rw" + expect_false sh <<< "ceph auth get client.xx | grep caps | grep mon" + ceph auth get client.xx | grep osd | grep "allow rw" + ceph auth export | grep client.xx + ceph auth export -o authfile + ceph auth import -i authfile + ceph auth export -o authfile2 + diff authfile authfile2 + rm authfile authfile2 + ceph auth del client.xx + expect_false ceph auth get client.xx + + # (almost) interactive mode + echo -e 'auth add client.xx mon "allow *" osd "allow *"\n' | ceph + ceph auth get client.xx + # script mode + echo 'auth del client.xx' | ceph + expect_false ceph auth get client.xx +} + +function test_auth_profiles() +{ + ceph auth add client.xx-profile-ro mon 'allow profile read-only' \ + mgr 'allow profile read-only' + ceph auth add client.xx-profile-rw mon 'allow profile read-write' \ + mgr 'allow profile read-write' + ceph auth add client.xx-profile-rd mon 'allow profile role-definer' + + ceph auth export > client.xx.keyring + + # read-only is allowed all read-only commands (auth excluded) + ceph -n client.xx-profile-ro -k client.xx.keyring status + ceph -n client.xx-profile-ro -k client.xx.keyring osd dump + ceph -n client.xx-profile-ro -k client.xx.keyring pg dump + ceph -n client.xx-profile-ro -k client.xx.keyring mon dump + # read-only gets access denied for rw commands or auth commands + ceph -n client.xx-profile-ro -k client.xx.keyring log foo >& $TMPFILE || true + check_response "EACCES: access denied" + ceph -n client.xx-profile-ro -k client.xx.keyring osd set noout >& $TMPFILE || true + check_response "EACCES: access denied" + ceph -n client.xx-profile-ro -k client.xx.keyring auth ls >& $TMPFILE || true + check_response "EACCES: access denied" + + # read-write is allowed for all read-write commands (except auth) + ceph -n client.xx-profile-rw -k client.xx.keyring status + ceph -n client.xx-profile-rw -k client.xx.keyring osd dump + ceph -n client.xx-profile-rw -k client.xx.keyring pg dump + ceph -n client.xx-profile-rw -k client.xx.keyring mon dump + ceph -n client.xx-profile-rw -k client.xx.keyring fs dump + ceph -n client.xx-profile-rw -k client.xx.keyring log foo + ceph -n client.xx-profile-rw -k client.xx.keyring osd set noout + ceph -n client.xx-profile-rw -k client.xx.keyring osd unset noout + # 
read-write gets access denied for auth commands + ceph -n client.xx-profile-rw -k client.xx.keyring auth ls >& $TMPFILE || true + check_response "EACCES: access denied" + + # role-definer is allowed RWX 'auth' commands and read-only 'mon' commands + ceph -n client.xx-profile-rd -k client.xx.keyring auth ls + ceph -n client.xx-profile-rd -k client.xx.keyring auth export + ceph -n client.xx-profile-rd -k client.xx.keyring auth add client.xx-profile-foo + ceph -n client.xx-profile-rd -k client.xx.keyring status + ceph -n client.xx-profile-rd -k client.xx.keyring osd dump >& $TMPFILE || true + check_response "EACCES: access denied" + ceph -n client.xx-profile-rd -k client.xx.keyring pg dump >& $TMPFILE || true + check_response "EACCES: access denied" + # read-only 'mon' subsystem commands are allowed + ceph -n client.xx-profile-rd -k client.xx.keyring mon dump + # but read-write 'mon' commands are not + ceph -n client.xx-profile-rd -k client.xx.keyring mon add foo 1.1.1.1 >& $TMPFILE || true + check_response "EACCES: access denied" + ceph -n client.xx-profile-rd -k client.xx.keyring fs dump >& $TMPFILE || true + check_response "EACCES: access denied" + ceph -n client.xx-profile-rd -k client.xx.keyring log foo >& $TMPFILE || true + check_response "EACCES: access denied" + ceph -n client.xx-profile-rd -k client.xx.keyring osd set noout >& $TMPFILE || true + check_response "EACCES: access denied" + + ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-ro + ceph -n client.xx-profile-rd -k client.xx.keyring auth del client.xx-profile-rw + + # add a new role-definer with the existing role-definer + ceph -n client.xx-profile-rd -k client.xx.keyring \ + auth add client.xx-profile-rd2 mon 'allow profile role-definer' + ceph -n client.xx-profile-rd -k client.xx.keyring \ + auth export > client.xx.keyring.2 + # remove old role-definer using the new role-definer + ceph -n client.xx-profile-rd2 -k client.xx.keyring.2 \ + auth del client.xx-profile-rd + # remove the remaining role-definer with admin + ceph auth del client.xx-profile-rd2 + rm -f client.xx.keyring client.xx.keyring.2 +} + +function test_mon_caps() +{ + ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring + chmod +r $TEMP_DIR/ceph.client.bug.keyring + ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key + ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring + + # pass --no-mon-config since we are looking for the permission denied error + rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true + cat $TMPFILE + check_response "Permission denied" + + rm -rf $TEMP_DIR/ceph.client.bug.keyring + ceph auth del client.bug + ceph-authtool --create-keyring $TEMP_DIR/ceph.client.bug.keyring + chmod +r $TEMP_DIR/ceph.client.bug.keyring + ceph-authtool $TEMP_DIR/ceph.client.bug.keyring -n client.bug --gen-key + ceph-authtool -n client.bug --cap mon '' $TEMP_DIR/ceph.client.bug.keyring + ceph auth add client.bug -i $TEMP_DIR/ceph.client.bug.keyring + rados lspools --no-mon-config --keyring $TEMP_DIR/ceph.client.bug.keyring -n client.bug >& $TMPFILE || true + check_response "Permission denied" +} + +function test_mon_misc() +{ + # with and without verbosity + ceph osd dump | grep '^epoch' + ceph --concise osd dump | grep '^epoch' + + ceph osd df | grep 'MIN/MAX VAR' + osd_class=$(ceph osd crush get-device-class 0) + ceph osd df tree class $osd_class | grep 'osd.0' + ceph osd crush rm-device-class 0 + # create class first in case old device 
class may + # have already been automatically destroyed + ceph osd crush class create $osd_class + ceph osd df tree class $osd_class | expect_false grep 'osd.0' + ceph osd crush set-device-class $osd_class 0 + ceph osd df tree name osd.0 | grep 'osd.0' + + # df + ceph df > $TMPFILE + grep RAW $TMPFILE + grep -v DIRTY $TMPFILE + ceph df detail > $TMPFILE + grep DIRTY $TMPFILE + ceph df --format json > $TMPFILE + grep 'total_bytes' $TMPFILE + grep -v 'dirty' $TMPFILE + ceph df detail --format json > $TMPFILE + grep 'rd_bytes' $TMPFILE + grep 'dirty' $TMPFILE + ceph df --format xml | grep '' + ceph df detail --format xml | grep '' + + ceph fsid + ceph health + ceph health detail + ceph health --format json-pretty + ceph health detail --format xml-pretty + + ceph time-sync-status + + ceph node ls + for t in mon osd mds mgr ; do + ceph node ls $t + done + + ceph_watch_start + mymsg="this is a test log message $$.$(date)" + ceph log "$mymsg" + ceph log last | grep "$mymsg" + ceph log last 100 | grep "$mymsg" + ceph_watch_wait "$mymsg" + + ceph mgr dump + ceph mgr module ls + ceph mgr module enable restful + expect_false ceph mgr module enable foodne + ceph mgr module enable foodne --force + ceph mgr module disable foodne + ceph mgr module disable foodnebizbangbash + + ceph mon metadata a + ceph mon metadata + ceph mon count-metadata ceph_version + ceph mon versions + + ceph mgr metadata + ceph mgr versions + ceph mgr count-metadata ceph_version + + ceph versions + + ceph node ls +} + +function check_mds_active() +{ + fs_name=$1 + ceph fs get $fs_name | grep active +} + +function wait_mds_active() +{ + fs_name=$1 + max_run=300 + for i in $(seq 1 $max_run) ; do + if ! check_mds_active $fs_name ; then + echo "waiting for an active MDS daemon ($i/$max_run)" + sleep 5 + else + break + fi + done + check_mds_active $fs_name +} + +function get_mds_gids() +{ + fs_name=$1 + ceph fs get $fs_name --format=json | python -c "import json; import sys; print ' '.join([m['gid'].__str__() for m in json.load(sys.stdin)['mdsmap']['info'].values()])" +} + +function fail_all_mds() +{ + fs_name=$1 + ceph fs set $fs_name cluster_down true + mds_gids=$(get_mds_gids $fs_name) + for mds_gid in $mds_gids ; do + ceph mds fail $mds_gid + done + if check_mds_active $fs_name ; then + echo "An active MDS remains, something went wrong" + ceph fs get $fs_name + exit -1 + fi + +} + +function remove_all_fs() +{ + existing_fs=$(ceph fs ls --format=json | python -c "import json; import sys; print ' '.join([fs['name'] for fs in json.load(sys.stdin)])") + for fs_name in $existing_fs ; do + echo "Removing fs ${fs_name}..." + fail_all_mds $fs_name + echo "Removing existing filesystem '${fs_name}'..." + ceph fs rm $fs_name --yes-i-really-mean-it + echo "Removed '${fs_name}'." + done +} + +# So that tests requiring MDS can skip if one is not configured +# in the cluster at all +function mds_exists() +{ + ceph auth ls | grep "^mds" +} + +# some of the commands are just not idempotent. +function without_test_dup_command() +{ + if [ -z ${CEPH_CLI_TEST_DUP_COMMAND+x} ]; then + $@ + else + local saved=${CEPH_CLI_TEST_DUP_COMMAND} + unset CEPH_CLI_TEST_DUP_COMMAND + $@ + CEPH_CLI_TEST_DUP_COMMAND=saved + fi +} + +function test_mds_tell() +{ + local FS_NAME=cephfs + if ! 
mds_exists ; then + echo "Skipping test, no MDS found" + return + fi + + remove_all_fs + ceph osd pool create fs_data 10 + ceph osd pool create fs_metadata 10 + ceph fs new $FS_NAME fs_metadata fs_data + wait_mds_active $FS_NAME + + # Test injectargs by GID + old_mds_gids=$(get_mds_gids $FS_NAME) + echo Old GIDs: $old_mds_gids + + for mds_gid in $old_mds_gids ; do + ceph tell mds.$mds_gid injectargs "--debug-mds 20" + done + expect_false ceph tell mds.a injectargs mds_max_file_recover -1 + + # Test respawn by rank + without_test_dup_command ceph tell mds.0 respawn + new_mds_gids=$old_mds_gids + while [ $new_mds_gids -eq $old_mds_gids ] ; do + sleep 5 + new_mds_gids=$(get_mds_gids $FS_NAME) + done + echo New GIDs: $new_mds_gids + + # Test respawn by ID + without_test_dup_command ceph tell mds.a respawn + new_mds_gids=$old_mds_gids + while [ $new_mds_gids -eq $old_mds_gids ] ; do + sleep 5 + new_mds_gids=$(get_mds_gids $FS_NAME) + done + echo New GIDs: $new_mds_gids + + remove_all_fs + ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it + ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it +} + +function test_mon_mds() +{ + local FS_NAME=cephfs + remove_all_fs + + ceph osd pool create fs_data 10 + ceph osd pool create fs_metadata 10 + ceph fs new $FS_NAME fs_metadata fs_data + + ceph fs set $FS_NAME cluster_down true + ceph fs set $FS_NAME cluster_down false + + ceph mds compat rm_incompat 4 + ceph mds compat rm_incompat 4 + + # We don't want any MDSs to be up, their activity can interfere with + # the "current_epoch + 1" checking below if they're generating updates + fail_all_mds $FS_NAME + + ceph mds compat show + ceph fs dump + ceph fs get $FS_NAME + for mds_gid in $(get_mds_gids $FS_NAME) ; do + ceph mds metadata $mds_id + done + ceph mds metadata + ceph mds versions + ceph mds count-metadata os + + # XXX mds fail, but how do you undo it? 
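+  # The next block dumps the FSMap to a file and records its epoch.  When
+  # '-o <file>' is used, the CLI writes the map to the file and prints a
+  # short summary (something like "dumped fsmap epoch <N>"); the grep/sed
+  # below strips that down to the bare epoch number, and '[ -s ... ]'
+  # asserts the dump file is non-empty.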
+ mdsmapfile=$TEMP_DIR/mdsmap.$$ + current_epoch=$(ceph fs dump -o $mdsmapfile --no-log-to-stderr 2>&1 | grep epoch | sed 's/.*epoch //') + [ -s $mdsmapfile ] + rm $mdsmapfile + + ceph osd pool create data2 10 + ceph osd pool create data3 10 + data2_pool=$(ceph osd dump | grep "pool.*'data2'" | awk '{print $2;}') + data3_pool=$(ceph osd dump | grep "pool.*'data3'" | awk '{print $2;}') + ceph fs add_data_pool cephfs $data2_pool + ceph fs add_data_pool cephfs $data3_pool + ceph fs add_data_pool cephfs 100 >& $TMPFILE || true + check_response "Error ENOENT" + ceph fs add_data_pool cephfs foobarbaz >& $TMPFILE || true + check_response "Error ENOENT" + ceph fs rm_data_pool cephfs $data2_pool + ceph fs rm_data_pool cephfs $data3_pool + ceph osd pool delete data2 data2 --yes-i-really-really-mean-it + ceph osd pool delete data3 data3 --yes-i-really-really-mean-it + ceph fs set cephfs max_mds 4 + ceph fs set cephfs max_mds 3 + ceph fs set cephfs max_mds 256 + expect_false ceph fs set cephfs max_mds 257 + ceph fs set cephfs max_mds 4 + ceph fs set cephfs max_mds 256 + expect_false ceph fs set cephfs max_mds 257 + expect_false ceph fs set cephfs max_mds asdf + expect_false ceph fs set cephfs inline_data true + ceph fs set cephfs inline_data true --yes-i-really-mean-it + ceph fs set cephfs inline_data yes --yes-i-really-mean-it + ceph fs set cephfs inline_data 1 --yes-i-really-mean-it + expect_false ceph fs set cephfs inline_data --yes-i-really-mean-it + ceph fs set cephfs inline_data false + ceph fs set cephfs inline_data no + ceph fs set cephfs inline_data 0 + expect_false ceph fs set cephfs inline_data asdf + ceph fs set cephfs max_file_size 1048576 + expect_false ceph fs set cephfs max_file_size 123asdf + + expect_false ceph fs set cephfs allow_new_snaps + ceph fs set cephfs allow_new_snaps true + ceph fs set cephfs allow_new_snaps 0 + ceph fs set cephfs allow_new_snaps false + ceph fs set cephfs allow_new_snaps no + expect_false ceph fs set cephfs allow_new_snaps taco + + # we should never be able to add EC pools as data or metadata pools + # create an ec-pool... + ceph osd pool create mds-ec-pool 10 10 erasure + set +e + ceph fs add_data_pool cephfs mds-ec-pool 2>$TMPFILE + check_response 'erasure-code' $? 
22 + set -e + ec_poolnum=$(ceph osd dump | grep "pool.* 'mds-ec-pool" | awk '{print $2;}') + data_poolnum=$(ceph osd dump | grep "pool.* 'fs_data" | awk '{print $2;}') + metadata_poolnum=$(ceph osd dump | grep "pool.* 'fs_metadata" | awk '{print $2;}') + + fail_all_mds $FS_NAME + + set +e + # Check that rmfailed requires confirmation + expect_false ceph mds rmfailed 0 + ceph mds rmfailed 0 --yes-i-really-mean-it + set -e + + # Check that `fs new` is no longer permitted + expect_false ceph fs new cephfs $metadata_poolnum $data_poolnum --yes-i-really-mean-it 2>$TMPFILE + + # Check that 'fs reset' runs + ceph fs reset $FS_NAME --yes-i-really-mean-it + + # Check that creating a second FS fails by default + ceph osd pool create fs_metadata2 10 + ceph osd pool create fs_data2 10 + set +e + expect_false ceph fs new cephfs2 fs_metadata2 fs_data2 + set -e + + # Check that setting enable_multiple enables creation of second fs + ceph fs flag set enable_multiple true --yes-i-really-mean-it + ceph fs new cephfs2 fs_metadata2 fs_data2 + + # Clean up multi-fs stuff + fail_all_mds cephfs2 + ceph fs rm cephfs2 --yes-i-really-mean-it + ceph osd pool delete fs_metadata2 fs_metadata2 --yes-i-really-really-mean-it + ceph osd pool delete fs_data2 fs_data2 --yes-i-really-really-mean-it + + fail_all_mds $FS_NAME + + # Clean up to enable subsequent fs new tests + ceph fs rm $FS_NAME --yes-i-really-mean-it + + set +e + ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE + check_response 'erasure-code' $? 22 + ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE + check_response 'erasure-code' $? 22 + ceph fs new $FS_NAME mds-ec-pool mds-ec-pool 2>$TMPFILE + check_response 'erasure-code' $? 22 + set -e + + # ... new create a cache tier in front of the EC pool... + ceph osd pool create mds-tier 2 + ceph osd tier add mds-ec-pool mds-tier + ceph osd tier set-overlay mds-ec-pool mds-tier + tier_poolnum=$(ceph osd dump | grep "pool.* 'mds-tier" | awk '{print $2;}') + + # Use of a readonly tier should be forbidden + ceph osd tier cache-mode mds-tier readonly --yes-i-really-mean-it + set +e + ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE + check_response 'has a write tier (mds-tier) that is configured to forward' $? 22 + set -e + + # Use of a writeback tier should enable FS creation + ceph osd tier cache-mode mds-tier writeback + ceph fs new $FS_NAME fs_metadata mds-ec-pool --force + + # While a FS exists using the tiered pools, I should not be allowed + # to remove the tier + set +e + ceph osd tier remove-overlay mds-ec-pool 2>$TMPFILE + check_response 'in use by CephFS' $? 16 + ceph osd tier remove mds-ec-pool mds-tier 2>$TMPFILE + check_response 'in use by CephFS' $? 16 + set -e + + fail_all_mds $FS_NAME + ceph fs rm $FS_NAME --yes-i-really-mean-it + + # ... but we should be forbidden from using the cache pool in the FS directly. + set +e + ceph fs new $FS_NAME fs_metadata mds-tier --force 2>$TMPFILE + check_response 'in use as a cache tier' $? 22 + ceph fs new $FS_NAME mds-tier fs_data 2>$TMPFILE + check_response 'in use as a cache tier' $? 22 + ceph fs new $FS_NAME mds-tier mds-tier 2>$TMPFILE + check_response 'in use as a cache tier' $? 
22 + set -e + + # Clean up tier + EC pools + ceph osd tier remove-overlay mds-ec-pool + ceph osd tier remove mds-ec-pool mds-tier + + # Create a FS using the 'cache' pool now that it's no longer a tier + ceph fs new $FS_NAME fs_metadata mds-tier --force + + # We should be forbidden from using this pool as a tier now that + # it's in use for CephFS + set +e + ceph osd tier add mds-ec-pool mds-tier 2>$TMPFILE + check_response 'in use by CephFS' $? 16 + set -e + + fail_all_mds $FS_NAME + ceph fs rm $FS_NAME --yes-i-really-mean-it + + # We should be permitted to use an EC pool with overwrites enabled + # as the data pool... + ceph osd pool set mds-ec-pool allow_ec_overwrites true + ceph fs new $FS_NAME fs_metadata mds-ec-pool --force 2>$TMPFILE + fail_all_mds $FS_NAME + ceph fs rm $FS_NAME --yes-i-really-mean-it + + # ...but not as the metadata pool + set +e + ceph fs new $FS_NAME mds-ec-pool fs_data 2>$TMPFILE + check_response 'erasure-code' $? 22 + set -e + + ceph osd pool delete mds-ec-pool mds-ec-pool --yes-i-really-really-mean-it + + # Create a FS and check that we can subsequently add a cache tier to it + ceph fs new $FS_NAME fs_metadata fs_data --force + + # Adding overlay to FS pool should be permitted, RADOS clients handle this. + ceph osd tier add fs_metadata mds-tier + ceph osd tier cache-mode mds-tier writeback + ceph osd tier set-overlay fs_metadata mds-tier + + # Removing tier should be permitted because the underlying pool is + # replicated (#11504 case) + ceph osd tier cache-mode mds-tier proxy + ceph osd tier remove-overlay fs_metadata + ceph osd tier remove fs_metadata mds-tier + ceph osd pool delete mds-tier mds-tier --yes-i-really-really-mean-it + + # Clean up FS + fail_all_mds $FS_NAME + ceph fs rm $FS_NAME --yes-i-really-mean-it + + + + ceph mds stat + # ceph mds tell mds.a getmap + # ceph mds rm + # ceph mds rmfailed + # ceph mds set_state + + ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it + ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it +} + +function test_mon_mds_metadata() +{ + local nmons=$(ceph tell 'mon.*' version | grep -c 'version') + test "$nmons" -gt 0 + + ceph fs dump | + sed -nEe "s/^([0-9]+):.*'([a-z])' mds\\.([0-9]+)\\..*/\\1 \\2 \\3/p" | + while read gid id rank; do + ceph mds metadata ${gid} | grep '"hostname":' + ceph mds metadata ${id} | grep '"hostname":' + ceph mds metadata ${rank} | grep '"hostname":' + + local n=$(ceph tell 'mon.*' mds metadata ${id} | grep -c '"hostname":') + test "$n" -eq "$nmons" + done + + expect_false ceph mds metadata UNKNOWN +} + +function test_mon_mon() +{ + # print help message + ceph --help mon + # no mon add/remove + ceph mon dump + ceph mon getmap -o $TEMP_DIR/monmap.$$ + [ -s $TEMP_DIR/monmap.$$ ] + # ceph mon tell + ceph mon_status + + # test mon features + ceph mon feature ls + ceph mon feature set kraken --yes-i-really-mean-it + expect_false ceph mon feature set abcd + expect_false ceph mon feature set abcd --yes-i-really-mean-it + + # test mon stat + # don't check output, just ensure it does not fail. + ceph mon stat + ceph mon stat -f json | jq '.' 
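+  # piping through jq '.' only verifies that the JSON output parses;
+  # as noted above, the actual content is intentionally not checked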
+} + +function gen_secrets_file() +{ + # lets assume we can have the following types + # all - generates both cephx and lockbox, with mock dm-crypt key + # cephx - only cephx + # no_cephx - lockbox and dm-crypt, no cephx + # no_lockbox - dm-crypt and cephx, no lockbox + # empty - empty file + # empty_json - correct json, empty map + # bad_json - bad json :) + # + local t=$1 + if [[ -z "$t" ]]; then + t="all" + fi + + fn=$(mktemp $TEMP_DIR/secret.XXXXXX) + echo $fn + if [[ "$t" == "empty" ]]; then + return 0 + fi + + echo "{" > $fn + if [[ "$t" == "bad_json" ]]; then + echo "asd: ; }" >> $fn + return 0 + elif [[ "$t" == "empty_json" ]]; then + echo "}" >> $fn + return 0 + fi + + cephx_secret="\"cephx_secret\": \"$(ceph-authtool --gen-print-key)\"" + lb_secret="\"cephx_lockbox_secret\": \"$(ceph-authtool --gen-print-key)\"" + dmcrypt_key="\"dmcrypt_key\": \"$(ceph-authtool --gen-print-key)\"" + + if [[ "$t" == "all" ]]; then + echo "$cephx_secret,$lb_secret,$dmcrypt_key" >> $fn + elif [[ "$t" == "cephx" ]]; then + echo "$cephx_secret" >> $fn + elif [[ "$t" == "no_cephx" ]]; then + echo "$lb_secret,$dmcrypt_key" >> $fn + elif [[ "$t" == "no_lockbox" ]]; then + echo "$cephx_secret,$dmcrypt_key" >> $fn + else + echo "unknown gen_secrets_file() type \'$fn\'" + return 1 + fi + echo "}" >> $fn + return 0 +} + +function test_mon_osd_create_destroy() +{ + ceph osd new 2>&1 | grep 'EINVAL' + ceph osd new '' -1 2>&1 | grep 'EINVAL' + ceph osd new '' 10 2>&1 | grep 'EINVAL' + + old_maxosd=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//') + + old_osds=$(ceph osd ls) + num_osds=$(ceph osd ls | wc -l) + + uuid=$(uuidgen) + id=$(ceph osd new $uuid 2>/dev/null) + + for i in $old_osds; do + [[ "$i" != "$id" ]] + done + + ceph osd find $id + + id2=`ceph osd new $uuid 2>/dev/null` + + [[ $id2 == $id ]] + + ceph osd new $uuid $id + + id3=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//') + ceph osd new $uuid $((id3+1)) 2>&1 | grep EEXIST + + uuid2=$(uuidgen) + id2=$(ceph osd new $uuid2) + ceph osd find $id2 + [[ "$id2" != "$id" ]] + + ceph osd new $uuid $id2 2>&1 | grep EEXIST + ceph osd new $uuid2 $id2 + + # test with secrets + empty_secrets=$(gen_secrets_file "empty") + empty_json=$(gen_secrets_file "empty_json") + all_secrets=$(gen_secrets_file "all") + cephx_only=$(gen_secrets_file "cephx") + no_cephx=$(gen_secrets_file "no_cephx") + no_lockbox=$(gen_secrets_file "no_lockbox") + bad_json=$(gen_secrets_file "bad_json") + + # empty secrets should be idempotent + new_id=$(ceph osd new $uuid $id -i $empty_secrets) + [[ "$new_id" == "$id" ]] + + # empty json, thus empty secrets + new_id=$(ceph osd new $uuid $id -i $empty_json) + [[ "$new_id" == "$id" ]] + + ceph osd new $uuid $id -i $all_secrets 2>&1 | grep 'EEXIST' + + ceph osd rm $id + ceph osd rm $id2 + ceph osd setmaxosd $old_maxosd + + ceph osd new $uuid -i $no_cephx 2>&1 | grep 'EINVAL' + ceph osd new $uuid -i $no_lockbox 2>&1 | grep 'EINVAL' + + osds=$(ceph osd ls) + id=$(ceph osd new $uuid -i $all_secrets) + for i in $osds; do + [[ "$i" != "$id" ]] + done + + ceph osd find $id + + # validate secrets and dm-crypt are set + k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key') + s=$(cat $all_secrets | jq '.cephx_secret') + [[ $k == $s ]] + k=$(ceph auth get-key client.osd-lockbox.$uuid --format=json-pretty 2>/dev/null | \ + jq '.key') + s=$(cat $all_secrets | jq '.cephx_lockbox_secret') + [[ $k == $s ]] + ceph config-key exists dm-crypt/osd/$uuid/luks + + osds=$(ceph osd ls) + id2=$(ceph osd new 
$uuid2 -i $cephx_only) + for i in $osds; do + [[ "$i" != "$id2" ]] + done + + ceph osd find $id2 + k=$(ceph auth get-key osd.$id --format=json-pretty 2>/dev/null | jq '.key') + s=$(cat $all_secrets | jq '.cephx_secret') + [[ $k == $s ]] + expect_false ceph auth get-key client.osd-lockbox.$uuid2 + expect_false ceph config-key exists dm-crypt/osd/$uuid2/luks + + ceph osd destroy osd.$id2 --yes-i-really-mean-it + ceph osd destroy $id2 --yes-i-really-mean-it + ceph osd find $id2 + expect_false ceph auth get-key osd.$id2 + ceph osd dump | grep osd.$id2 | grep destroyed + + id3=$id2 + uuid3=$(uuidgen) + ceph osd new $uuid3 $id3 -i $all_secrets + ceph osd dump | grep osd.$id3 | expect_false grep destroyed + ceph auth get-key client.osd-lockbox.$uuid3 + ceph auth get-key osd.$id3 + ceph config-key exists dm-crypt/osd/$uuid3/luks + + ceph osd purge-new osd.$id3 --yes-i-really-mean-it + expect_false ceph osd find $id2 + expect_false ceph auth get-key osd.$id2 + expect_false ceph auth get-key client.osd-lockbox.$uuid3 + expect_false ceph config-key exists dm-crypt/osd/$uuid3/luks + ceph osd purge osd.$id3 --yes-i-really-mean-it + ceph osd purge-new osd.$id3 --yes-i-really-mean-it # idempotent + + ceph osd purge osd.$id --yes-i-really-mean-it + ceph osd purge 123456 --yes-i-really-mean-it + expect_false ceph osd find $id + expect_false ceph auth get-key osd.$id + expect_false ceph auth get-key client.osd-lockbox.$uuid + expect_false ceph config-key exists dm-crypt/osd/$uuid/luks + + rm $empty_secrets $empty_json $all_secrets $cephx_only \ + $no_cephx $no_lockbox $bad_json + + for i in $(ceph osd ls); do + [[ "$i" != "$id" ]] + [[ "$i" != "$id2" ]] + [[ "$i" != "$id3" ]] + done + + [[ "$(ceph osd ls | wc -l)" == "$num_osds" ]] + ceph osd setmaxosd $old_maxosd + +} + +function test_mon_config_key() +{ + key=asdfasdfqwerqwreasdfuniquesa123df + ceph config-key list | grep -c $key | grep 0 + ceph config-key get $key | grep -c bar | grep 0 + ceph config-key set $key bar + ceph config-key get $key | grep bar + ceph config-key list | grep -c $key | grep 1 + ceph config-key dump | grep $key | grep bar + ceph config-key rm $key + expect_false ceph config-key get $key + ceph config-key list | grep -c $key | grep 0 + ceph config-key dump | grep -c $key | grep 0 +} + +function test_mon_osd() +{ + # + # osd blacklist + # + bl=192.168.0.1:0/1000 + ceph osd blacklist add $bl + ceph osd blacklist ls | grep $bl + ceph osd blacklist ls --format=json-pretty | sed 's/\\\//\//' | grep $bl + ceph osd dump --format=json-pretty | grep $bl + ceph osd dump | grep $bl + ceph osd blacklist rm $bl + ceph osd blacklist ls | expect_false grep $bl + + bl=192.168.0.1 + # test without nonce, invalid nonce + ceph osd blacklist add $bl + ceph osd blacklist ls | grep $bl + ceph osd blacklist rm $bl + ceph osd blacklist ls | expect_false grep $bl + expect_false "ceph osd blacklist $bl/-1" + expect_false "ceph osd blacklist $bl/foo" + + # test with wrong address + expect_false "ceph osd blacklist 1234.56.78.90/100" + + # Test `clear` + ceph osd blacklist add $bl + ceph osd blacklist ls | grep $bl + ceph osd blacklist clear + ceph osd blacklist ls | expect_false grep $bl + + # + # osd crush + # + ceph osd crush reweight-all + ceph osd crush tunables legacy + ceph osd crush show-tunables | grep argonaut + ceph osd crush tunables bobtail + ceph osd crush show-tunables | grep bobtail + ceph osd crush tunables firefly + ceph osd crush show-tunables | grep firefly + + ceph osd crush set-tunable straw_calc_version 0 + ceph osd crush get-tunable 
straw_calc_version | grep 0 + ceph osd crush set-tunable straw_calc_version 1 + ceph osd crush get-tunable straw_calc_version | grep 1 + + # + # require-min-compat-client + expect_false ceph osd set-require-min-compat-client dumpling # firefly tunables + ceph osd set-require-min-compat-client luminous + ceph osd get-require-min-compat-client | grep luminous + ceph osd dump | grep 'require_min_compat_client luminous' + + # + # osd scrub + # + + # blocking + ceph osd scrub 0 --block + ceph osd deep-scrub 0 --block + + # how do I tell when these are done? + ceph osd scrub 0 + ceph osd deep-scrub 0 + ceph osd repair 0 + + # pool scrub, force-recovery/backfill + pool_names=`rados lspools` + for pool_name in $pool_names + do + ceph osd pool scrub $pool_name + ceph osd pool deep-scrub $pool_name + ceph osd pool repair $pool_name + ceph osd pool force-recovery $pool_name + ceph osd pool cancel-force-recovery $pool_name + ceph osd pool force-backfill $pool_name + ceph osd pool cancel-force-backfill $pool_name + done + + for f in noup nodown noin noout noscrub nodeep-scrub nobackfill \ + norebalance norecover notieragent full + do + ceph osd set $f + ceph osd unset $f + done + expect_false ceph osd set bogus + expect_false ceph osd unset bogus + for f in sortbitwise recover_deletes require_jewel_osds \ + require_kraken_osds + do + expect_false ceph osd set $f + expect_false ceph osd unset $f + done + ceph osd require-osd-release nautilus + # can't lower (or use new command for anything but jewel) + expect_false ceph osd require-osd-release jewel + # these are no-ops but should succeed. + + ceph osd set noup + ceph osd down 0 + ceph osd dump | grep 'osd.0 down' + ceph osd unset noup + max_run=1000 + for ((i=0; i < $max_run; i++)); do + if ! ceph osd dump | grep 'osd.0 up'; then + echo "waiting for osd.0 to come back up ($i/$max_run)" + sleep 1 + else + break + fi + done + ceph osd dump | grep 'osd.0 up' + + ceph osd dump | grep 'osd.0 up' + # ceph osd find expects the OsdName, so both ints and osd.n should work. + ceph osd find 1 + ceph osd find osd.1 + expect_false ceph osd find osd.xyz + expect_false ceph osd find xyz + expect_false ceph osd find 0.1 + ceph --format plain osd find 1 # falls back to json-pretty + if [ `uname` == Linux ]; then + ceph osd metadata 1 | grep 'distro' + ceph --format plain osd metadata 1 | grep 'distro' # falls back to json-pretty + fi + ceph osd out 0 + ceph osd dump | grep 'osd.0.*out' + ceph osd in 0 + ceph osd dump | grep 'osd.0.*in' + ceph osd find 0 + + ceph osd add-nodown 0 1 + ceph health detail | grep 'NODOWN' + ceph osd rm-nodown 0 1 + ! ceph health detail | grep 'NODOWN' + + ceph osd out 0 # so we can mark it as noin later + ceph osd add-noin 0 + ceph health detail | grep 'NOIN' + ceph osd rm-noin 0 + ! ceph health detail | grep 'NOIN' + ceph osd in 0 + + ceph osd add-noout 0 + ceph health detail | grep 'NOOUT' + ceph osd rm-noout 0 + ! ceph health detail | grep 'NOOUT' + + # test osd id parse + expect_false ceph osd add-noup 797er + expect_false ceph osd add-nodown u9uwer + expect_false ceph osd add-noin 78~15 + + expect_false ceph osd rm-noup 1234567 + expect_false ceph osd rm-nodown fsadf7 + expect_false ceph osd rm-noout 790-fd + + ids=`ceph osd ls-tree default` + for osd in $ids + do + ceph osd add-nodown $osd + ceph osd add-noout $osd + done + ceph -s | grep 'NODOWN' + ceph -s | grep 'NOOUT' + ceph osd rm-nodown any + ceph osd rm-noout all + ! ceph -s | grep 'NODOWN' + ! 
ceph -s | grep 'NOOUT' + + # test crush node flags + ceph osd add-noup osd.0 + ceph osd add-nodown osd.0 + ceph osd add-noin osd.0 + ceph osd add-noout osd.0 + ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0" + ceph osd rm-noup osd.0 + ceph osd rm-nodown osd.0 + ceph osd rm-noin osd.0 + ceph osd rm-noout osd.0 + ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep "osd.0" + + ceph osd crush add-bucket foo host root=default + ceph osd add-noup foo + ceph osd add-nodown foo + ceph osd add-noin foo + ceph osd add-noout foo + ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo + ceph osd rm-noup foo + ceph osd rm-nodown foo + ceph osd rm-noin foo + ceph osd rm-noout foo + ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo + ceph osd add-noup foo + ceph osd dump -f json-pretty | jq ".crush_node_flags" | grep foo + ceph osd crush rm foo + ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep foo + + ceph osd set-group noup osd.0 + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup' + ceph osd set-group noup,nodown osd.0 + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup' + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown' + ceph osd set-group noup,nodown,noin osd.0 + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup' + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown' + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin' + ceph osd set-group noup,nodown,noin,noout osd.0 + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup' + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown' + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin' + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout' + ceph osd unset-group noup osd.0 + ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup' + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown' + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin' + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout' + ceph osd unset-group noup,nodown osd.0 + ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown' + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin' + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout' + ceph osd unset-group noup,nodown,noin osd.0 + ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin' + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout' + ceph osd unset-group noup,nodown,noin,noout osd.0 + ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup\|nodown\|noin\|noout' + + ceph osd set-group noup,nodown,noin,noout osd.0 osd.1 + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup' + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'nodown' + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noin' + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noout' + ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noup' + ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'nodown' + ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noin' + ceph osd dump -f json-pretty | jq ".osds[1].state" | grep 'noout' + ceph osd unset-group noup,nodown,noin,noout osd.0 osd.1 + ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 
'noup\|nodown\|noin\|noout' + ceph osd dump -f json-pretty | jq ".osds[1].state" | expect_false grep 'noup\|nodown\|noin\|noout' + + ceph osd set-group noup all + ceph osd dump -f json-pretty | jq ".osds[0].state" | grep 'noup' + ceph osd unset-group noup all + ceph osd dump -f json-pretty | jq ".osds[0].state" | expect_false grep 'noup' + + # crush node flags + ceph osd crush add-bucket foo host root=default + ceph osd set-group noup foo + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup' + ceph osd set-group noup,nodown foo + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup' + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown' + ceph osd set-group noup,nodown,noin foo + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup' + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown' + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin' + ceph osd set-group noup,nodown,noin,noout foo + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup' + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown' + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin' + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout' + + ceph osd unset-group noup foo + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup' + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown' + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin' + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout' + ceph osd unset-group noup,nodown foo + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown' + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin' + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout' + ceph osd unset-group noup,nodown,noin foo + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin' + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout' + ceph osd unset-group noup,nodown,noin,noout foo + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | expect_false grep 'noup\|nodown\|noin\|noout' + + ceph osd set-group noin,noout foo + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin' + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout' + ceph osd unset-group noin,noout foo + ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo' + + ceph osd set-group noup,nodown,noin,noout foo + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noup' + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'nodown' + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noin' + ceph osd dump -f json-pretty | jq ".crush_node_flags.foo" | grep 'noout' + ceph osd crush rm foo + ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep 'foo' + + # test device class flags + osd_0_device_class=$(ceph osd crush get-device-class osd.0) + ceph osd set-group noup $osd_0_device_class + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup' + ceph osd set-group noup,nodown $osd_0_device_class + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup' + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown' 
+ ceph osd set-group noup,nodown,noin $osd_0_device_class + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup' + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown' + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin' + ceph osd set-group noup,nodown,noin,noout $osd_0_device_class + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noup' + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown' + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin' + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout' + + ceph osd unset-group noup $osd_0_device_class + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup' + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'nodown' + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin' + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout' + ceph osd unset-group noup,nodown $osd_0_device_class + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown' + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin' + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout' + ceph osd unset-group noup,nodown,noin $osd_0_device_class + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin' + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout' + ceph osd unset-group noup,nodown,noin,noout $osd_0_device_class + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | expect_false grep 'noup\|nodown\|noin\|noout' + + ceph osd set-group noin,noout $osd_0_device_class + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noin' + ceph osd dump -f json-pretty | jq ".device_class_flags.$osd_0_device_class" | grep 'noout' + ceph osd unset-group noin,noout $osd_0_device_class + ceph osd dump -f json-pretty | jq ".crush_node_flags" | expect_false grep $osd_0_device_class + + # make sure mark out preserves weight + ceph osd reweight osd.0 .5 + ceph osd dump | grep ^osd.0 | grep 'weight 0.5' + ceph osd out 0 + ceph osd in 0 + ceph osd dump | grep ^osd.0 | grep 'weight 0.5' + + ceph osd getmap -o $f + [ -s $f ] + rm $f + save=$(ceph osd getmaxosd | sed -e 's/max_osd = //' -e 's/ in epoch.*//') + [ "$save" -gt 0 ] + ceph osd setmaxosd $((save - 1)) 2>&1 | grep 'EBUSY' + ceph osd setmaxosd 10 + ceph osd getmaxosd | grep 'max_osd = 10' + ceph osd setmaxosd $save + ceph osd getmaxosd | grep "max_osd = $save" + + for id in `ceph osd ls` ; do + retry_eagain 5 map_enxio_to_eagain ceph tell osd.$id version + done + + ceph osd rm 0 2>&1 | grep 'EBUSY' + + local old_osds=$(echo $(ceph osd ls)) + id=`ceph osd create` + ceph osd find $id + ceph osd lost $id --yes-i-really-mean-it + expect_false ceph osd setmaxosd $id + local new_osds=$(echo $(ceph osd ls)) + for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do + ceph osd rm $id + done + + uuid=`uuidgen` + id=`ceph osd create $uuid` + id2=`ceph osd create $uuid` + [ "$id" = "$id2" ] + ceph osd rm $id + + ceph --help osd + + # reset max_osd. 
+ ceph osd setmaxosd $id + ceph osd getmaxosd | grep "max_osd = $save" + local max_osd=$save + + ceph osd create $uuid 0 2>&1 | grep 'EINVAL' + ceph osd create $uuid $((max_osd - 1)) 2>&1 | grep 'EINVAL' + + id=`ceph osd create $uuid $max_osd` + [ "$id" = "$max_osd" ] + ceph osd find $id + max_osd=$((max_osd + 1)) + ceph osd getmaxosd | grep "max_osd = $max_osd" + + ceph osd create $uuid $((id - 1)) 2>&1 | grep 'EEXIST' + ceph osd create $uuid $((id + 1)) 2>&1 | grep 'EEXIST' + id2=`ceph osd create $uuid` + [ "$id" = "$id2" ] + id2=`ceph osd create $uuid $id` + [ "$id" = "$id2" ] + + uuid=`uuidgen` + local gap_start=$max_osd + id=`ceph osd create $uuid $((gap_start + 100))` + [ "$id" = "$((gap_start + 100))" ] + max_osd=$((id + 1)) + ceph osd getmaxosd | grep "max_osd = $max_osd" + + ceph osd create $uuid $gap_start 2>&1 | grep 'EEXIST' + + # + # When CEPH_CLI_TEST_DUP_COMMAND is set, osd create + # is repeated and consumes two osd id, not just one. + # + local next_osd=$gap_start + id=`ceph osd create $(uuidgen)` + [ "$id" = "$next_osd" ] + + next_osd=$((id + 1)) + id=`ceph osd create $(uuidgen) $next_osd` + [ "$id" = "$next_osd" ] + + local new_osds=$(echo $(ceph osd ls)) + for id in $(echo $new_osds | sed -e "s/$old_osds//") ; do + [ $id -ge $save ] + ceph osd rm $id + done + ceph osd setmaxosd $save + + ceph osd ls + ceph osd pool create data 10 + ceph osd pool application enable data rados + ceph osd lspools | grep data + ceph osd map data foo | grep 'pool.*data.*object.*foo.*pg.*up.*acting' + ceph osd map data foo namespace| grep 'pool.*data.*object.*namespace/foo.*pg.*up.*acting' + ceph osd pool delete data data --yes-i-really-really-mean-it + + ceph osd pause + ceph osd dump | grep 'flags.*pauserd,pausewr' + ceph osd unpause + + ceph osd tree + ceph osd tree up + ceph osd tree down + ceph osd tree in + ceph osd tree out + ceph osd tree destroyed + ceph osd tree up in + ceph osd tree up out + ceph osd tree down in + ceph osd tree down out + ceph osd tree out down + expect_false ceph osd tree up down + expect_false ceph osd tree up destroyed + expect_false ceph osd tree down destroyed + expect_false ceph osd tree up down destroyed + expect_false ceph osd tree in out + expect_false ceph osd tree up foo + + ceph osd metadata + ceph osd count-metadata os + ceph osd versions + + ceph osd perf + ceph osd blocked-by + + ceph osd stat | grep up +} + +function test_mon_crush() +{ + f=$TEMP_DIR/map.$$ + epoch=$(ceph osd getcrushmap -o $f 2>&1 | tail -n1) + [ -s $f ] + [ "$epoch" -gt 1 ] + nextepoch=$(( $epoch + 1 )) + echo epoch $epoch nextepoch $nextepoch + rm -f $f.epoch + expect_false ceph osd setcrushmap $nextepoch -i $f + gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1) + echo gotepoch $gotepoch + [ "$gotepoch" -eq "$nextepoch" ] + # should be idempotent + gotepoch=$(ceph osd setcrushmap $epoch -i $f 2>&1 | tail -n1) + echo epoch $gotepoch + [ "$gotepoch" -eq "$nextepoch" ] + rm $f +} + +function test_mon_osd_pool() +{ + # + # osd pool + # + ceph osd pool create data 10 + ceph osd pool application enable data rados + ceph osd pool mksnap data datasnap + rados -p data lssnap | grep datasnap + ceph osd pool rmsnap data datasnap + expect_false ceph osd pool rmsnap pool_fake snapshot + ceph osd pool delete data data --yes-i-really-really-mean-it + + ceph osd pool create data2 10 + ceph osd pool application enable data2 rados + ceph osd pool rename data2 data3 + ceph osd lspools | grep data3 + ceph osd pool delete data3 data3 --yes-i-really-really-mean-it + + ceph osd pool create 
replicated 12 12 replicated + ceph osd pool create replicated 12 12 replicated + ceph osd pool create replicated 12 12 # default is replicated + ceph osd pool create replicated 12 # default is replicated, pgp_num = pg_num + ceph osd pool application enable replicated rados + # should fail because the type is not the same + expect_false ceph osd pool create replicated 12 12 erasure + ceph osd lspools | grep replicated + ceph osd pool create ec_test 1 1 erasure + ceph osd pool application enable ec_test rados + set +e + ceph osd count-metadata osd_objectstore | grep 'bluestore' + if [ $? -eq 1 ]; then # enable ec_overwrites on non-bluestore pools should fail + ceph osd pool set ec_test allow_ec_overwrites true >& $TMPFILE + check_response "pool must only be stored on bluestore for scrubbing to work" $? 22 + else + ceph osd pool set ec_test allow_ec_overwrites true || return 1 + expect_false ceph osd pool set ec_test allow_ec_overwrites false + fi + set -e + ceph osd pool delete replicated replicated --yes-i-really-really-mean-it + ceph osd pool delete ec_test ec_test --yes-i-really-really-mean-it + + # test create pool with rule + ceph osd erasure-code-profile set foo foo + ceph osd erasure-code-profile ls | grep foo + ceph osd crush rule create-erasure foo foo + ceph osd pool create erasure 12 12 erasure foo + expect_false ceph osd erasure-code-profile rm foo + ceph osd pool delete erasure erasure --yes-i-really-really-mean-it + ceph osd crush rule rm foo + ceph osd erasure-code-profile rm foo + +} + +function test_mon_osd_pool_quota() +{ + # + # test osd pool set/get quota + # + + # create tmp pool + ceph osd pool create tmp-quota-pool 36 + ceph osd pool application enable tmp-quota-pool rados + # + # set erroneous quotas + # + expect_false ceph osd pool set-quota tmp-quota-pool max_fooness 10 + expect_false ceph osd pool set-quota tmp-quota-pool max_bytes -1 + expect_false ceph osd pool set-quota tmp-quota-pool max_objects aaa + # + # set valid quotas + # + ceph osd pool set-quota tmp-quota-pool max_bytes 10 + ceph osd pool set-quota tmp-quota-pool max_objects 10M + # + # get quotas in json-pretty format + # + ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \ + grep '"quota_max_objects":.*10000000' + ceph osd pool get-quota tmp-quota-pool --format=json-pretty | \ + grep '"quota_max_bytes":.*10' + # + # get quotas + # + ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 B' + ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*10.*M objects' + # + # set valid quotas with unit prefix + # + ceph osd pool set-quota tmp-quota-pool max_bytes 10K + # + # get quotas + # + ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki' + # + # set valid quotas with unit prefix + # + ceph osd pool set-quota tmp-quota-pool max_bytes 10Ki + # + # get quotas + # + ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*10 Ki' + # + # + # reset pool quotas + # + ceph osd pool set-quota tmp-quota-pool max_bytes 0 + ceph osd pool set-quota tmp-quota-pool max_objects 0 + # + # test N/A quotas + # + ceph osd pool get-quota tmp-quota-pool | grep 'max bytes.*N/A' + ceph osd pool get-quota tmp-quota-pool | grep 'max objects.*N/A' + # + # cleanup tmp pool + ceph osd pool delete tmp-quota-pool tmp-quota-pool --yes-i-really-really-mean-it +} + +function test_mon_pg() +{ + # Make sure we start healthy. 
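+  # wait_for_health_ok, wait_for_clean and similar helpers are not defined
+  # in this file; they presumably come from the shared qa helper script
+  # sourced at the top of this test.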
+ wait_for_health_ok + + ceph pg debug unfound_objects_exist + ceph pg debug degraded_pgs_exist + ceph pg deep-scrub 1.0 + ceph pg dump + ceph pg dump pgs_brief --format=json + ceph pg dump pgs --format=json + ceph pg dump pools --format=json + ceph pg dump osds --format=json + ceph pg dump sum --format=json + ceph pg dump all --format=json + ceph pg dump pgs_brief osds --format=json + ceph pg dump pools osds pgs_brief --format=json + ceph pg dump_json + ceph pg dump_pools_json + ceph pg dump_stuck inactive + ceph pg dump_stuck unclean + ceph pg dump_stuck stale + ceph pg dump_stuck undersized + ceph pg dump_stuck degraded + ceph pg ls + ceph pg ls 1 + ceph pg ls stale + expect_false ceph pg ls scrubq + ceph pg ls active stale repair recovering + ceph pg ls 1 active + ceph pg ls 1 active stale + ceph pg ls-by-primary osd.0 + ceph pg ls-by-primary osd.0 1 + ceph pg ls-by-primary osd.0 active + ceph pg ls-by-primary osd.0 active stale + ceph pg ls-by-primary osd.0 1 active stale + ceph pg ls-by-osd osd.0 + ceph pg ls-by-osd osd.0 1 + ceph pg ls-by-osd osd.0 active + ceph pg ls-by-osd osd.0 active stale + ceph pg ls-by-osd osd.0 1 active stale + ceph pg ls-by-pool rbd + ceph pg ls-by-pool rbd active stale + # can't test this... + # ceph pg force_create_pg + ceph pg getmap -o $TEMP_DIR/map.$$ + [ -s $TEMP_DIR/map.$$ ] + ceph pg map 1.0 | grep acting + ceph pg repair 1.0 + ceph pg scrub 1.0 + + ceph osd set-full-ratio .962 + ceph osd dump | grep '^full_ratio 0.962' + ceph osd set-backfillfull-ratio .912 + ceph osd dump | grep '^backfillfull_ratio 0.912' + ceph osd set-nearfull-ratio .892 + ceph osd dump | grep '^nearfull_ratio 0.892' + + # Check health status + ceph osd set-nearfull-ratio .913 + ceph health -f json | grep OSD_OUT_OF_ORDER_FULL + ceph health detail | grep OSD_OUT_OF_ORDER_FULL + ceph osd set-nearfull-ratio .892 + ceph osd set-backfillfull-ratio .963 + ceph health -f json | grep OSD_OUT_OF_ORDER_FULL + ceph health detail | grep OSD_OUT_OF_ORDER_FULL + ceph osd set-backfillfull-ratio .912 + + # Check injected full results + $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull nearfull + wait_for_health "OSD_NEARFULL" + ceph health detail | grep "osd.0 is near full" + $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none + wait_for_health_ok + + $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull backfillfull + wait_for_health "OSD_BACKFILLFULL" + ceph health detail | grep "osd.1 is backfill full" + $SUDO ceph --admin-daemon $(get_admin_socket osd.1) injectfull none + wait_for_health_ok + + $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull failsafe + # failsafe and full are the same as far as the monitor is concerned + wait_for_health "OSD_FULL" + ceph health detail | grep "osd.2 is full" + $SUDO ceph --admin-daemon $(get_admin_socket osd.2) injectfull none + wait_for_health_ok + + $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull full + wait_for_health "OSD_FULL" + ceph health detail | grep "osd.0 is full" + $SUDO ceph --admin-daemon $(get_admin_socket osd.0) injectfull none + wait_for_health_ok + + ceph pg stat | grep 'pgs:' + ceph pg 1.0 query + ceph tell 1.0 query + ceph quorum enter + ceph quorum_status + ceph report | grep osd_stats + ceph status + ceph -s + + # + # tell osd version + # + ceph tell osd.0 version + expect_false ceph tell osd.9999 version + expect_false ceph tell osd.foo version + + # back to pg stuff + + ceph tell osd.0 dump_pg_recovery_stats | grep Started + + ceph osd reweight 0 0.9 + expect_false 
ceph osd reweight 0 -1 + ceph osd reweight osd.0 1 + + ceph osd primary-affinity osd.0 .9 + expect_false ceph osd primary-affinity osd.0 -2 + expect_false ceph osd primary-affinity osd.9999 .5 + ceph osd primary-affinity osd.0 1 + + ceph osd pool set rbd size 2 + ceph osd pg-temp 1.0 0 1 + ceph osd pg-temp 1.0 osd.1 osd.0 + expect_false ceph osd pg-temp 1.0 0 1 2 + expect_false ceph osd pg-temp asdf qwer + expect_false ceph osd pg-temp 1.0 asdf + ceph osd pg-temp 1.0 # cleanup pg-temp + + ceph pg repeer 1.0 + expect_false ceph pg repeer 0.0 # pool 0 shouldn't exist anymore + + # don't test ceph osd primary-temp for now +} + +function test_mon_osd_pool_set() +{ + TEST_POOL_GETSET=pool_getset + ceph osd pool create $TEST_POOL_GETSET 1 + ceph osd pool application enable $TEST_POOL_GETSET rados + ceph osd pool set $TEST_POOL_GETSET pg_autoscale_mode off + wait_for_clean + ceph osd pool get $TEST_POOL_GETSET all + + for s in pg_num pgp_num size min_size crush_rule; do + ceph osd pool get $TEST_POOL_GETSET $s + done + + old_size=$(ceph osd pool get $TEST_POOL_GETSET size | sed -e 's/size: //') + (( new_size = old_size + 1 )) + ceph osd pool set $TEST_POOL_GETSET size $new_size + ceph osd pool get $TEST_POOL_GETSET size | grep "size: $new_size" + ceph osd pool set $TEST_POOL_GETSET size $old_size + + ceph osd pool create pool_erasure 1 1 erasure + ceph osd pool application enable pool_erasure rados + wait_for_clean + set +e + ceph osd pool set pool_erasure size 4444 2>$TMPFILE + check_response 'not change the size' + set -e + ceph osd pool get pool_erasure erasure_code_profile + + for flag in nodelete nopgchange nosizechange write_fadvise_dontneed noscrub nodeep-scrub; do + ceph osd pool set $TEST_POOL_GETSET $flag false + ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false" + ceph osd pool set $TEST_POOL_GETSET $flag true + ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true" + ceph osd pool set $TEST_POOL_GETSET $flag 1 + ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: true" + ceph osd pool set $TEST_POOL_GETSET $flag 0 + ceph osd pool get $TEST_POOL_GETSET $flag | grep "$flag: false" + expect_false ceph osd pool set $TEST_POOL_GETSET $flag asdf + expect_false ceph osd pool set $TEST_POOL_GETSET $flag 2 + done + + ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.' + ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 123456 + ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | grep 'scrub_min_interval: 123456' + ceph osd pool set $TEST_POOL_GETSET scrub_min_interval 0 + ceph osd pool get $TEST_POOL_GETSET scrub_min_interval | expect_false grep '.' + + ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.' + ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 123456 + ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | grep 'scrub_max_interval: 123456' + ceph osd pool set $TEST_POOL_GETSET scrub_max_interval 0 + ceph osd pool get $TEST_POOL_GETSET scrub_max_interval | expect_false grep '.' + + ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.' + ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 123456 + ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | grep 'deep_scrub_interval: 123456' + ceph osd pool set $TEST_POOL_GETSET deep_scrub_interval 0 + ceph osd pool get $TEST_POOL_GETSET deep_scrub_interval | expect_false grep '.' + + ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.' 
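+  # Idiom used throughout this function: a pool option that has never been
+  # set produces no output from 'ceph osd pool get', so piping into
+  # "expect_false grep '.'" asserts that the command printed nothing.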
+ ceph osd pool set $TEST_POOL_GETSET recovery_priority 5 + ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: 5' + ceph osd pool set $TEST_POOL_GETSET recovery_priority -5 + ceph osd pool get $TEST_POOL_GETSET recovery_priority | grep 'recovery_priority: -5' + ceph osd pool set $TEST_POOL_GETSET recovery_priority 0 + ceph osd pool get $TEST_POOL_GETSET recovery_priority | expect_false grep '.' + expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority -11 + expect_false ceph osd pool set $TEST_POOL_GETSET recovery_priority 11 + + ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.' + ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 5 + ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | grep 'recovery_op_priority: 5' + ceph osd pool set $TEST_POOL_GETSET recovery_op_priority 0 + ceph osd pool get $TEST_POOL_GETSET recovery_op_priority | expect_false grep '.' + + ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.' + ceph osd pool set $TEST_POOL_GETSET scrub_priority 5 + ceph osd pool get $TEST_POOL_GETSET scrub_priority | grep 'scrub_priority: 5' + ceph osd pool set $TEST_POOL_GETSET scrub_priority 0 + ceph osd pool get $TEST_POOL_GETSET scrub_priority | expect_false grep '.' + + ceph osd pool set $TEST_POOL_GETSET nopgchange 1 + expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 10 + expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 10 + ceph osd pool set $TEST_POOL_GETSET nopgchange 0 + ceph osd pool set $TEST_POOL_GETSET pg_num 10 + wait_for_clean + ceph osd pool set $TEST_POOL_GETSET pgp_num 10 + expect_false ceph osd pool set $TEST_POOL_GETSET pg_num 0 + expect_false ceph osd pool set $TEST_POOL_GETSET pgp_num 0 + + old_pgs=$(ceph osd pool get $TEST_POOL_GETSET pg_num | sed -e 's/pg_num: //') + new_pgs=$(($old_pgs + $(ceph osd stat --format json | jq '.num_osds') * 32)) + ceph osd pool set $TEST_POOL_GETSET pg_num $new_pgs + ceph osd pool set $TEST_POOL_GETSET pgp_num $new_pgs + wait_for_clean + + ceph osd pool set $TEST_POOL_GETSET nosizechange 1 + expect_false ceph osd pool set $TEST_POOL_GETSET size 2 + expect_false ceph osd pool set $TEST_POOL_GETSET min_size 2 + ceph osd pool set $TEST_POOL_GETSET nosizechange 0 + ceph osd pool set $TEST_POOL_GETSET size 2 + wait_for_clean + ceph osd pool set $TEST_POOL_GETSET min_size 2 + + expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 0 + ceph osd pool set $TEST_POOL_GETSET hashpspool 0 --yes-i-really-mean-it + + expect_false ceph osd pool set $TEST_POOL_GETSET hashpspool 1 + ceph osd pool set $TEST_POOL_GETSET hashpspool 1 --yes-i-really-mean-it + + ceph osd pool get rbd crush_rule | grep 'crush_rule: ' + + ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.' + ceph osd pool set $TEST_POOL_GETSET compression_mode aggressive + ceph osd pool get $TEST_POOL_GETSET compression_mode | grep 'aggressive' + ceph osd pool set $TEST_POOL_GETSET compression_mode unset + ceph osd pool get $TEST_POOL_GETSET compression_mode | expect_false grep '.' + + ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.' + ceph osd pool set $TEST_POOL_GETSET compression_algorithm zlib + ceph osd pool get $TEST_POOL_GETSET compression_algorithm | grep 'zlib' + ceph osd pool set $TEST_POOL_GETSET compression_algorithm unset + ceph osd pool get $TEST_POOL_GETSET compression_algorithm | expect_false grep '.' 
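+  # For these compression/checksum options the literal value 'unset' clears
+  # the per-pool override, so the following 'get' is expected to print
+  # nothing again.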
+ + ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.' + expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 1.1 + expect_false ceph osd pool set $TEST_POOL_GETSET compression_required_ratio -.2 + ceph osd pool set $TEST_POOL_GETSET compression_required_ratio .2 + ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | grep '.2' + ceph osd pool set $TEST_POOL_GETSET compression_required_ratio 0 + ceph osd pool get $TEST_POOL_GETSET compression_required_ratio | expect_false grep '.' + + ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.' + ceph osd pool set $TEST_POOL_GETSET csum_type crc32c + ceph osd pool get $TEST_POOL_GETSET csum_type | grep 'crc32c' + ceph osd pool set $TEST_POOL_GETSET csum_type unset + ceph osd pool get $TEST_POOL_GETSET csum_type | expect_false grep '.' + + for size in compression_max_blob_size compression_min_blob_size csum_max_block csum_min_block; do + ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.' + ceph osd pool set $TEST_POOL_GETSET $size 100 + ceph osd pool get $TEST_POOL_GETSET $size | grep '100' + ceph osd pool set $TEST_POOL_GETSET $size 0 + ceph osd pool get $TEST_POOL_GETSET $size | expect_false grep '.' + done + + ceph osd pool set $TEST_POOL_GETSET nodelete 1 + expect_false ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it + ceph osd pool set $TEST_POOL_GETSET nodelete 0 + ceph osd pool delete $TEST_POOL_GETSET $TEST_POOL_GETSET --yes-i-really-really-mean-it + +} + +function test_mon_osd_tiered_pool_set() +{ + # this is really a tier pool + ceph osd pool create real-tier 2 + ceph osd tier add rbd real-tier + + # expect us to be unable to set negative values for hit_set_* + for o in hit_set_period hit_set_count hit_set_fpp; do + expect_false ceph osd pool set real_tier $o -1 + done + + # and hit_set_fpp should be in range 0..1 + expect_false ceph osd pool set real_tier hit_set_fpp 2 + + ceph osd pool set real-tier hit_set_type explicit_hash + ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_hash" + ceph osd pool set real-tier hit_set_type explicit_object + ceph osd pool get real-tier hit_set_type | grep "hit_set_type: explicit_object" + ceph osd pool set real-tier hit_set_type bloom + ceph osd pool get real-tier hit_set_type | grep "hit_set_type: bloom" + expect_false ceph osd pool set real-tier hit_set_type i_dont_exist + ceph osd pool set real-tier hit_set_period 123 + ceph osd pool get real-tier hit_set_period | grep "hit_set_period: 123" + ceph osd pool set real-tier hit_set_count 12 + ceph osd pool get real-tier hit_set_count | grep "hit_set_count: 12" + ceph osd pool set real-tier hit_set_fpp .01 + ceph osd pool get real-tier hit_set_fpp | grep "hit_set_fpp: 0.01" + + ceph osd pool set real-tier target_max_objects 123 + ceph osd pool get real-tier target_max_objects | \ + grep 'target_max_objects:[ \t]\+123' + ceph osd pool set real-tier target_max_bytes 123456 + ceph osd pool get real-tier target_max_bytes | \ + grep 'target_max_bytes:[ \t]\+123456' + ceph osd pool set real-tier cache_target_dirty_ratio .123 + ceph osd pool get real-tier cache_target_dirty_ratio | \ + grep 'cache_target_dirty_ratio:[ \t]\+0.123' + expect_false ceph osd pool set real-tier cache_target_dirty_ratio -.2 + expect_false ceph osd pool set real-tier cache_target_dirty_ratio 1.1 + ceph osd pool set real-tier cache_target_dirty_high_ratio .123 + ceph osd pool get real-tier cache_target_dirty_high_ratio | \ + 
grep 'cache_target_dirty_high_ratio:[ \t]\+0.123' + expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio -.2 + expect_false ceph osd pool set real-tier cache_target_dirty_high_ratio 1.1 + ceph osd pool set real-tier cache_target_full_ratio .123 + ceph osd pool get real-tier cache_target_full_ratio | \ + grep 'cache_target_full_ratio:[ \t]\+0.123' + ceph osd dump -f json-pretty | grep '"cache_target_full_ratio_micro": 123000' + ceph osd pool set real-tier cache_target_full_ratio 1.0 + ceph osd pool set real-tier cache_target_full_ratio 0 + expect_false ceph osd pool set real-tier cache_target_full_ratio 1.1 + ceph osd pool set real-tier cache_min_flush_age 123 + ceph osd pool get real-tier cache_min_flush_age | \ + grep 'cache_min_flush_age:[ \t]\+123' + ceph osd pool set real-tier cache_min_evict_age 234 + ceph osd pool get real-tier cache_min_evict_age | \ + grep 'cache_min_evict_age:[ \t]\+234' + + # this is not a tier pool + ceph osd pool create fake-tier 2 + ceph osd pool application enable fake-tier rados + wait_for_clean + + expect_false ceph osd pool set fake-tier hit_set_type explicit_hash + expect_false ceph osd pool get fake-tier hit_set_type + expect_false ceph osd pool set fake-tier hit_set_type explicit_object + expect_false ceph osd pool get fake-tier hit_set_type + expect_false ceph osd pool set fake-tier hit_set_type bloom + expect_false ceph osd pool get fake-tier hit_set_type + expect_false ceph osd pool set fake-tier hit_set_type i_dont_exist + expect_false ceph osd pool set fake-tier hit_set_period 123 + expect_false ceph osd pool get fake-tier hit_set_period + expect_false ceph osd pool set fake-tier hit_set_count 12 + expect_false ceph osd pool get fake-tier hit_set_count + expect_false ceph osd pool set fake-tier hit_set_fpp .01 + expect_false ceph osd pool get fake-tier hit_set_fpp + + expect_false ceph osd pool set fake-tier target_max_objects 123 + expect_false ceph osd pool get fake-tier target_max_objects + expect_false ceph osd pool set fake-tier target_max_bytes 123456 + expect_false ceph osd pool get fake-tier target_max_bytes + expect_false ceph osd pool set fake-tier cache_target_dirty_ratio .123 + expect_false ceph osd pool get fake-tier cache_target_dirty_ratio + expect_false ceph osd pool set fake-tier cache_target_dirty_ratio -.2 + expect_false ceph osd pool set fake-tier cache_target_dirty_ratio 1.1 + expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio .123 + expect_false ceph osd pool get fake-tier cache_target_dirty_high_ratio + expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio -.2 + expect_false ceph osd pool set fake-tier cache_target_dirty_high_ratio 1.1 + expect_false ceph osd pool set fake-tier cache_target_full_ratio .123 + expect_false ceph osd pool get fake-tier cache_target_full_ratio + expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.0 + expect_false ceph osd pool set fake-tier cache_target_full_ratio 0 + expect_false ceph osd pool set fake-tier cache_target_full_ratio 1.1 + expect_false ceph osd pool set fake-tier cache_min_flush_age 123 + expect_false ceph osd pool get fake-tier cache_min_flush_age + expect_false ceph osd pool set fake-tier cache_min_evict_age 234 + expect_false ceph osd pool get fake-tier cache_min_evict_age + + ceph osd tier remove rbd real-tier + ceph osd pool delete real-tier real-tier --yes-i-really-really-mean-it + ceph osd pool delete fake-tier fake-tier --yes-i-really-really-mean-it +} + +function test_mon_osd_erasure_code() +{ + + ceph 
osd erasure-code-profile set fooprofile a=b c=d + ceph osd erasure-code-profile set fooprofile a=b c=d + expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f + ceph osd erasure-code-profile set fooprofile a=b c=d e=f --force + ceph osd erasure-code-profile set fooprofile a=b c=d e=f + expect_false ceph osd erasure-code-profile set fooprofile a=b c=d e=f g=h + # make sure ruleset-foo doesn't work anymore + expect_false ceph osd erasure-code-profile set barprofile ruleset-failure-domain=host + ceph osd erasure-code-profile set barprofile crush-failure-domain=host + # clean up + ceph osd erasure-code-profile rm fooprofile + ceph osd erasure-code-profile rm barprofile + + # try weird k and m values + expect_false ceph osd erasure-code-profile set badk k=1 m=1 + expect_false ceph osd erasure-code-profile set badk k=1 m=2 + expect_false ceph osd erasure-code-profile set badk k=0 m=2 + expect_false ceph osd erasure-code-profile set badk k=-1 m=2 + expect_false ceph osd erasure-code-profile set badm k=2 m=0 + expect_false ceph osd erasure-code-profile set badm k=2 m=-1 + ceph osd erasure-code-profile set good k=2 m=1 + ceph osd erasure-code-profile rm good +} + +function test_mon_osd_misc() +{ + set +e + + # expect error about missing 'pool' argument + ceph osd map 2>$TMPFILE; check_response 'pool' $? 22 + + # expect error about unused argument foo + ceph osd ls foo 2>$TMPFILE; check_response 'unused' $? 22 + + # expect "not in range" for invalid overload percentage + ceph osd reweight-by-utilization 80 2>$TMPFILE; check_response 'higher than 100' $? 22 + + set -e + + ceph osd reweight-by-utilization 110 + ceph osd reweight-by-utilization 110 .5 + expect_false ceph osd reweight-by-utilization 110 0 + expect_false ceph osd reweight-by-utilization 110 -0.1 + ceph osd test-reweight-by-utilization 110 .5 --no-increasing + ceph osd test-reweight-by-utilization 110 .5 4 --no-increasing + expect_false ceph osd test-reweight-by-utilization 110 .5 0 --no-increasing + expect_false ceph osd test-reweight-by-utilization 110 .5 -10 --no-increasing + ceph osd reweight-by-pg 110 + ceph osd test-reweight-by-pg 110 .5 + ceph osd reweight-by-pg 110 rbd + ceph osd reweight-by-pg 110 .5 rbd + expect_false ceph osd reweight-by-pg 110 boguspoolasdfasdfasdf +} + +function test_mon_heap_profiler() +{ + do_test=1 + set +e + # expect 'heap' commands to be correctly parsed + ceph heap stats 2>$TMPFILE + if [[ $? -eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then + echo "tcmalloc not enabled; skip heap profiler test" + do_test=0 + fi + set -e + + [[ $do_test -eq 0 ]] && return 0 + + ceph heap start_profiler + ceph heap dump + ceph heap stop_profiler + ceph heap release +} + +function test_admin_heap_profiler() +{ + do_test=1 + set +e + # expect 'heap' commands to be correctly parsed + ceph heap stats 2>$TMPFILE + if [[ $? 
-eq 22 && `grep 'tcmalloc not enabled' $TMPFILE` ]]; then + echo "tcmalloc not enabled; skip heap profiler test" + do_test=0 + fi + set -e + + [[ $do_test -eq 0 ]] && return 0 + + local admin_socket=$(get_admin_socket osd.0) + + $SUDO ceph --admin-daemon $admin_socket heap start_profiler + $SUDO ceph --admin-daemon $admin_socket heap dump + $SUDO ceph --admin-daemon $admin_socket heap stop_profiler + $SUDO ceph --admin-daemon $admin_socket heap release +} + +function test_osd_bench() +{ + # test osd bench limits + # As we should not rely on defaults (as they may change over time), + # lets inject some values and perform some simple tests + # max iops: 10 # 100 IOPS + # max throughput: 10485760 # 10MB/s + # max block size: 2097152 # 2MB + # duration: 10 # 10 seconds + + local args="\ + --osd-bench-duration 10 \ + --osd-bench-max-block-size 2097152 \ + --osd-bench-large-size-max-throughput 10485760 \ + --osd-bench-small-size-max-iops 10" + ceph tell osd.0 injectargs ${args## } + + # anything with a bs larger than 2097152 must fail + expect_false ceph tell osd.0 bench 1 2097153 + # but using 'osd_bench_max_bs' must succeed + ceph tell osd.0 bench 1 2097152 + + # we assume 1MB as a large bs; anything lower is a small bs + # for a 4096 bytes bs, for 10 seconds, we are limited by IOPS + # max count: 409600 (bytes) + + # more than max count must not be allowed + expect_false ceph tell osd.0 bench 409601 4096 + # but 409600 must be succeed + ceph tell osd.0 bench 409600 4096 + + # for a large bs, we are limited by throughput. + # for a 2MB block size for 10 seconds, assuming 10MB/s throughput, + # the max count will be (10MB * 10s) = 100MB + # max count: 104857600 (bytes) + + # more than max count must not be allowed + expect_false ceph tell osd.0 bench 104857601 2097152 + # up to max count must be allowed + ceph tell osd.0 bench 104857600 2097152 +} + +function test_osd_negative_filestore_merge_threshold() +{ + $SUDO ceph daemon osd.0 config set filestore_merge_threshold -1 + expect_config_value "osd.0" "filestore_merge_threshold" -1 +} + +function test_mon_tell() +{ + ceph tell mon.a version + ceph tell mon.b version + expect_false ceph tell mon.foo version + + sleep 1 + + ceph_watch_start debug audit + ceph tell mon.a version + ceph_watch_wait 'mon.a \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch' + + ceph_watch_start debug audit + ceph tell mon.b version + ceph_watch_wait 'mon.b \[DBG\] from.*cmd=\[{"prefix": "version"}\]: dispatch' +} + +function test_mon_ping() +{ + ceph ping mon.a + ceph ping mon.b + expect_false ceph ping mon.foo + + ceph ping mon.\* +} + +function test_mon_deprecated_commands() +{ + # current DEPRECATED commands are: + # ceph compact + # ceph scrub + # ceph sync force + # + # Testing should be accomplished by setting + # 'mon_debug_deprecated_as_obsolete = true' and expecting ENOTSUP for + # each one of these commands. 
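+ #
+ # Illustrative note (assumed behavior, not part of the original test):
+ # on Linux, ENOTSUP and EOPNOTSUPP share the same errno value (95), so
+ # each obsolete command below is expected to fail and print a message
+ # like "EOPNOTSUPP: command is obsolete", which check_response greps
+ # for in $TMPFILE.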
+ + ceph tell mon.a injectargs '--mon-debug-deprecated-as-obsolete' + expect_false ceph tell mon.a compact 2> $TMPFILE + check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete" + + expect_false ceph tell mon.a scrub 2> $TMPFILE + check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete" + + expect_false ceph tell mon.a sync force 2> $TMPFILE + check_response "\(EOPNOTSUPP\|ENOTSUP\): command is obsolete" + + ceph tell mon.a injectargs '--no-mon-debug-deprecated-as-obsolete' +} + +function test_mon_cephdf_commands() +{ + # ceph df detail: + # pool section: + # RAW USED The near raw used per pool in raw total + + ceph osd pool create cephdf_for_test 1 1 replicated + ceph osd pool application enable cephdf_for_test rados + ceph osd pool set cephdf_for_test size 2 + + dd if=/dev/zero of=./cephdf_for_test bs=4k count=1 + rados put cephdf_for_test cephdf_for_test -p cephdf_for_test + + #wait for update + for i in `seq 1 10`; do + rados -p cephdf_for_test ls - | grep -q cephdf_for_test && break + sleep 1 + done + # "rados ls" goes straight to osd, but "ceph df" is served by mon. so we need + # to sync mon with osd + flush_pg_stats + local jq_filter='.pools | .[] | select(.name == "cephdf_for_test") | .stats' + stored=`ceph df detail --format=json | jq "$jq_filter.stored * 2"` + stored_raw=`ceph df detail --format=json | jq "$jq_filter.stored_raw"` + + ceph osd pool delete cephdf_for_test cephdf_for_test --yes-i-really-really-mean-it + rm ./cephdf_for_test + + expect_false test $stored != $stored_raw +} + +function test_mon_pool_application() +{ + ceph osd pool create app_for_test 10 + + ceph osd pool application enable app_for_test rbd + expect_false ceph osd pool application enable app_for_test rgw + ceph osd pool application enable app_for_test rgw --yes-i-really-mean-it + ceph osd pool ls detail | grep "application rbd,rgw" + ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}' + + expect_false ceph osd pool application set app_for_test cephfs key value + ceph osd pool application set app_for_test rbd key1 value1 + ceph osd pool application set app_for_test rbd key2 value2 + ceph osd pool application set app_for_test rgw key1 value1 + ceph osd pool application get app_for_test rbd key1 | grep 'value1' + ceph osd pool application get app_for_test rbd key2 | grep 'value2' + ceph osd pool application get app_for_test rgw key1 | grep 'value1' + + ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{"key1":"value1"}}' + + ceph osd pool application rm app_for_test rgw key1 + ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1","key2":"value2"},"rgw":{}}' + ceph osd pool application rm app_for_test rbd key2 + ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{"key1":"value1"},"rgw":{}}' + ceph osd pool application rm app_for_test rbd key1 + ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{},"rgw":{}}' + ceph osd pool application rm app_for_test rbd key1 # should be idempotent + + expect_false ceph osd pool application disable app_for_test rgw + ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it + ceph osd pool application disable app_for_test rgw --yes-i-really-mean-it # should be idempotent + ceph osd pool ls detail | grep "application rbd" + ceph osd pool ls detail --format=json | grep '"application_metadata":{"rbd":{}}' + + ceph osd pool application disable app_for_test rgw 
--yes-i-really-mean-it + ceph osd pool ls detail | grep -v "application " + ceph osd pool ls detail --format=json | grep '"application_metadata":{}' + + ceph osd pool rm app_for_test app_for_test --yes-i-really-really-mean-it +} + +function test_mon_tell_help_command() +{ + ceph tell mon.a help + + # wrong target + expect_false ceph tell mon.zzz help +} + +function test_mon_stdin_stdout() +{ + echo foo | ceph config-key set test_key -i - + ceph config-key get test_key -o - | grep -c foo | grep -q 1 +} + +function test_osd_tell_help_command() +{ + ceph tell osd.1 help + expect_false ceph tell osd.100 help +} + +function test_osd_compact() +{ + ceph tell osd.1 compact + $SUDO ceph daemon osd.1 compact +} + +function test_mds_tell_help_command() +{ + local FS_NAME=cephfs + if ! mds_exists ; then + echo "Skipping test, no MDS found" + return + fi + + remove_all_fs + ceph osd pool create fs_data 10 + ceph osd pool create fs_metadata 10 + ceph fs new $FS_NAME fs_metadata fs_data + wait_mds_active $FS_NAME + + + ceph tell mds.a help + expect_false ceph tell mds.z help + + remove_all_fs + ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it + ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it +} + +function test_mgr_tell() +{ + ceph tell mgr help + #ceph tell mgr fs status # see http://tracker.ceph.com/issues/20761 + ceph tell mgr osd status +} + +function test_mgr_devices() +{ + ceph device ls + expect_false ceph device info doesnotexist + expect_false ceph device get-health-metrics doesnotexist +} + +function test_per_pool_scrub_status() +{ + ceph osd pool create noscrub_pool 12 + ceph osd pool create noscrub_pool2 12 + ceph -s | expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set" + ceph -s --format json | \ + jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \ + expect_false grep -q "Some pool(s) have the.*scrub.* flag(s) set" + ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | + expect_false grep -q "Pool .* has .*scrub.* flag" + ceph health detail | jq .health.checks.POOL_SCRUB_FLAGS.detail | \ + expect_false grep -q "Pool .* has .*scrub.* flag" + + ceph osd pool set noscrub_pool noscrub 1 + ceph -s | expect_true grep -q "Some pool(s) have the noscrub flag(s) set" + ceph -s --format json | \ + jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \ + expect_true grep -q "Some pool(s) have the noscrub flag(s) set" + ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \ + expect_true grep -q "Pool noscrub_pool has noscrub flag" + ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag" + + ceph osd pool set noscrub_pool nodeep-scrub 1 + ceph osd pool set noscrub_pool2 nodeep-scrub 1 + ceph -s | expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set" + ceph -s --format json | \ + jq .health.checks.POOL_SCRUB_FLAGS.summary.message | \ + expect_true grep -q "Some pool(s) have the noscrub, nodeep-scrub flag(s) set" + ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \ + expect_true grep -q "Pool noscrub_pool has noscrub flag" + ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \ + expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag" + ceph report | jq .health.checks.POOL_SCRUB_FLAGS.detail | \ + expect_true grep -q "Pool noscrub_pool2 has nodeep-scrub flag" + ceph health detail | expect_true grep -q "Pool noscrub_pool has noscrub flag" + ceph health detail | expect_true grep -q "Pool noscrub_pool has nodeep-scrub flag" + ceph health detail | expect_true grep -q 
"Pool noscrub_pool2 has nodeep-scrub flag" + + ceph osd pool rm noscrub_pool noscrub_pool --yes-i-really-really-mean-it + ceph osd pool rm noscrub_pool2 noscrub_pool2 --yes-i-really-really-mean-it +} + +# +# New tests should be added to the TESTS array below +# +# Individual tests may be run using the '-t ' argument +# The user can specify '-t ' as many times as she wants +# +# Tests will be run in order presented in the TESTS array, or in +# the order specified by the '-t ' options. +# +# '-l' will list all the available test names +# '-h' will show usage +# +# The test maintains backward compatibility: not specifying arguments +# will run all tests following the order they appear in the TESTS array. +# + +set +x +MON_TESTS+=" mon_injectargs" +MON_TESTS+=" mon_injectargs_SI" +for i in `seq 9`; do + MON_TESTS+=" tiering_$i"; +done +MON_TESTS+=" auth" +MON_TESTS+=" auth_profiles" +MON_TESTS+=" mon_misc" +MON_TESTS+=" mon_mon" +MON_TESTS+=" mon_osd" +MON_TESTS+=" mon_config_key" +MON_TESTS+=" mon_crush" +MON_TESTS+=" mon_osd_create_destroy" +MON_TESTS+=" mon_osd_pool" +MON_TESTS+=" mon_osd_pool_quota" +MON_TESTS+=" mon_pg" +MON_TESTS+=" mon_osd_pool_set" +MON_TESTS+=" mon_osd_tiered_pool_set" +MON_TESTS+=" mon_osd_erasure_code" +MON_TESTS+=" mon_osd_misc" +MON_TESTS+=" mon_heap_profiler" +MON_TESTS+=" mon_tell" +MON_TESTS+=" mon_ping" +MON_TESTS+=" mon_deprecated_commands" +MON_TESTS+=" mon_caps" +MON_TESTS+=" mon_cephdf_commands" +MON_TESTS+=" mon_tell_help_command" +MON_TESTS+=" mon_stdin_stdout" + +OSD_TESTS+=" osd_bench" +OSD_TESTS+=" osd_negative_filestore_merge_threshold" +OSD_TESTS+=" tiering_agent" +OSD_TESTS+=" admin_heap_profiler" +OSD_TESTS+=" osd_tell_help_command" +OSD_TESTS+=" osd_compact" +OSD_TESTS+=" per_pool_scrub_status" + +MDS_TESTS+=" mds_tell" +MDS_TESTS+=" mon_mds" +MDS_TESTS+=" mon_mds_metadata" +MDS_TESTS+=" mds_tell_help_command" + +MGR_TESTS+=" mgr_tell" +MGR_TESTS+=" mgr_devices" + +TESTS+=$MON_TESTS +TESTS+=$OSD_TESTS +TESTS+=$MDS_TESTS +TESTS+=$MGR_TESTS + +# +# "main" follows +# + +function list_tests() +{ + echo "AVAILABLE TESTS" + for i in $TESTS; do + echo " $i" + done +} + +function usage() +{ + echo "usage: $0 [-h|-l|-t [-t ...]]" +} + +tests_to_run=() + +sanity_check=true + +while [[ $# -gt 0 ]]; do + opt=$1 + + case "$opt" in + "-l" ) + do_list=1 + ;; + "--asok-does-not-need-root" ) + SUDO="" + ;; + "--no-sanity-check" ) + sanity_check=false + ;; + "--test-mon" ) + tests_to_run+="$MON_TESTS" + ;; + "--test-osd" ) + tests_to_run+="$OSD_TESTS" + ;; + "--test-mds" ) + tests_to_run+="$MDS_TESTS" + ;; + "--test-mgr" ) + tests_to_run+="$MGR_TESTS" + ;; + "-t" ) + shift + if [[ -z "$1" ]]; then + echo "missing argument to '-t'" + usage ; + exit 1 + fi + tests_to_run+=" $1" + ;; + "-h" ) + usage ; + exit 0 + ;; + esac + shift +done + +if [[ $do_list -eq 1 ]]; then + list_tests ; + exit 0 +fi + +ceph osd pool create rbd 10 + +if test -z "$tests_to_run" ; then + tests_to_run="$TESTS" +fi + +if $sanity_check ; then + wait_no_osd_down +fi +for i in $tests_to_run; do + if $sanity_check ; then + check_no_osd_down + fi + set -x + test_${i} + set +x +done +if $sanity_check ; then + check_no_osd_down +fi + +set -x + +echo OK diff --git a/qa/workunits/cephtool/test_daemon.sh b/qa/workunits/cephtool/test_daemon.sh new file mode 100755 index 00000000..08ae937c --- /dev/null +++ b/qa/workunits/cephtool/test_daemon.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +set -ex + +expect_false() +{ + set -x + if "$@"; then return 1; else return 0; fi +} + +echo note: assuming mon.a is on 
the current host + +# can set to 'sudo ./ceph' to execute tests from current dir for development +CEPH=${CEPH:-'sudo ceph'} + +${CEPH} daemon mon.a version | grep version + +# get debug_ms setting and strip it, painfully for reuse +old_ms=$(${CEPH} daemon mon.a config get debug_ms | \ + grep debug_ms | sed -e 's/.*: //' -e 's/["\}\\]//g') +${CEPH} daemon mon.a config set debug_ms 13 +new_ms=$(${CEPH} daemon mon.a config get debug_ms | \ + grep debug_ms | sed -e 's/.*: //' -e 's/["\}\\]//g') +[ "$new_ms" = "13/13" ] +${CEPH} daemon mon.a config set debug_ms $old_ms +new_ms=$(${CEPH} daemon mon.a config get debug_ms | \ + grep debug_ms | sed -e 's/.*: //' -e 's/["\}\\]//g') +[ "$new_ms" = "$old_ms" ] + +# unregistered/non-existent command +expect_false ${CEPH} daemon mon.a bogus_command_blah foo + +set +e +OUTPUT=$(${CEPH} -c /not/a/ceph.conf daemon mon.a help 2>&1) +# look for EINVAL +if [ $? != 22 ] ; then exit 1; fi +if ! echo "$OUTPUT" | grep -q '.*open.*/not/a/ceph.conf'; then + echo "didn't find expected error in bad conf search" + exit 1 +fi +set -e + +echo OK diff --git a/qa/workunits/cephtool/test_kvstore_tool.sh b/qa/workunits/cephtool/test_kvstore_tool.sh new file mode 100755 index 00000000..b7953dd2 --- /dev/null +++ b/qa/workunits/cephtool/test_kvstore_tool.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash + +set -x + +source $(dirname $0)/../../standalone/ceph-helpers.sh + +set -e +set -o functrace +PS4='${BASH_SOURCE[0]}:$LINENO: ${FUNCNAME[0]}: ' +SUDO=${SUDO:-sudo} +export CEPH_DEV=1 + +echo note: test ceph_kvstore_tool with bluestore + +expect_false() +{ + set -x + if "$@"; then return 1; else return 0; fi +} + +TEMP_DIR=$(mktemp -d ./cephtool.XXX) +trap "rm -fr $TEMP_DIR" 0 + +TEMP_FILE=$(mktemp $TEMP_DIR/test_invalid.XXX) + +function test_ceph_kvstore_tool() +{ + # create a data directory + ceph-objectstore-tool --data-path ${TEMP_DIR} --op mkfs --no-mon-config + + # list + origin_kv_nums=`ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list 2>/dev/null | wc -l` + + # exists + prefix=`ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list 2>/dev/null | head -n 1 | awk '{print $1}'` + ceph-kvstore-tool bluestore-kv ${TEMP_DIR} exists ${prefix} + expect_false ceph-kvstore-tool bluestore-kv ${TEMP_DIR} exists ${prefix}notexist + + # list-crc + ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list-crc + ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list-crc ${prefix} + + # list with prefix + ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list ${prefix} + + # set + echo "helloworld" >> ${TEMP_FILE} + ceph-kvstore-tool bluestore-kv ${TEMP_DIR} set TESTPREFIX TESTKEY in ${TEMP_FILE} + ceph-kvstore-tool bluestore-kv ${TEMP_DIR} exists TESTPREFIX TESTKEY + + # get + ceph-kvstore-tool bluestore-kv ${TEMP_DIR} get TESTPREFIX TESTKEY out ${TEMP_FILE}.bak + diff ${TEMP_FILE} ${TEMP_FILE}.bak + + # rm + ceph-kvstore-tool bluestore-kv ${TEMP_DIR} rm TESTPREFIX TESTKEY + expect_false ceph-kvstore-tool bluestore-kv ${TEMP_DIR} exists TESTPREFIX TESTKEY + + # compact + ceph-kvstore-tool bluestore-kv ${TEMP_DIR} compact + + # destructive-repair + ceph-kvstore-tool bluestore-kv ${TEMP_DIR} destructive-repair + + current_kv_nums=`ceph-kvstore-tool bluestore-kv ${TEMP_DIR} list 2>/dev/null | wc -l` + test ${origin_kv_nums} -eq ${current_kv_nums} +} + +test_ceph_kvstore_tool + +echo OK diff --git a/qa/workunits/cls/test_cls_hello.sh b/qa/workunits/cls/test_cls_hello.sh new file mode 100755 index 00000000..0a2e0962 --- /dev/null +++ b/qa/workunits/cls/test_cls_hello.sh @@ -0,0 +1,5 @@ +#!/bin/sh -e + +ceph_test_cls_hello + +exit 
0 diff --git a/qa/workunits/cls/test_cls_journal.sh b/qa/workunits/cls/test_cls_journal.sh new file mode 100755 index 00000000..9aa7450a --- /dev/null +++ b/qa/workunits/cls/test_cls_journal.sh @@ -0,0 +1,6 @@ +#!/bin/sh -e + +GTEST_FILTER=${CLS_JOURNAL_GTEST_FILTER:-*} +ceph_test_cls_journal --gtest_filter=${GTEST_FILTER} + +exit 0 diff --git a/qa/workunits/cls/test_cls_lock.sh b/qa/workunits/cls/test_cls_lock.sh new file mode 100755 index 00000000..c1452705 --- /dev/null +++ b/qa/workunits/cls/test_cls_lock.sh @@ -0,0 +1,5 @@ +#!/bin/sh -e + +ceph_test_cls_lock + +exit 0 diff --git a/qa/workunits/cls/test_cls_log.sh b/qa/workunits/cls/test_cls_log.sh new file mode 100755 index 00000000..523f985e --- /dev/null +++ b/qa/workunits/cls/test_cls_log.sh @@ -0,0 +1,5 @@ +#!/bin/sh -e + +ceph_test_cls_log + +exit 0 diff --git a/qa/workunits/cls/test_cls_numops.sh b/qa/workunits/cls/test_cls_numops.sh new file mode 100755 index 00000000..dcbafcab --- /dev/null +++ b/qa/workunits/cls/test_cls_numops.sh @@ -0,0 +1,5 @@ +#!/bin/sh -e + +ceph_test_cls_numops + +exit 0 diff --git a/qa/workunits/cls/test_cls_rbd.sh b/qa/workunits/cls/test_cls_rbd.sh new file mode 100755 index 00000000..fd4bec0f --- /dev/null +++ b/qa/workunits/cls/test_cls_rbd.sh @@ -0,0 +1,6 @@ +#!/bin/sh -e + +GTEST_FILTER=${CLS_RBD_GTEST_FILTER:-*} +ceph_test_cls_rbd --gtest_filter=${GTEST_FILTER} + +exit 0 diff --git a/qa/workunits/cls/test_cls_refcount.sh b/qa/workunits/cls/test_cls_refcount.sh new file mode 100755 index 00000000..d722f5ad --- /dev/null +++ b/qa/workunits/cls/test_cls_refcount.sh @@ -0,0 +1,5 @@ +#!/bin/sh -e + +ceph_test_cls_refcount + +exit 0 diff --git a/qa/workunits/cls/test_cls_rgw.sh b/qa/workunits/cls/test_cls_rgw.sh new file mode 100755 index 00000000..257338a0 --- /dev/null +++ b/qa/workunits/cls/test_cls_rgw.sh @@ -0,0 +1,8 @@ +#!/bin/sh -e + +ceph_test_cls_rgw +#ceph_test_cls_rgw_meta +#ceph_test_cls_rgw_log +#ceph_test_cls_rgw_opstate + +exit 0 diff --git a/qa/workunits/cls/test_cls_sdk.sh b/qa/workunits/cls/test_cls_sdk.sh new file mode 100755 index 00000000..f1ccdc3b --- /dev/null +++ b/qa/workunits/cls/test_cls_sdk.sh @@ -0,0 +1,5 @@ +#!/bin/sh -e + +ceph_test_cls_sdk + +exit 0 diff --git a/qa/workunits/direct_io/.gitignore b/qa/workunits/direct_io/.gitignore new file mode 100644 index 00000000..80f1fd1a --- /dev/null +++ b/qa/workunits/direct_io/.gitignore @@ -0,0 +1,3 @@ +/direct_io_test +/test_sync_io +/test_short_dio_read diff --git a/qa/workunits/direct_io/Makefile b/qa/workunits/direct_io/Makefile new file mode 100644 index 00000000..20fec0be --- /dev/null +++ b/qa/workunits/direct_io/Makefile @@ -0,0 +1,11 @@ +CFLAGS = -Wall -Wextra -D_GNU_SOURCE + +TARGETS = direct_io_test test_sync_io test_short_dio_read + +.c: + $(CC) $(CFLAGS) $@.c -o $@ + +all: $(TARGETS) + +clean: + rm $(TARGETS) diff --git a/qa/workunits/direct_io/big.sh b/qa/workunits/direct_io/big.sh new file mode 100755 index 00000000..43bd6d72 --- /dev/null +++ b/qa/workunits/direct_io/big.sh @@ -0,0 +1,6 @@ +#!/bin/sh -ex + +echo "test large (16MB) dio write" +dd if=/dev/zero of=foo.big bs=16M count=1 oflag=direct + +echo OK diff --git a/qa/workunits/direct_io/direct_io_test.c b/qa/workunits/direct_io/direct_io_test.c new file mode 100644 index 00000000..ccfbbb86 --- /dev/null +++ b/qa/workunits/direct_io/direct_io_test.c @@ -0,0 +1,312 @@ +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2011 New Dream Network + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the 
GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * direct_io_test + * + * This test does some I/O using O_DIRECT. + * + * Semantics of O_DIRECT can be found at http://lwn.net/Articles/348739/ + * + */ + +static int g_num_pages = 100; + +static int g_duration = 10; + +struct chunk { + uint64_t offset; + uint64_t pad0; + uint64_t pad1; + uint64_t pad2; + uint64_t pad3; + uint64_t pad4; + uint64_t pad5; + uint64_t not_offset; +} __attribute__((packed)); + +static int page_size; + +static char temp_file[] = "direct_io_temp_file_XXXXXX"; + +static int safe_write(int fd, const void *buf, signed int len) +{ + const char *b = (const char*)buf; + /* Handle EINTR and short writes */ + while (1) { + int res = write(fd, b, len); + if (res < 0) { + int err = errno; + if (err != EINTR) { + return err; + } + } + len -= res; + b += res; + if (len <= 0) + return 0; + } +} + +static int do_read(int fd, char *buf, int buf_sz) +{ + /* We assume no short reads or EINTR. It's not really clear how + * those things interact with O_DIRECT. */ + int ret = read(fd, buf, buf_sz); + if (ret < 0) { + int err = errno; + printf("do_read: error: %d (%s)\n", err, strerror(err)); + return err; + } + if (ret != buf_sz) { + printf("do_read: short read\n"); + return -EIO; + } + return 0; +} + +static int setup_temp_file(void) +{ + int fd; + int64_t num_chunks, i; + + if (page_size % sizeof(struct chunk)) { + printf("setup_big_file: page_size doesn't divide evenly " + "into data blocks.\n"); + return -EINVAL; + } + + fd = mkstemp(temp_file); + if (fd < 0) { + int err = errno; + printf("setup_big_file: mkostemps failed with error %d\n", err); + return err; + } + + num_chunks = g_num_pages * (page_size / sizeof(struct chunk)); + for (i = 0; i < num_chunks; ++i) { + int ret; + struct chunk c; + memset(&c, 0, sizeof(c)); + c.offset = i * sizeof(struct chunk); + c.pad0 = 0; + c.pad1 = 1; + c.pad2 = 2; + c.pad3 = 3; + c.pad4 = 4; + c.pad5 = 5; + c.not_offset = ~c.offset; + ret = safe_write(fd, &c, sizeof(struct chunk)); + if (ret) { + printf("setup_big_file: safe_write failed with " + "error: %d\n", ret); + TEMP_FAILURE_RETRY(close(fd)); + unlink(temp_file); + return ret; + } + } + TEMP_FAILURE_RETRY(close(fd)); + return 0; +} + +static int verify_chunk(const struct chunk *c, uint64_t offset) +{ + if (c->offset != offset) { + printf("verify_chunk(%" PRId64 "): bad offset value (got: %" + PRId64 ", expected: %" PRId64 "\n", offset, c->offset, offset); + return EIO; + } + if (c->pad0 != 0) { + printf("verify_chunk(%" PRId64 "): bad pad0 value\n", offset); + return EIO; + } + if (c->pad1 != 1) { + printf("verify_chunk(%" PRId64 "): bad pad1 value\n", offset); + return EIO; + } + if (c->pad2 != 2) { + printf("verify_chunk(%" PRId64 "): bad pad2 value\n", offset); + return EIO; + } + if (c->pad3 != 3) { + printf("verify_chunk(%" PRId64 "): bad pad3 value\n", offset); + return EIO; + } + if (c->pad4 != 4) { + printf("verify_chunk(%" PRId64 "): bad pad4 value\n", offset); + return EIO; + } + if (c->pad5 != 5) { + printf("verify_chunk(%" PRId64 "): bad pad5 value\n", offset); + return EIO; + } + if (c->not_offset != ~offset) { + printf("verify_chunk(%" PRId64 "): bad not_offset value\n", + offset); + return EIO; + } + return 0; +} + +static int do_o_direct_reads(void) +{ + int fd, ret; + unsigned int i; + void *buf = 0; + time_t cur_time, 
end_time; + ret = posix_memalign(&buf, page_size, page_size); + if (ret) { + printf("do_o_direct_reads: posix_memalign returned %d\n", ret); + goto done; + } + + fd = open(temp_file, O_RDONLY | O_DIRECT); + if (fd < 0) { + ret = errno; + printf("do_o_direct_reads: error opening fd: %d\n", ret); + goto free_buf; + } + + // read the first chunk and see if it looks OK + ret = do_read(fd, buf, page_size); + if (ret) + goto close_fd; + ret = verify_chunk((struct chunk*)buf, 0); + if (ret) + goto close_fd; + + // read some random chunks and see how they look + cur_time = time(NULL); + end_time = cur_time + g_duration; + i = 0; + do { + time_t next_time; + uint64_t offset; + int page; + unsigned int seed; + + seed = i++; + page = rand_r(&seed) % g_num_pages; + offset = page; + offset *= page_size; + if (lseek64(fd, offset, SEEK_SET) == -1) { + int err = errno; + printf("lseek64(%" PRId64 ") failed: error %d (%s)\n", + offset, err, strerror(err)); + goto close_fd; + } + ret = do_read(fd, buf, page_size); + if (ret) + goto close_fd; + ret = verify_chunk((struct chunk*)buf, offset); + if (ret) + goto close_fd; + next_time = time(NULL); + if (next_time > cur_time) { + printf("."); + } + cur_time = next_time; + } while (time(NULL) < end_time); + + printf("\ndo_o_direct_reads: SUCCESS\n"); +close_fd: + TEMP_FAILURE_RETRY(close(fd)); +free_buf: + free(buf); +done: + return ret; +} + +static void usage(char *argv0) +{ + printf("%s: tests direct I/O\n", argv0); + printf("-d : sets duration to \n"); + printf("-h: this help\n"); + printf("-p : sets number of pages to allocate\n"); +} + +static void parse_args(int argc, char *argv[]) +{ + int c; + while ((c = getopt (argc, argv, "d:hp:")) != -1) { + switch (c) { + case 'd': + g_duration = atoi(optarg); + if (g_duration <= 0) { + printf("tried to set invalid value of " + "g_duration: %d\n", g_num_pages); + exit(1); + } + break; + case 'h': + usage(argv[0]); + exit(0); + break; + case 'p': + g_num_pages = atoi(optarg); + if (g_num_pages <= 0) { + printf("tried to set invalid value of " + "g_num_pages: %d\n", g_num_pages); + exit(1); + } + break; + case '?': + usage(argv[0]); + exit(1); + break; + default: + usage(argv[0]); + exit(1); + break; + } + } +} + +int main(int argc, char *argv[]) +{ + int ret; + + parse_args(argc, argv); + + setvbuf(stdout, NULL, _IONBF, 0); + + page_size = getpagesize(); + + ret = setup_temp_file(); + if (ret) { + printf("setup_temp_file failed with error %d\n", ret); + goto done; + } + + ret = do_o_direct_reads(); + if (ret) { + printf("do_o_direct_reads failed with error %d\n", ret); + goto unlink_temp_file; + } + +unlink_temp_file: + unlink(temp_file); +done: + return ret; +} diff --git a/qa/workunits/direct_io/misc.sh b/qa/workunits/direct_io/misc.sh new file mode 100755 index 00000000..6de080d2 --- /dev/null +++ b/qa/workunits/direct_io/misc.sh @@ -0,0 +1,16 @@ +#!/bin/sh -ex + +# a few test cases from henry +echo "test read from hole" +dd if=/dev/zero of=dd3 bs=1 seek=1048576 count=0 +dd if=dd3 of=/tmp/ddout1 skip=8 bs=512 count=2 iflag=direct +dd if=/dev/zero of=/tmp/dd3 bs=512 count=2 +cmp /tmp/dd3 /tmp/ddout1 + +echo "other thing" +dd if=/dev/urandom of=/tmp/dd10 bs=500 count=1 +dd if=/tmp/dd10 of=dd10 bs=512 seek=8388 count=1 +dd if=dd10 of=/tmp/dd10out bs=512 skip=8388 count=1 iflag=direct +cmp /tmp/dd10 /tmp/dd10out + +echo OK diff --git a/qa/workunits/direct_io/test_short_dio_read.c b/qa/workunits/direct_io/test_short_dio_read.c new file mode 100644 index 00000000..50248555 --- /dev/null +++ 
b/qa/workunits/direct_io/test_short_dio_read.c @@ -0,0 +1,57 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +int main() +{ + char buf[409600]; + ssize_t r; + int err; + int fd = open("shortfile", O_WRONLY|O_CREAT, 0644); + + if (fd < 0) { + err = errno; + printf("error: open() failed with: %d (%s)\n", err, strerror(err)); + exit(err); + } + + printf("writing first 3 bytes of 10k file\n"); + r = write(fd, "foo", 3); + if (r == -1) { + err = errno; + printf("error: write() failed with: %d (%s)\n", err, strerror(err)); + close(fd); + exit(err); + } + r = ftruncate(fd, 10000); + if (r == -1) { + err = errno; + printf("error: ftruncate() failed with: %d (%s)\n", err, strerror(err)); + close(fd); + exit(err); + } + + fsync(fd); + close(fd); + + printf("reading O_DIRECT\n"); + fd = open("shortfile", O_RDONLY|O_DIRECT); + if (fd < 0) { + err = errno; + printf("error: open() failed with: %d (%s)\n", err, strerror(err)); + exit(err); + } + + r = read(fd, buf, sizeof(buf)); + close(fd); + + printf("got %d\n", (int)r); + if (r != 10000) + return 1; + return 0; +} diff --git a/qa/workunits/direct_io/test_sync_io.c b/qa/workunits/direct_io/test_sync_io.c new file mode 100644 index 00000000..f393fa6e --- /dev/null +++ b/qa/workunits/direct_io/test_sync_io.c @@ -0,0 +1,250 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +//#include "../client/ioctl.h" + +#include +#define CEPH_IOCTL_MAGIC 0x97 +#define CEPH_IOC_SYNCIO _IO(CEPH_IOCTL_MAGIC, 5) + +void write_pattern() +{ + printf("writing pattern\n"); + + uint64_t i; + int r; + + int fd = open("foo", O_CREAT|O_WRONLY, 0644); + if (fd < 0) { + r = errno; + printf("write_pattern: error: open() failed with: %d (%s)\n", r, strerror(r)); + exit(r); + } + for (i=0; i<1048576 * sizeof(i); i += sizeof(i)) { + r = write(fd, &i, sizeof(i)); + if (r == -1) { + r = errno; + printf("write_pattern: error: write() failed with: %d (%s)\n", r, strerror(r)); + break; + } + } + + close(fd); +} + +int verify_pattern(char *buf, size_t len, uint64_t off) +{ + size_t i; + + for (i = 0; i < len; i += sizeof(uint64_t)) { + uint64_t expected = i + off; + uint64_t actual = *(uint64_t*)(buf + i); + if (expected != actual) { + printf("error: offset %llu had %llu\n", (unsigned long long)expected, + (unsigned long long)actual); + exit(1); + } + } + return 0; +} + +void generate_pattern(void *buf, size_t len, uint64_t offset) +{ + uint64_t *v = buf; + size_t i; + + for (i=0; i= 2 && strcmp(argv[1], "read") == 0) + write = 0; + if (argc >= 2 && strcmp(argv[1], "write") == 0) + read = 0; + + if (read) { + write_pattern(); + + for (i = 0; i < 4096; i += 512) + for (j = 4*1024*1024 - 4096; j < 4*1024*1024 + 4096; j += 512) + for (k = 1024; k <= 16384; k *= 2) { + read_direct(i, j, k); + read_sync(i, j, k); + } + + } + unlink("foo"); + if (write) { + for (i = 0; i < 4096; i += 512) + for (j = 4*1024*1024 - 4096 + 512; j < 4*1024*1024 + 4096; j += 512) + for (k = 1024; k <= 16384; k *= 2) { + write_direct(i, j, k); + write_sync(i, j, k); + } + } + + + return 0; +} diff --git a/qa/workunits/erasure-code/.gitignore b/qa/workunits/erasure-code/.gitignore new file mode 100644 index 00000000..7e563b8b --- /dev/null +++ b/qa/workunits/erasure-code/.gitignore @@ -0,0 +1,2 @@ +*.log +*.trs diff --git a/qa/workunits/erasure-code/bench.html b/qa/workunits/erasure-code/bench.html new file mode 100644 index 00000000..3b4b6c74 --- /dev/null +++ b/qa/workunits/erasure-code/bench.html @@ -0,0 +1,34 @@ + + 
+ + + Erasure Code Plugins Benchmarks + + + + + + + + + + + +
+ <!-- [bench.html markup was garbled in this extract; the page presumably
+      loads examples.css, jquery.flot.js, jquery.flot.categories.js and the
+      bench.js data produced by bench.sh, and renders two plot placeholders
+      with the captions below] -->
+ <!-- encode: Y = GB/s, X = K/M -->
+ <!-- decode: Y = GB/s, X = K/M/erasures -->
+ + + diff --git a/qa/workunits/erasure-code/bench.sh b/qa/workunits/erasure-code/bench.sh new file mode 100755 index 00000000..23914ef8 --- /dev/null +++ b/qa/workunits/erasure-code/bench.sh @@ -0,0 +1,188 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2015 Red Hat +# Copyright (C) 2013,2014 Cloudwatt +# +# Author: Loic Dachary +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# +# Test that it works from sources with: +# +# CEPH_ERASURE_CODE_BENCHMARK=src/ceph_erasure_code_benchmark \ +# PLUGIN_DIRECTORY=build/lib \ +# qa/workunits/erasure-code/bench.sh fplot jerasure | +# tee qa/workunits/erasure-code/bench.js +# +# This should start immediately and display: +# +# ... +# [ '2/1', .48035538612887358583 ], +# [ '3/2', .21648470405675016626 ], +# etc. +# +# and complete within a few seconds. The result can then be displayed with: +# +# firefox qa/workunits/erasure-code/bench.html +# +# Once it is confirmed to work, it can be run with a more significant +# volume of data so that the measures are more reliable: +# +# TOTAL_SIZE=$((4 * 1024 * 1024 * 1024)) \ +# CEPH_ERASURE_CODE_BENCHMARK=src/ceph_erasure_code_benchmark \ +# PLUGIN_DIRECTORY=build/lib \ +# qa/workunits/erasure-code/bench.sh fplot jerasure | +# tee qa/workunits/erasure-code/bench.js +# +set -e + +export PATH=/sbin:$PATH + +: ${VERBOSE:=false} +: ${CEPH_ERASURE_CODE_BENCHMARK:=ceph_erasure_code_benchmark} +: ${PLUGIN_DIRECTORY:=/usr/lib/ceph/erasure-code} +: ${PLUGINS:=isa jerasure} +: ${TECHNIQUES:=vandermonde cauchy} +: ${TOTAL_SIZE:=$((1024 * 1024))} +: ${SIZE:=4096} +: ${PARAMETERS:=--parameter jerasure-per-chunk-alignment=true} + +function bench_header() { + echo -e "seconds\tKB\tplugin\tk\tm\twork.\titer.\tsize\teras.\tcommand." 
+} + +function bench() { + local plugin=$1 + shift + local k=$1 + shift + local m=$1 + shift + local workload=$1 + shift + local iterations=$1 + shift + local size=$1 + shift + local erasures=$1 + shift + command=$(echo $CEPH_ERASURE_CODE_BENCHMARK \ + --plugin $plugin \ + --workload $workload \ + --iterations $iterations \ + --size $size \ + --erasures $erasures \ + --parameter k=$k \ + --parameter m=$m \ + --erasure-code-dir $PLUGIN_DIRECTORY) + result=$($command "$@") + echo -e "$result\t$plugin\t$k\t$m\t$workload\t$iterations\t$size\t$erasures\t$command ""$@" +} + +function packetsize() { + local k=$1 + local w=$2 + local vector_wordsize=$3 + local size=$4 + + local p=$(( ($size / $k / $w / $vector_wordsize ) * $vector_wordsize)) + if [ $p -gt 3100 ] ; then + p=3100 + fi + echo $p +} + +function bench_run() { + local plugin=jerasure + local w=8 + local VECTOR_WORDSIZE=16 + local ks="2 3 4 6 10" + declare -A k2ms + k2ms[2]="1" + k2ms[3]="2" + k2ms[4]="2 3" + k2ms[6]="2 3 4" + k2ms[10]="3 4" + for technique in ${TECHNIQUES} ; do + for plugin in ${PLUGINS} ; do + eval technique_parameter=\$${plugin}2technique_${technique} + echo "serie encode_${technique}_${plugin}" + for k in $ks ; do + for m in ${k2ms[$k]} ; do + bench $plugin $k $m encode $(($TOTAL_SIZE / $SIZE)) $SIZE 0 \ + --parameter packetsize=$(packetsize $k $w $VECTOR_WORDSIZE $SIZE) \ + ${PARAMETERS} \ + --parameter technique=$technique_parameter + + done + done + done + done + for technique in ${TECHNIQUES} ; do + for plugin in ${PLUGINS} ; do + eval technique_parameter=\$${plugin}2technique_${technique} + echo "serie decode_${technique}_${plugin}" + for k in $ks ; do + for m in ${k2ms[$k]} ; do + echo + for erasures in $(seq 1 $m) ; do + bench $plugin $k $m decode $(($TOTAL_SIZE / $SIZE)) $SIZE $erasures \ + --parameter packetsize=$(packetsize $k $w $VECTOR_WORDSIZE $SIZE) \ + ${PARAMETERS} \ + --parameter technique=$technique_parameter + done + done + done + done + done +} + +function fplot() { + local serie + bench_run | while read seconds total plugin k m workload iteration size erasures rest ; do + if [ -z $seconds ] ; then + echo null, + elif [ $seconds = serie ] ; then + if [ "$serie" ] ; then + echo '];' + fi + local serie=`echo $total | sed 's/cauchy_\([0-9]\)/cauchy_good_\1/g'` + echo "var $serie = [" + else + local x + if [ $workload = encode ] ; then + x=$k/$m + else + x=$k/$m/$erasures + fi + echo "[ '$x', " $(echo "( $total / 1024 / 1024 ) / $seconds" | bc -ql) " ], " + fi + done + echo '];' +} + +function main() { + bench_header + bench_run +} + +if [ "$1" = fplot ] ; then + "$@" +else + main +fi +# Local Variables: +# compile-command: "\ +# CEPH_ERASURE_CODE_BENCHMARK=../../../src/ceph_erasure_code_benchmark \ +# PLUGIN_DIRECTORY=../../../build/lib \ +# ./bench.sh +# " +# End: diff --git a/qa/workunits/erasure-code/encode-decode-non-regression.sh b/qa/workunits/erasure-code/encode-decode-non-regression.sh new file mode 100755 index 00000000..7f36c91c --- /dev/null +++ b/qa/workunits/erasure-code/encode-decode-non-regression.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2014 Red Hat +# +# Author: Loic Dachary +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# +set -ex + +: ${CORPUS:=https://github.com/ceph/ceph-erasure-code-corpus.git} +: ${DIRECTORY:=$CEPH_ROOT/ceph-erasure-code-corpus} + +# when running from sources, the current directory must have precedence +export PATH=:$PATH + +if ! test -d $DIRECTORY ; then + git clone $CORPUS $DIRECTORY +fi + +my_version=v$(ceph --version | cut -f3 -d ' ') + +all_versions=$((ls -d $DIRECTORY/v* ; echo $DIRECTORY/$my_version ) | sort) + +for version in $all_versions ; do + if test -d $version ; then + $version/non-regression.sh + fi + if test $version = $DIRECTORY/$my_version ; then + break + fi +done diff --git a/qa/workunits/erasure-code/examples.css b/qa/workunits/erasure-code/examples.css new file mode 100644 index 00000000..ee472477 --- /dev/null +++ b/qa/workunits/erasure-code/examples.css @@ -0,0 +1,97 @@ +* { padding: 0; margin: 0; vertical-align: top; } + +body { + background: url(background.png) repeat-x; + font: 18px/1.5em "proxima-nova", Helvetica, Arial, sans-serif; +} + +a { color: #069; } +a:hover { color: #28b; } + +h2 { + margin-top: 15px; + font: normal 32px "omnes-pro", Helvetica, Arial, sans-serif; +} + +h3 { + margin-left: 30px; + font: normal 26px "omnes-pro", Helvetica, Arial, sans-serif; + color: #666; +} + +p { + margin-top: 10px; +} + +button { + font-size: 18px; + padding: 1px 7px; +} + +input { + font-size: 18px; +} + +input[type=checkbox] { + margin: 7px; +} + +#header { + position: relative; + width: 900px; + margin: auto; +} + +#header h2 { + margin-left: 10px; + vertical-align: middle; + font-size: 42px; + font-weight: bold; + text-decoration: none; + color: #000; +} + +#content { + width: 880px; + margin: 0 auto; + padding: 10px; +} + +#footer { + margin-top: 25px; + margin-bottom: 10px; + text-align: center; + font-size: 12px; + color: #999; +} + +.demo-container { + box-sizing: border-box; + width: 850px; + height: 450px; + padding: 20px 15px 15px 15px; + margin: 15px auto 30px auto; + border: 1px solid #ddd; + background: #fff; + background: linear-gradient(#f6f6f6 0, #fff 50px); + background: -o-linear-gradient(#f6f6f6 0, #fff 50px); + background: -ms-linear-gradient(#f6f6f6 0, #fff 50px); + background: -moz-linear-gradient(#f6f6f6 0, #fff 50px); + background: -webkit-linear-gradient(#f6f6f6 0, #fff 50px); + box-shadow: 0 3px 10px rgba(0,0,0,0.15); + -o-box-shadow: 0 3px 10px rgba(0,0,0,0.1); + -ms-box-shadow: 0 3px 10px rgba(0,0,0,0.1); + -moz-box-shadow: 0 3px 10px rgba(0,0,0,0.1); + -webkit-box-shadow: 0 3px 10px rgba(0,0,0,0.1); +} + +.demo-placeholder { + width: 100%; + height: 100%; + font-size: 14px; + line-height: 1.2em; +} + +.legend table { + border-spacing: 5px; +} \ No newline at end of file diff --git a/qa/workunits/erasure-code/jquery.flot.categories.js b/qa/workunits/erasure-code/jquery.flot.categories.js new file mode 100644 index 00000000..2f9b2579 --- /dev/null +++ b/qa/workunits/erasure-code/jquery.flot.categories.js @@ -0,0 +1,190 @@ +/* Flot plugin for plotting textual data or categories. + +Copyright (c) 2007-2014 IOLA and Ole Laursen. +Licensed under the MIT license. + +Consider a dataset like [["February", 34], ["March", 20], ...]. This plugin +allows you to plot such a dataset directly. + +To enable it, you must specify mode: "categories" on the axis with the textual +labels, e.g. 
+ + $.plot("#placeholder", data, { xaxis: { mode: "categories" } }); + +By default, the labels are ordered as they are met in the data series. If you +need a different ordering, you can specify "categories" on the axis options +and list the categories there: + + xaxis: { + mode: "categories", + categories: ["February", "March", "April"] + } + +If you need to customize the distances between the categories, you can specify +"categories" as an object mapping labels to values + + xaxis: { + mode: "categories", + categories: { "February": 1, "March": 3, "April": 4 } + } + +If you don't specify all categories, the remaining categories will be numbered +from the max value plus 1 (with a spacing of 1 between each). + +Internally, the plugin works by transforming the input data through an auto- +generated mapping where the first category becomes 0, the second 1, etc. +Hence, a point like ["February", 34] becomes [0, 34] internally in Flot (this +is visible in hover and click events that return numbers rather than the +category labels). The plugin also overrides the tick generator to spit out the +categories as ticks instead of the values. + +If you need to map a value back to its label, the mapping is always accessible +as "categories" on the axis object, e.g. plot.getAxes().xaxis.categories. + +*/ + +(function ($) { + var options = { + xaxis: { + categories: null + }, + yaxis: { + categories: null + } + }; + + function processRawData(plot, series, data, datapoints) { + // if categories are enabled, we need to disable + // auto-transformation to numbers so the strings are intact + // for later processing + + var xCategories = series.xaxis.options.mode == "categories", + yCategories = series.yaxis.options.mode == "categories"; + + if (!(xCategories || yCategories)) + return; + + var format = datapoints.format; + + if (!format) { + // FIXME: auto-detection should really not be defined here + var s = series; + format = []; + format.push({ x: true, number: true, required: true }); + format.push({ y: true, number: true, required: true }); + + if (s.bars.show || (s.lines.show && s.lines.fill)) { + var autoscale = !!((s.bars.show && s.bars.zero) || (s.lines.show && s.lines.zero)); + format.push({ y: true, number: true, required: false, defaultValue: 0, autoscale: autoscale }); + if (s.bars.horizontal) { + delete format[format.length - 1].y; + format[format.length - 1].x = true; + } + } + + datapoints.format = format; + } + + for (var m = 0; m < format.length; ++m) { + if (format[m].x && xCategories) + format[m].number = false; + + if (format[m].y && yCategories) + format[m].number = false; + } + } + + function getNextIndex(categories) { + var index = -1; + + for (var v in categories) + if (categories[v] > index) + index = categories[v]; + + return index + 1; + } + + function categoriesTickGenerator(axis) { + var res = []; + for (var label in axis.categories) { + var v = axis.categories[label]; + if (v >= axis.min && v <= axis.max) + res.push([v, label]); + } + + res.sort(function (a, b) { return a[0] - b[0]; }); + + return res; + } + + function setupCategoriesForAxis(series, axis, datapoints) { + if (series[axis].options.mode != "categories") + return; + + if (!series[axis].categories) { + // parse options + var c = {}, o = series[axis].options.categories || {}; + if ($.isArray(o)) { + for (var i = 0; i < o.length; ++i) + c[o[i]] = i; + } + else { + for (var v in o) + c[v] = o[v]; + } + + series[axis].categories = c; + } + + // fix ticks + if (!series[axis].options.ticks) + series[axis].options.ticks = 
categoriesTickGenerator; + + transformPointsOnAxis(datapoints, axis, series[axis].categories); + } + + function transformPointsOnAxis(datapoints, axis, categories) { + // go through the points, transforming them + var points = datapoints.points, + ps = datapoints.pointsize, + format = datapoints.format, + formatColumn = axis.charAt(0), + index = getNextIndex(categories); + + for (var i = 0; i < points.length; i += ps) { + if (points[i] == null) + continue; + + for (var m = 0; m < ps; ++m) { + var val = points[i + m]; + + if (val == null || !format[m][formatColumn]) + continue; + + if (!(val in categories)) { + categories[val] = index; + ++index; + } + + points[i + m] = categories[val]; + } + } + } + + function processDatapoints(plot, series, datapoints) { + setupCategoriesForAxis(series, "xaxis", datapoints); + setupCategoriesForAxis(series, "yaxis", datapoints); + } + + function init(plot) { + plot.hooks.processRawData.push(processRawData); + plot.hooks.processDatapoints.push(processDatapoints); + } + + $.plot.plugins.push({ + init: init, + options: options, + name: 'categories', + version: '1.0' + }); +})(jQuery); diff --git a/qa/workunits/erasure-code/jquery.flot.js b/qa/workunits/erasure-code/jquery.flot.js new file mode 100644 index 00000000..39f3e4cf --- /dev/null +++ b/qa/workunits/erasure-code/jquery.flot.js @@ -0,0 +1,3168 @@ +/* Javascript plotting library for jQuery, version 0.8.3. + +Copyright (c) 2007-2014 IOLA and Ole Laursen. +Licensed under the MIT license. + +*/ + +// first an inline dependency, jquery.colorhelpers.js, we inline it here +// for convenience + +/* Plugin for jQuery for working with colors. + * + * Version 1.1. + * + * Inspiration from jQuery color animation plugin by John Resig. + * + * Released under the MIT license by Ole Laursen, October 2009. + * + * Examples: + * + * $.color.parse("#fff").scale('rgb', 0.25).add('a', -0.5).toString() + * var c = $.color.extract($("#mydiv"), 'background-color'); + * console.log(c.r, c.g, c.b, c.a); + * $.color.make(100, 50, 25, 0.4).toString() // returns "rgba(100,50,25,0.4)" + * + * Note that .scale() and .add() return the same modified object + * instead of making a new one. + * + * V. 1.1: Fix error handling so e.g. parsing an empty string does + * produce a color rather than just crashing. 
+ */ +(function($){$.color={};$.color.make=function(r,g,b,a){var o={};o.r=r||0;o.g=g||0;o.b=b||0;o.a=a!=null?a:1;o.add=function(c,d){for(var i=0;i=1){return"rgb("+[o.r,o.g,o.b].join(",")+")"}else{return"rgba("+[o.r,o.g,o.b,o.a].join(",")+")"}};o.normalize=function(){function clamp(min,value,max){return valuemax?max:value}o.r=clamp(0,parseInt(o.r),255);o.g=clamp(0,parseInt(o.g),255);o.b=clamp(0,parseInt(o.b),255);o.a=clamp(0,o.a,1);return o};o.clone=function(){return $.color.make(o.r,o.b,o.g,o.a)};return o.normalize()};$.color.extract=function(elem,css){var c;do{c=elem.css(css).toLowerCase();if(c!=""&&c!="transparent")break;elem=elem.parent()}while(elem.length&&!$.nodeName(elem.get(0),"body"));if(c=="rgba(0, 0, 0, 0)")c="transparent";return $.color.parse(c)};$.color.parse=function(str){var res,m=$.color.make;if(res=/rgb\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*\)/.exec(str))return m(parseInt(res[1],10),parseInt(res[2],10),parseInt(res[3],10));if(res=/rgba\(\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]{1,3})\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(str))return m(parseInt(res[1],10),parseInt(res[2],10),parseInt(res[3],10),parseFloat(res[4]));if(res=/rgb\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*\)/.exec(str))return m(parseFloat(res[1])*2.55,parseFloat(res[2])*2.55,parseFloat(res[3])*2.55);if(res=/rgba\(\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\%\s*,\s*([0-9]+(?:\.[0-9]+)?)\s*\)/.exec(str))return m(parseFloat(res[1])*2.55,parseFloat(res[2])*2.55,parseFloat(res[3])*2.55,parseFloat(res[4]));if(res=/#([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})/.exec(str))return m(parseInt(res[1],16),parseInt(res[2],16),parseInt(res[3],16));if(res=/#([a-fA-F0-9])([a-fA-F0-9])([a-fA-F0-9])/.exec(str))return m(parseInt(res[1]+res[1],16),parseInt(res[2]+res[2],16),parseInt(res[3]+res[3],16));var name=$.trim(str).toLowerCase();if(name=="transparent")return m(255,255,255,0);else{res=lookupColors[name]||[0,0,0];return m(res[0],res[1],res[2])}};var lookupColors={aqua:[0,255,255],azure:[240,255,255],beige:[245,245,220],black:[0,0,0],blue:[0,0,255],brown:[165,42,42],cyan:[0,255,255],darkblue:[0,0,139],darkcyan:[0,139,139],darkgrey:[169,169,169],darkgreen:[0,100,0],darkkhaki:[189,183,107],darkmagenta:[139,0,139],darkolivegreen:[85,107,47],darkorange:[255,140,0],darkorchid:[153,50,204],darkred:[139,0,0],darksalmon:[233,150,122],darkviolet:[148,0,211],fuchsia:[255,0,255],gold:[255,215,0],green:[0,128,0],indigo:[75,0,130],khaki:[240,230,140],lightblue:[173,216,230],lightcyan:[224,255,255],lightgreen:[144,238,144],lightgrey:[211,211,211],lightpink:[255,182,193],lightyellow:[255,255,224],lime:[0,255,0],magenta:[255,0,255],maroon:[128,0,0],navy:[0,0,128],olive:[128,128,0],orange:[255,165,0],pink:[255,192,203],purple:[128,0,128],violet:[128,0,128],red:[255,0,0],silver:[192,192,192],white:[255,255,255],yellow:[255,255,0]}})(jQuery); + +// the actual Flot code +(function($) { + + // Cache the prototype hasOwnProperty for faster access + + var hasOwnProperty = Object.prototype.hasOwnProperty; + + // A shim to provide 'detach' to jQuery versions prior to 1.4. Using a DOM + // operation produces the same effect as detach, i.e. removing the element + // without touching its jQuery data. + + // Do not merge this into Flot 0.9, since it requires jQuery 1.4.4+. 
+ + if (!$.fn.detach) { + $.fn.detach = function() { + return this.each(function() { + if (this.parentNode) { + this.parentNode.removeChild( this ); + } + }); + }; + } + + /////////////////////////////////////////////////////////////////////////// + // The Canvas object is a wrapper around an HTML5 tag. + // + // @constructor + // @param {string} cls List of classes to apply to the canvas. + // @param {element} container Element onto which to append the canvas. + // + // Requiring a container is a little iffy, but unfortunately canvas + // operations don't work unless the canvas is attached to the DOM. + + function Canvas(cls, container) { + + var element = container.children("." + cls)[0]; + + if (element == null) { + + element = document.createElement("canvas"); + element.className = cls; + + $(element).css({ direction: "ltr", position: "absolute", left: 0, top: 0 }) + .appendTo(container); + + // If HTML5 Canvas isn't available, fall back to [Ex|Flash]canvas + + if (!element.getContext) { + if (window.G_vmlCanvasManager) { + element = window.G_vmlCanvasManager.initElement(element); + } else { + throw new Error("Canvas is not available. If you're using IE with a fall-back such as Excanvas, then there's either a mistake in your conditional include, or the page has no DOCTYPE and is rendering in Quirks Mode."); + } + } + } + + this.element = element; + + var context = this.context = element.getContext("2d"); + + // Determine the screen's ratio of physical to device-independent + // pixels. This is the ratio between the canvas width that the browser + // advertises and the number of pixels actually present in that space. + + // The iPhone 4, for example, has a device-independent width of 320px, + // but its screen is actually 640px wide. It therefore has a pixel + // ratio of 2, while most normal devices have a ratio of 1. + + var devicePixelRatio = window.devicePixelRatio || 1, + backingStoreRatio = + context.webkitBackingStorePixelRatio || + context.mozBackingStorePixelRatio || + context.msBackingStorePixelRatio || + context.oBackingStorePixelRatio || + context.backingStorePixelRatio || 1; + + this.pixelRatio = devicePixelRatio / backingStoreRatio; + + // Size the canvas to match the internal dimensions of its container + + this.resize(container.width(), container.height()); + + // Collection of HTML div layers for text overlaid onto the canvas + + this.textContainer = null; + this.text = {}; + + // Cache of text fragments and metrics, so we can avoid expensively + // re-calculating them when the plot is re-rendered in a loop. + + this._textCache = {}; + } + + // Resizes the canvas to the given dimensions. + // + // @param {number} width New width of the canvas, in pixels. + // @param {number} width New height of the canvas, in pixels. + + Canvas.prototype.resize = function(width, height) { + + if (width <= 0 || height <= 0) { + throw new Error("Invalid dimensions for plot, width = " + width + ", height = " + height); + } + + var element = this.element, + context = this.context, + pixelRatio = this.pixelRatio; + + // Resize the canvas, increasing its density based on the display's + // pixel ratio; basically giving it more pixels without increasing the + // size of its element, to take advantage of the fact that retina + // displays have that many more pixels in the same advertised space. 
+ + // Resizing should reset the state (excanvas seems to be buggy though) + + if (this.width != width) { + element.width = width * pixelRatio; + element.style.width = width + "px"; + this.width = width; + } + + if (this.height != height) { + element.height = height * pixelRatio; + element.style.height = height + "px"; + this.height = height; + } + + // Save the context, so we can reset in case we get replotted. The + // restore ensure that we're really back at the initial state, and + // should be safe even if we haven't saved the initial state yet. + + context.restore(); + context.save(); + + // Scale the coordinate space to match the display density; so even though we + // may have twice as many pixels, we still want lines and other drawing to + // appear at the same size; the extra pixels will just make them crisper. + + context.scale(pixelRatio, pixelRatio); + }; + + // Clears the entire canvas area, not including any overlaid HTML text + + Canvas.prototype.clear = function() { + this.context.clearRect(0, 0, this.width, this.height); + }; + + // Finishes rendering the canvas, including managing the text overlay. + + Canvas.prototype.render = function() { + + var cache = this._textCache; + + // For each text layer, add elements marked as active that haven't + // already been rendered, and remove those that are no longer active. + + for (var layerKey in cache) { + if (hasOwnProperty.call(cache, layerKey)) { + + var layer = this.getTextLayer(layerKey), + layerCache = cache[layerKey]; + + layer.hide(); + + for (var styleKey in layerCache) { + if (hasOwnProperty.call(layerCache, styleKey)) { + var styleCache = layerCache[styleKey]; + for (var key in styleCache) { + if (hasOwnProperty.call(styleCache, key)) { + + var positions = styleCache[key].positions; + + for (var i = 0, position; position = positions[i]; i++) { + if (position.active) { + if (!position.rendered) { + layer.append(position.element); + position.rendered = true; + } + } else { + positions.splice(i--, 1); + if (position.rendered) { + position.element.detach(); + } + } + } + + if (positions.length == 0) { + delete styleCache[key]; + } + } + } + } + } + + layer.show(); + } + } + }; + + // Creates (if necessary) and returns the text overlay container. + // + // @param {string} classes String of space-separated CSS classes used to + // uniquely identify the text layer. + // @return {object} The jQuery-wrapped text-layer div. + + Canvas.prototype.getTextLayer = function(classes) { + + var layer = this.text[classes]; + + // Create the text layer if it doesn't exist + + if (layer == null) { + + // Create the text layer container, if it doesn't exist + + if (this.textContainer == null) { + this.textContainer = $("
") + .css({ + position: "absolute", + top: 0, + left: 0, + bottom: 0, + right: 0, + 'font-size': "smaller", + color: "#545454" + }) + .insertAfter(this.element); + } + + layer = this.text[classes] = $("
") + .addClass(classes) + .css({ + position: "absolute", + top: 0, + left: 0, + bottom: 0, + right: 0 + }) + .appendTo(this.textContainer); + } + + return layer; + }; + + // Creates (if necessary) and returns a text info object. + // + // The object looks like this: + // + // { + // width: Width of the text's wrapper div. + // height: Height of the text's wrapper div. + // element: The jQuery-wrapped HTML div containing the text. + // positions: Array of positions at which this text is drawn. + // } + // + // The positions array contains objects that look like this: + // + // { + // active: Flag indicating whether the text should be visible. + // rendered: Flag indicating whether the text is currently visible. + // element: The jQuery-wrapped HTML div containing the text. + // x: X coordinate at which to draw the text. + // y: Y coordinate at which to draw the text. + // } + // + // Each position after the first receives a clone of the original element. + // + // The idea is that that the width, height, and general 'identity' of the + // text is constant no matter where it is placed; the placements are a + // secondary property. + // + // Canvas maintains a cache of recently-used text info objects; getTextInfo + // either returns the cached element or creates a new entry. + // + // @param {string} layer A string of space-separated CSS classes uniquely + // identifying the layer containing this text. + // @param {string} text Text string to retrieve info for. + // @param {(string|object)=} font Either a string of space-separated CSS + // classes or a font-spec object, defining the text's font and style. + // @param {number=} angle Angle at which to rotate the text, in degrees. + // Angle is currently unused, it will be implemented in the future. + // @param {number=} width Maximum width of the text before it wraps. + // @return {object} a text info object. + + Canvas.prototype.getTextInfo = function(layer, text, font, angle, width) { + + var textStyle, layerCache, styleCache, info; + + // Cast the value to a string, in case we were given a number or such + + text = "" + text; + + // If the font is a font-spec object, generate a CSS font definition + + if (typeof font === "object") { + textStyle = font.style + " " + font.variant + " " + font.weight + " " + font.size + "px/" + font.lineHeight + "px " + font.family; + } else { + textStyle = font; + } + + // Retrieve (or create) the cache for the text's layer and styles + + layerCache = this._textCache[layer]; + + if (layerCache == null) { + layerCache = this._textCache[layer] = {}; + } + + styleCache = layerCache[textStyle]; + + if (styleCache == null) { + styleCache = layerCache[textStyle] = {}; + } + + info = styleCache[text]; + + // If we can't find a matching element in our cache, create a new one + + if (info == null) { + + var element = $("
").html(text) + .css({ + position: "absolute", + 'max-width': width, + top: -9999 + }) + .appendTo(this.getTextLayer(layer)); + + if (typeof font === "object") { + element.css({ + font: textStyle, + color: font.color + }); + } else if (typeof font === "string") { + element.addClass(font); + } + + info = styleCache[text] = { + width: element.outerWidth(true), + height: element.outerHeight(true), + element: element, + positions: [] + }; + + element.detach(); + } + + return info; + }; + + // Adds a text string to the canvas text overlay. + // + // The text isn't drawn immediately; it is marked as rendering, which will + // result in its addition to the canvas on the next render pass. + // + // @param {string} layer A string of space-separated CSS classes uniquely + // identifying the layer containing this text. + // @param {number} x X coordinate at which to draw the text. + // @param {number} y Y coordinate at which to draw the text. + // @param {string} text Text string to draw. + // @param {(string|object)=} font Either a string of space-separated CSS + // classes or a font-spec object, defining the text's font and style. + // @param {number=} angle Angle at which to rotate the text, in degrees. + // Angle is currently unused, it will be implemented in the future. + // @param {number=} width Maximum width of the text before it wraps. + // @param {string=} halign Horizontal alignment of the text; either "left", + // "center" or "right". + // @param {string=} valign Vertical alignment of the text; either "top", + // "middle" or "bottom". + + Canvas.prototype.addText = function(layer, x, y, text, font, angle, width, halign, valign) { + + var info = this.getTextInfo(layer, text, font, angle, width), + positions = info.positions; + + // Tweak the div's position to match the text's alignment + + if (halign == "center") { + x -= info.width / 2; + } else if (halign == "right") { + x -= info.width; + } + + if (valign == "middle") { + y -= info.height / 2; + } else if (valign == "bottom") { + y -= info.height; + } + + // Determine whether this text already exists at this position. + // If so, mark it for inclusion in the next render pass. + + for (var i = 0, position; position = positions[i]; i++) { + if (position.x == x && position.y == y) { + position.active = true; + return; + } + } + + // If the text doesn't exist at this position, create a new entry + + // For the very first position we'll re-use the original element, + // while for subsequent ones we'll clone it. + + position = { + active: true, + rendered: false, + element: positions.length ? info.element.clone() : info.element, + x: x, + y: y + }; + + positions.push(position); + + // Move the element to its final position within the container + + position.element.css({ + top: Math.round(y), + left: Math.round(x), + 'text-align': halign // In case the text wraps + }); + }; + + // Removes one or more text strings from the canvas text overlay. + // + // If no parameters are given, all text within the layer is removed. + // + // Note that the text is not immediately removed; it is simply marked as + // inactive, which will result in its removal on the next render pass. + // This avoids the performance penalty for 'clear and redraw' behavior, + // where we potentially get rid of all text on a layer, but will likely + // add back most or all of it later, as when redrawing axes, for example. + // + // @param {string} layer A string of space-separated CSS classes uniquely + // identifying the layer containing this text. 
+ // @param {number=} x X coordinate of the text. + // @param {number=} y Y coordinate of the text. + // @param {string=} text Text string to remove. + // @param {(string|object)=} font Either a string of space-separated CSS + // classes or a font-spec object, defining the text's font and style. + // @param {number=} angle Angle at which the text is rotated, in degrees. + // Angle is currently unused, it will be implemented in the future. + + Canvas.prototype.removeText = function(layer, x, y, text, font, angle) { + if (text == null) { + var layerCache = this._textCache[layer]; + if (layerCache != null) { + for (var styleKey in layerCache) { + if (hasOwnProperty.call(layerCache, styleKey)) { + var styleCache = layerCache[styleKey]; + for (var key in styleCache) { + if (hasOwnProperty.call(styleCache, key)) { + var positions = styleCache[key].positions; + for (var i = 0, position; position = positions[i]; i++) { + position.active = false; + } + } + } + } + } + } + } else { + var positions = this.getTextInfo(layer, text, font, angle).positions; + for (var i = 0, position; position = positions[i]; i++) { + if (position.x == x && position.y == y) { + position.active = false; + } + } + } + }; + + /////////////////////////////////////////////////////////////////////////// + // The top-level container for the entire plot. + + function Plot(placeholder, data_, options_, plugins) { + // data is on the form: + // [ series1, series2 ... ] + // where series is either just the data as [ [x1, y1], [x2, y2], ... ] + // or { data: [ [x1, y1], [x2, y2], ... ], label: "some label", ... } + + var series = [], + options = { + // the color theme used for graphs + colors: ["#edc240", "#afd8f8", "#cb4b4b", "#4da74d", "#9440ed"], + legend: { + show: true, + noColumns: 1, // number of colums in legend table + labelFormatter: null, // fn: string -> string + labelBoxBorderColor: "#ccc", // border color for the little label boxes + container: null, // container (as jQuery object) to put legend in, null means default on top of graph + position: "ne", // position of default legend container within plot + margin: 5, // distance from grid edge to default legend container within plot + backgroundColor: null, // null means auto-detect + backgroundOpacity: 0.85, // set to 0 to avoid background + sorted: null // default to no legend sorting + }, + xaxis: { + show: null, // null = auto-detect, true = always, false = never + position: "bottom", // or "top" + mode: null, // null or "time" + font: null, // null (derived from CSS in placeholder) or object like { size: 11, lineHeight: 13, style: "italic", weight: "bold", family: "sans-serif", variant: "small-caps" } + color: null, // base color, labels, ticks + tickColor: null, // possibly different color of ticks, e.g. "rgba(0,0,0,0.15)" + transform: null, // null or f: number -> number to transform axis + inverseTransform: null, // if transform is set, this should be the inverse function + min: null, // min. value to show, null means set automatically + max: null, // max. value to show, null means set automatically + autoscaleMargin: null, // margin in % to add if auto-setting min/max + ticks: null, // either [1, 3] or [[1, "a"], 3] or (fn: axis info -> ticks) or app. 
number of ticks for auto-ticks + tickFormatter: null, // fn: number -> string + labelWidth: null, // size of tick labels in pixels + labelHeight: null, + reserveSpace: null, // whether to reserve space even if axis isn't shown + tickLength: null, // size in pixels of ticks, or "full" for whole line + alignTicksWithAxis: null, // axis number or null for no sync + tickDecimals: null, // no. of decimals, null means auto + tickSize: null, // number or [number, "unit"] + minTickSize: null // number or [number, "unit"] + }, + yaxis: { + autoscaleMargin: 0.02, + position: "left" // or "right" + }, + xaxes: [], + yaxes: [], + series: { + points: { + show: false, + radius: 3, + lineWidth: 2, // in pixels + fill: true, + fillColor: "#ffffff", + symbol: "circle" // or callback + }, + lines: { + // we don't put in show: false so we can see + // whether lines were actively disabled + lineWidth: 2, // in pixels + fill: false, + fillColor: null, + steps: false + // Omit 'zero', so we can later default its value to + // match that of the 'fill' option. + }, + bars: { + show: false, + lineWidth: 2, // in pixels + barWidth: 1, // in units of the x axis + fill: true, + fillColor: null, + align: "left", // "left", "right", or "center" + horizontal: false, + zero: true + }, + shadowSize: 3, + highlightColor: null + }, + grid: { + show: true, + aboveData: false, + color: "#545454", // primary color used for outline and labels + backgroundColor: null, // null for transparent, else color + borderColor: null, // set if different from the grid color + tickColor: null, // color for the ticks, e.g. "rgba(0,0,0,0.15)" + margin: 0, // distance from the canvas edge to the grid + labelMargin: 5, // in pixels + axisMargin: 8, // in pixels + borderWidth: 2, // in pixels + minBorderMargin: null, // in pixels, null means taken from points radius + markings: null, // array of ranges or fn: axes -> array of ranges + markingsColor: "#f4f4f4", + markingsLineWidth: 2, + // interactive stuff + clickable: false, + hoverable: false, + autoHighlight: true, // highlight in case mouse is near + mouseActiveRadius: 10 // how far the mouse can be away to activate an item + }, + interaction: { + redrawOverlayInterval: 1000/60 // time between updates, -1 means in same flow + }, + hooks: {} + }, + surface = null, // the canvas for the plot itself + overlay = null, // canvas for interactive stuff on top of plot + eventHolder = null, // jQuery object that events should be bound to + ctx = null, octx = null, + xaxes = [], yaxes = [], + plotOffset = { left: 0, right: 0, top: 0, bottom: 0}, + plotWidth = 0, plotHeight = 0, + hooks = { + processOptions: [], + processRawData: [], + processDatapoints: [], + processOffset: [], + drawBackground: [], + drawSeries: [], + draw: [], + bindEvents: [], + drawOverlay: [], + shutdown: [] + }, + plot = this; + + // public functions + plot.setData = setData; + plot.setupGrid = setupGrid; + plot.draw = draw; + plot.getPlaceholder = function() { return placeholder; }; + plot.getCanvas = function() { return surface.element; }; + plot.getPlotOffset = function() { return plotOffset; }; + plot.width = function () { return plotWidth; }; + plot.height = function () { return plotHeight; }; + plot.offset = function () { + var o = eventHolder.offset(); + o.left += plotOffset.left; + o.top += plotOffset.top; + return o; + }; + plot.getData = function () { return series; }; + plot.getAxes = function () { + var res = {}, i; + $.each(xaxes.concat(yaxes), function (_, axis) { + if (axis) + res[axis.direction + (axis.n != 1 ? 
axis.n : "") + "axis"] = axis; + }); + return res; + }; + plot.getXAxes = function () { return xaxes; }; + plot.getYAxes = function () { return yaxes; }; + plot.c2p = canvasToAxisCoords; + plot.p2c = axisToCanvasCoords; + plot.getOptions = function () { return options; }; + plot.highlight = highlight; + plot.unhighlight = unhighlight; + plot.triggerRedrawOverlay = triggerRedrawOverlay; + plot.pointOffset = function(point) { + return { + left: parseInt(xaxes[axisNumber(point, "x") - 1].p2c(+point.x) + plotOffset.left, 10), + top: parseInt(yaxes[axisNumber(point, "y") - 1].p2c(+point.y) + plotOffset.top, 10) + }; + }; + plot.shutdown = shutdown; + plot.destroy = function () { + shutdown(); + placeholder.removeData("plot").empty(); + + series = []; + options = null; + surface = null; + overlay = null; + eventHolder = null; + ctx = null; + octx = null; + xaxes = []; + yaxes = []; + hooks = null; + highlights = []; + plot = null; + }; + plot.resize = function () { + var width = placeholder.width(), + height = placeholder.height(); + surface.resize(width, height); + overlay.resize(width, height); + }; + + // public attributes + plot.hooks = hooks; + + // initialize + initPlugins(plot); + parseOptions(options_); + setupCanvases(); + setData(data_); + setupGrid(); + draw(); + bindEvents(); + + + function executeHooks(hook, args) { + args = [plot].concat(args); + for (var i = 0; i < hook.length; ++i) + hook[i].apply(this, args); + } + + function initPlugins() { + + // References to key classes, allowing plugins to modify them + + var classes = { + Canvas: Canvas + }; + + for (var i = 0; i < plugins.length; ++i) { + var p = plugins[i]; + p.init(plot, classes); + if (p.options) + $.extend(true, options, p.options); + } + } + + function parseOptions(opts) { + + $.extend(true, options, opts); + + // $.extend merges arrays, rather than replacing them. When less + // colors are provided than the size of the default palette, we + // end up with those colors plus the remaining defaults, which is + // not expected behavior; avoid it by replacing them here. + + if (opts && opts.colors) { + options.colors = opts.colors; + } + + if (options.xaxis.color == null) + options.xaxis.color = $.color.parse(options.grid.color).scale('a', 0.22).toString(); + if (options.yaxis.color == null) + options.yaxis.color = $.color.parse(options.grid.color).scale('a', 0.22).toString(); + + if (options.xaxis.tickColor == null) // grid.tickColor for back-compatibility + options.xaxis.tickColor = options.grid.tickColor || options.xaxis.color; + if (options.yaxis.tickColor == null) // grid.tickColor for back-compatibility + options.yaxis.tickColor = options.grid.tickColor || options.yaxis.color; + + if (options.grid.borderColor == null) + options.grid.borderColor = options.grid.color; + if (options.grid.tickColor == null) + options.grid.tickColor = $.color.parse(options.grid.color).scale('a', 0.22).toString(); + + // Fill in defaults for axis options, including any unspecified + // font-spec fields, if a font-spec was provided. + + // If no x/y axis options were provided, create one of each anyway, + // since the rest of the code assumes that they exist. + + var i, axisOptions, axisCount, + fontSize = placeholder.css("font-size"), + fontSizeDefault = fontSize ? 
+fontSize.replace("px", "") : 13, + fontDefaults = { + style: placeholder.css("font-style"), + size: Math.round(0.8 * fontSizeDefault), + variant: placeholder.css("font-variant"), + weight: placeholder.css("font-weight"), + family: placeholder.css("font-family") + }; + + axisCount = options.xaxes.length || 1; + for (i = 0; i < axisCount; ++i) { + + axisOptions = options.xaxes[i]; + if (axisOptions && !axisOptions.tickColor) { + axisOptions.tickColor = axisOptions.color; + } + + axisOptions = $.extend(true, {}, options.xaxis, axisOptions); + options.xaxes[i] = axisOptions; + + if (axisOptions.font) { + axisOptions.font = $.extend({}, fontDefaults, axisOptions.font); + if (!axisOptions.font.color) { + axisOptions.font.color = axisOptions.color; + } + if (!axisOptions.font.lineHeight) { + axisOptions.font.lineHeight = Math.round(axisOptions.font.size * 1.15); + } + } + } + + axisCount = options.yaxes.length || 1; + for (i = 0; i < axisCount; ++i) { + + axisOptions = options.yaxes[i]; + if (axisOptions && !axisOptions.tickColor) { + axisOptions.tickColor = axisOptions.color; + } + + axisOptions = $.extend(true, {}, options.yaxis, axisOptions); + options.yaxes[i] = axisOptions; + + if (axisOptions.font) { + axisOptions.font = $.extend({}, fontDefaults, axisOptions.font); + if (!axisOptions.font.color) { + axisOptions.font.color = axisOptions.color; + } + if (!axisOptions.font.lineHeight) { + axisOptions.font.lineHeight = Math.round(axisOptions.font.size * 1.15); + } + } + } + + // backwards compatibility, to be removed in future + if (options.xaxis.noTicks && options.xaxis.ticks == null) + options.xaxis.ticks = options.xaxis.noTicks; + if (options.yaxis.noTicks && options.yaxis.ticks == null) + options.yaxis.ticks = options.yaxis.noTicks; + if (options.x2axis) { + options.xaxes[1] = $.extend(true, {}, options.xaxis, options.x2axis); + options.xaxes[1].position = "top"; + // Override the inherit to allow the axis to auto-scale + if (options.x2axis.min == null) { + options.xaxes[1].min = null; + } + if (options.x2axis.max == null) { + options.xaxes[1].max = null; + } + } + if (options.y2axis) { + options.yaxes[1] = $.extend(true, {}, options.yaxis, options.y2axis); + options.yaxes[1].position = "right"; + // Override the inherit to allow the axis to auto-scale + if (options.y2axis.min == null) { + options.yaxes[1].min = null; + } + if (options.y2axis.max == null) { + options.yaxes[1].max = null; + } + } + if (options.grid.coloredAreas) + options.grid.markings = options.grid.coloredAreas; + if (options.grid.coloredAreasColor) + options.grid.markingsColor = options.grid.coloredAreasColor; + if (options.lines) + $.extend(true, options.series.lines, options.lines); + if (options.points) + $.extend(true, options.series.points, options.points); + if (options.bars) + $.extend(true, options.series.bars, options.bars); + if (options.shadowSize != null) + options.series.shadowSize = options.shadowSize; + if (options.highlightColor != null) + options.series.highlightColor = options.highlightColor; + + // save options on axes for future reference + for (i = 0; i < options.xaxes.length; ++i) + getOrCreateAxis(xaxes, i + 1).options = options.xaxes[i]; + for (i = 0; i < options.yaxes.length; ++i) + getOrCreateAxis(yaxes, i + 1).options = options.yaxes[i]; + + // add hooks from options + for (var n in hooks) + if (options.hooks[n] && options.hooks[n].length) + hooks[n] = hooks[n].concat(options.hooks[n]); + + executeHooks(hooks.processOptions, [options]); + } + + function setData(d) { + series = parseData(d); + 
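// The forms accepted here are the ones described at the top of Plot; a made-up
// example of feeding both the bare-array and the series-object form through the
// public API:
//
//     plot.setData([
//         [ [0, 3], [4, 8], [8, 5] ],                                // bare data array
//         { data: [ [0, 1], [4, 2], [8, 7] ], label: "cpu", color: 2 }
//     ]);
//     plot.setupGrid();   // recompute ranges and ticks for the new data
//     plot.draw();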
fillInSeriesOptions(); + processData(); + } + + function parseData(d) { + var res = []; + for (var i = 0; i < d.length; ++i) { + var s = $.extend(true, {}, options.series); + + if (d[i].data != null) { + s.data = d[i].data; // move the data instead of deep-copy + delete d[i].data; + + $.extend(true, s, d[i]); + + d[i].data = s.data; + } + else + s.data = d[i]; + res.push(s); + } + + return res; + } + + function axisNumber(obj, coord) { + var a = obj[coord + "axis"]; + if (typeof a == "object") // if we got a real axis, extract number + a = a.n; + if (typeof a != "number") + a = 1; // default to first axis + return a; + } + + function allAxes() { + // return flat array without annoying null entries + return $.grep(xaxes.concat(yaxes), function (a) { return a; }); + } + + function canvasToAxisCoords(pos) { + // return an object with x/y corresponding to all used axes + var res = {}, i, axis; + for (i = 0; i < xaxes.length; ++i) { + axis = xaxes[i]; + if (axis && axis.used) + res["x" + axis.n] = axis.c2p(pos.left); + } + + for (i = 0; i < yaxes.length; ++i) { + axis = yaxes[i]; + if (axis && axis.used) + res["y" + axis.n] = axis.c2p(pos.top); + } + + if (res.x1 !== undefined) + res.x = res.x1; + if (res.y1 !== undefined) + res.y = res.y1; + + return res; + } + + function axisToCanvasCoords(pos) { + // get canvas coords from the first pair of x/y found in pos + var res = {}, i, axis, key; + + for (i = 0; i < xaxes.length; ++i) { + axis = xaxes[i]; + if (axis && axis.used) { + key = "x" + axis.n; + if (pos[key] == null && axis.n == 1) + key = "x"; + + if (pos[key] != null) { + res.left = axis.p2c(pos[key]); + break; + } + } + } + + for (i = 0; i < yaxes.length; ++i) { + axis = yaxes[i]; + if (axis && axis.used) { + key = "y" + axis.n; + if (pos[key] == null && axis.n == 1) + key = "y"; + + if (pos[key] != null) { + res.top = axis.p2c(pos[key]); + break; + } + } + } + + return res; + } + + function getOrCreateAxis(axes, number) { + if (!axes[number - 1]) + axes[number - 1] = { + n: number, // save the number for future reference + direction: axes == xaxes ? "x" : "y", + options: $.extend(true, {}, axes == xaxes ? options.xaxis : options.yaxis) + }; + + return axes[number - 1]; + } + + function fillInSeriesOptions() { + + var neededColors = series.length, maxIndex = -1, i; + + // Subtract the number of series that already have fixed colors or + // color indexes from the number that we still need to generate. + + for (i = 0; i < series.length; ++i) { + var sc = series[i].color; + if (sc != null) { + neededColors--; + if (typeof sc == "number" && sc > maxIndex) { + maxIndex = sc; + } + } + } + + // If any of the series have fixed color indexes, then we need to + // generate at least as many colors as the highest index. + + if (neededColors <= maxIndex) { + neededColors = maxIndex + 1; + } + + // Generate all the colors, using first the option colors and then + // variations on those colors once they're exhausted. + + var c, colors = [], colorPool = options.colors, + colorPoolSize = colorPool.length, variation = 0; + + for (i = 0; i < neededColors; i++) { + + c = $.color.parse(colorPool[i % colorPoolSize] || "#666"); + + // Each time we exhaust the colors in the pool we adjust + // a scaling factor used to produce more variations on + // those colors. The factor alternates negative/positive + // to produce lighter/darker colors. + + // Reset the variation after every few cycles, or else + // it will end up producing only white or black colors. 
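// From the caller's side, the three ways of giving a series its color look like
// this (an assumed example: d1-d3 stand for data arrays, and the numeric index
// picks an entry from the options.colors palette defined above):
//
//     $.plot(placeholder, [
//         { data: d1 },                    // auto-assigned from the generated palette
//         { data: d2, color: "#cb4b4b" },  // fixed color, skipped by the generator
//         { data: d3, color: 1 }           // index 1 of options.colors, i.e. "#afd8f8"
//     ]);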
+ + if (i % colorPoolSize == 0 && i) { + if (variation >= 0) { + if (variation < 0.5) { + variation = -variation - 0.2; + } else variation = 0; + } else variation = -variation; + } + + colors[i] = c.scale('rgb', 1 + variation); + } + + // Finalize the series options, filling in their colors + + var colori = 0, s; + for (i = 0; i < series.length; ++i) { + s = series[i]; + + // assign colors + if (s.color == null) { + s.color = colors[colori].toString(); + ++colori; + } + else if (typeof s.color == "number") + s.color = colors[s.color].toString(); + + // turn on lines automatically in case nothing is set + if (s.lines.show == null) { + var v, show = true; + for (v in s) + if (s[v] && s[v].show) { + show = false; + break; + } + if (show) + s.lines.show = true; + } + + // If nothing was provided for lines.zero, default it to match + // lines.fill, since areas by default should extend to zero. + + if (s.lines.zero == null) { + s.lines.zero = !!s.lines.fill; + } + + // setup axes + s.xaxis = getOrCreateAxis(xaxes, axisNumber(s, "x")); + s.yaxis = getOrCreateAxis(yaxes, axisNumber(s, "y")); + } + } + + function processData() { + var topSentry = Number.POSITIVE_INFINITY, + bottomSentry = Number.NEGATIVE_INFINITY, + fakeInfinity = Number.MAX_VALUE, + i, j, k, m, length, + s, points, ps, x, y, axis, val, f, p, + data, format; + + function updateAxis(axis, min, max) { + if (min < axis.datamin && min != -fakeInfinity) + axis.datamin = min; + if (max > axis.datamax && max != fakeInfinity) + axis.datamax = max; + } + + $.each(allAxes(), function (_, axis) { + // init axis + axis.datamin = topSentry; + axis.datamax = bottomSentry; + axis.used = false; + }); + + for (i = 0; i < series.length; ++i) { + s = series[i]; + s.datapoints = { points: [] }; + + executeHooks(hooks.processRawData, [ s, s.data, s.datapoints ]); + } + + // first pass: clean and copy data + for (i = 0; i < series.length; ++i) { + s = series[i]; + + data = s.data; + format = s.datapoints.format; + + if (!format) { + format = []; + // find out how to copy + format.push({ x: true, number: true, required: true }); + format.push({ y: true, number: true, required: true }); + + if (s.bars.show || (s.lines.show && s.lines.fill)) { + var autoscale = !!((s.bars.show && s.bars.zero) || (s.lines.show && s.lines.zero)); + format.push({ y: true, number: true, required: false, defaultValue: 0, autoscale: autoscale }); + if (s.bars.horizontal) { + delete format[format.length - 1].y; + format[format.length - 1].x = true; + } + } + + s.datapoints.format = format; + } + + if (s.datapoints.pointsize != null) + continue; // already filled in + + s.datapoints.pointsize = format.length; + + ps = s.datapoints.pointsize; + points = s.datapoints.points; + + var insertSteps = s.lines.show && s.lines.steps; + s.xaxis.used = s.yaxis.used = true; + + for (j = k = 0; j < data.length; ++j, k += ps) { + p = data[j]; + + var nullify = p == null; + if (!nullify) { + for (m = 0; m < ps; ++m) { + val = p[m]; + f = format[m]; + + if (f) { + if (f.number && val != null) { + val = +val; // convert to number + if (isNaN(val)) + val = null; + else if (val == Infinity) + val = fakeInfinity; + else if (val == -Infinity) + val = -fakeInfinity; + } + + if (val == null) { + if (f.required) + nullify = true; + + if (f.defaultValue != null) + val = f.defaultValue; + } + } + + points[k + m] = val; + } + } + + if (nullify) { + for (m = 0; m < ps; ++m) { + val = points[k + m]; + if (val != null) { + f = format[m]; + // extract min/max info + if (f.autoscale !== false) { + if (f.x) { + 
updateAxis(s.xaxis, val, val); + } + if (f.y) { + updateAxis(s.yaxis, val, val); + } + } + } + points[k + m] = null; + } + } + else { + // a little bit of line specific stuff that + // perhaps shouldn't be here, but lacking + // better means... + if (insertSteps && k > 0 + && points[k - ps] != null + && points[k - ps] != points[k] + && points[k - ps + 1] != points[k + 1]) { + // copy the point to make room for a middle point + for (m = 0; m < ps; ++m) + points[k + ps + m] = points[k + m]; + + // middle point has same y + points[k + 1] = points[k - ps + 1]; + + // we've added a point, better reflect that + k += ps; + } + } + } + } + + // give the hooks a chance to run + for (i = 0; i < series.length; ++i) { + s = series[i]; + + executeHooks(hooks.processDatapoints, [ s, s.datapoints]); + } + + // second pass: find datamax/datamin for auto-scaling + for (i = 0; i < series.length; ++i) { + s = series[i]; + points = s.datapoints.points; + ps = s.datapoints.pointsize; + format = s.datapoints.format; + + var xmin = topSentry, ymin = topSentry, + xmax = bottomSentry, ymax = bottomSentry; + + for (j = 0; j < points.length; j += ps) { + if (points[j] == null) + continue; + + for (m = 0; m < ps; ++m) { + val = points[j + m]; + f = format[m]; + if (!f || f.autoscale === false || val == fakeInfinity || val == -fakeInfinity) + continue; + + if (f.x) { + if (val < xmin) + xmin = val; + if (val > xmax) + xmax = val; + } + if (f.y) { + if (val < ymin) + ymin = val; + if (val > ymax) + ymax = val; + } + } + } + + if (s.bars.show) { + // make sure we got room for the bar on the dancing floor + var delta; + + switch (s.bars.align) { + case "left": + delta = 0; + break; + case "right": + delta = -s.bars.barWidth; + break; + default: + delta = -s.bars.barWidth / 2; + } + + if (s.bars.horizontal) { + ymin += delta; + ymax += delta + s.bars.barWidth; + } + else { + xmin += delta; + xmax += delta + s.bars.barWidth; + } + } + + updateAxis(s.xaxis, xmin, xmax); + updateAxis(s.yaxis, ymin, ymax); + } + + $.each(allAxes(), function (_, axis) { + if (axis.datamin == topSentry) + axis.datamin = null; + if (axis.datamax == bottomSentry) + axis.datamax = null; + }); + } + + function setupCanvases() { + + // Make sure the placeholder is clear of everything except canvases + // from a previous plot in this container that we'll try to re-use. + + placeholder.css("padding", 0) // padding messes up the positioning + .children().filter(function(){ + return !$(this).hasClass("flot-overlay") && !$(this).hasClass('flot-base'); + }).remove(); + + if (placeholder.css("position") == 'static') + placeholder.css("position", "relative"); // for positioning labels and overlay + + surface = new Canvas("flot-base", placeholder); + overlay = new Canvas("flot-overlay", placeholder); // overlay canvas for interactive features + + ctx = surface.context; + octx = overlay.context; + + // define which element we're listening for events on + eventHolder = $(overlay.element).unbind(); + + // If we're re-using a plot object, shut down the old one + + var existing = placeholder.data("plot"); + + if (existing) { + existing.shutdown(); + overlay.clear(); + } + + // save in case we get replotted + placeholder.data("plot", plot); + } + + function bindEvents() { + // bind events + if (options.grid.hoverable) { + eventHolder.mousemove(onMouseMove); + + // Use bind, rather than .mouseleave, because we officially + // still support jQuery 1.2.6, which doesn't define a shortcut + // for mouseenter or mouseleave. 
This was a bug/oversight that + // was fixed somewhere around 1.3.x. We can return to using + // .mouseleave when we drop support for 1.2.6. + + eventHolder.bind("mouseleave", onMouseLeave); + } + + if (options.grid.clickable) + eventHolder.click(onClick); + + executeHooks(hooks.bindEvents, [eventHolder]); + } + + function shutdown() { + if (redrawTimeout) + clearTimeout(redrawTimeout); + + eventHolder.unbind("mousemove", onMouseMove); + eventHolder.unbind("mouseleave", onMouseLeave); + eventHolder.unbind("click", onClick); + + executeHooks(hooks.shutdown, [eventHolder]); + } + + function setTransformationHelpers(axis) { + // set helper functions on the axis, assumes plot area + // has been computed already + + function identity(x) { return x; } + + var s, m, t = axis.options.transform || identity, + it = axis.options.inverseTransform; + + // precompute how much the axis is scaling a point + // in canvas space + if (axis.direction == "x") { + s = axis.scale = plotWidth / Math.abs(t(axis.max) - t(axis.min)); + m = Math.min(t(axis.max), t(axis.min)); + } + else { + s = axis.scale = plotHeight / Math.abs(t(axis.max) - t(axis.min)); + s = -s; + m = Math.max(t(axis.max), t(axis.min)); + } + + // data point to canvas coordinate + if (t == identity) // slight optimization + axis.p2c = function (p) { return (p - m) * s; }; + else + axis.p2c = function (p) { return (t(p) - m) * s; }; + // canvas coordinate to data point + if (!it) + axis.c2p = function (c) { return m + c / s; }; + else + axis.c2p = function (c) { return it(m + c / s); }; + } + + function measureTickLabels(axis) { + + var opts = axis.options, + ticks = axis.ticks || [], + labelWidth = opts.labelWidth || 0, + labelHeight = opts.labelHeight || 0, + maxWidth = labelWidth || (axis.direction == "x" ? Math.floor(surface.width / (ticks.length || 1)) : null), + legacyStyles = axis.direction + "Axis " + axis.direction + axis.n + "Axis", + layer = "flot-" + axis.direction + "-axis flot-" + axis.direction + axis.n + "-axis " + legacyStyles, + font = opts.font || "flot-tick-label tickLabel"; + + for (var i = 0; i < ticks.length; ++i) { + + var t = ticks[i]; + + if (!t.label) + continue; + + var info = surface.getTextInfo(layer, t.label, font, null, maxWidth); + + labelWidth = Math.max(labelWidth, info.width); + labelHeight = Math.max(labelHeight, info.height); + } + + axis.labelWidth = opts.labelWidth || labelWidth; + axis.labelHeight = opts.labelHeight || labelHeight; + } + + function allocateAxisBoxFirstPhase(axis) { + // find the bounding box of the axis by looking at label + // widths/heights and ticks, make room by diminishing the + // plotOffset; this first phase only looks at one + // dimension per axis, the other dimension depends on the + // other axes so will have to wait + + var lw = axis.labelWidth, + lh = axis.labelHeight, + pos = axis.options.position, + isXAxis = axis.direction === "x", + tickLength = axis.options.tickLength, + axisMargin = options.grid.axisMargin, + padding = options.grid.labelMargin, + innermost = true, + outermost = true, + first = true, + found = false; + + // Determine the axis's position in its direction and on its side + + $.each(isXAxis ? 
xaxes : yaxes, function(i, a) { + if (a && (a.show || a.reserveSpace)) { + if (a === axis) { + found = true; + } else if (a.options.position === pos) { + if (found) { + outermost = false; + } else { + innermost = false; + } + } + if (!found) { + first = false; + } + } + }); + + // The outermost axis on each side has no margin + + if (outermost) { + axisMargin = 0; + } + + // The ticks for the first axis in each direction stretch across + + if (tickLength == null) { + tickLength = first ? "full" : 5; + } + + if (!isNaN(+tickLength)) + padding += +tickLength; + + if (isXAxis) { + lh += padding; + + if (pos == "bottom") { + plotOffset.bottom += lh + axisMargin; + axis.box = { top: surface.height - plotOffset.bottom, height: lh }; + } + else { + axis.box = { top: plotOffset.top + axisMargin, height: lh }; + plotOffset.top += lh + axisMargin; + } + } + else { + lw += padding; + + if (pos == "left") { + axis.box = { left: plotOffset.left + axisMargin, width: lw }; + plotOffset.left += lw + axisMargin; + } + else { + plotOffset.right += lw + axisMargin; + axis.box = { left: surface.width - plotOffset.right, width: lw }; + } + } + + // save for future reference + axis.position = pos; + axis.tickLength = tickLength; + axis.box.padding = padding; + axis.innermost = innermost; + } + + function allocateAxisBoxSecondPhase(axis) { + // now that all axis boxes have been placed in one + // dimension, we can set the remaining dimension coordinates + if (axis.direction == "x") { + axis.box.left = plotOffset.left - axis.labelWidth / 2; + axis.box.width = surface.width - plotOffset.left - plotOffset.right + axis.labelWidth; + } + else { + axis.box.top = plotOffset.top - axis.labelHeight / 2; + axis.box.height = surface.height - plotOffset.bottom - plotOffset.top + axis.labelHeight; + } + } + + function adjustLayoutForThingsStickingOut() { + // possibly adjust plot offset to ensure everything stays + // inside the canvas and isn't clipped off + + var minMargin = options.grid.minBorderMargin, + axis, i; + + // check stuff from the plot (FIXME: this should just read + // a value from the series, otherwise it's impossible to + // customize) + if (minMargin == null) { + minMargin = 0; + for (i = 0; i < series.length; ++i) + minMargin = Math.max(minMargin, 2 * (series[i].points.radius + series[i].points.lineWidth/2)); + } + + var margins = { + left: minMargin, + right: minMargin, + top: minMargin, + bottom: minMargin + }; + + // check axis labels, note we don't check the actual + // labels but instead use the overall width/height to not + // jump as much around with replots + $.each(allAxes(), function (_, axis) { + if (axis.reserveSpace && axis.ticks && axis.ticks.length) { + if (axis.direction === "x") { + margins.left = Math.max(margins.left, axis.labelWidth / 2); + margins.right = Math.max(margins.right, axis.labelWidth / 2); + } else { + margins.bottom = Math.max(margins.bottom, axis.labelHeight / 2); + margins.top = Math.max(margins.top, axis.labelHeight / 2); + } + } + }); + + plotOffset.left = Math.ceil(Math.max(margins.left, plotOffset.left)); + plotOffset.right = Math.ceil(Math.max(margins.right, plotOffset.right)); + plotOffset.top = Math.ceil(Math.max(margins.top, plotOffset.top)); + plotOffset.bottom = Math.ceil(Math.max(margins.bottom, plotOffset.bottom)); + } + + function setupGrid() { + var i, axes = allAxes(), showGrid = options.grid.show; + + // Initialize the plot's offset from the edge of the canvas + + for (var a in plotOffset) { + var margin = options.grid.margin || 0; + plotOffset[a] = typeof 
margin == "number" ? margin : margin[a] || 0; + } + + executeHooks(hooks.processOffset, [plotOffset]); + + // If the grid is visible, add its border width to the offset + + for (var a in plotOffset) { + if(typeof(options.grid.borderWidth) == "object") { + plotOffset[a] += showGrid ? options.grid.borderWidth[a] : 0; + } + else { + plotOffset[a] += showGrid ? options.grid.borderWidth : 0; + } + } + + $.each(axes, function (_, axis) { + var axisOpts = axis.options; + axis.show = axisOpts.show == null ? axis.used : axisOpts.show; + axis.reserveSpace = axisOpts.reserveSpace == null ? axis.show : axisOpts.reserveSpace; + setRange(axis); + }); + + if (showGrid) { + + var allocatedAxes = $.grep(axes, function (axis) { + return axis.show || axis.reserveSpace; + }); + + $.each(allocatedAxes, function (_, axis) { + // make the ticks + setupTickGeneration(axis); + setTicks(axis); + snapRangeToTicks(axis, axis.ticks); + // find labelWidth/Height for axis + measureTickLabels(axis); + }); + + // with all dimensions calculated, we can compute the + // axis bounding boxes, start from the outside + // (reverse order) + for (i = allocatedAxes.length - 1; i >= 0; --i) + allocateAxisBoxFirstPhase(allocatedAxes[i]); + + // make sure we've got enough space for things that + // might stick out + adjustLayoutForThingsStickingOut(); + + $.each(allocatedAxes, function (_, axis) { + allocateAxisBoxSecondPhase(axis); + }); + } + + plotWidth = surface.width - plotOffset.left - plotOffset.right; + plotHeight = surface.height - plotOffset.bottom - plotOffset.top; + + // now we got the proper plot dimensions, we can compute the scaling + $.each(axes, function (_, axis) { + setTransformationHelpers(axis); + }); + + if (showGrid) { + drawAxisLabels(); + } + + insertLegend(); + } + + function setRange(axis) { + var opts = axis.options, + min = +(opts.min != null ? opts.min : axis.datamin), + max = +(opts.max != null ? opts.max : axis.datamax), + delta = max - min; + + if (delta == 0.0) { + // degenerate case + var widen = max == 0 ? 1 : 0.01; + + if (opts.min == null) + min -= widen; + // always widen max if we couldn't widen min to ensure we + // don't fall into min == max which doesn't work + if (opts.max == null || opts.min != null) + max += widen; + } + else { + // consider autoscaling + var margin = opts.autoscaleMargin; + if (margin != null) { + if (opts.min == null) { + min -= delta * margin; + // make sure we don't go below zero if all values + // are positive + if (min < 0 && axis.datamin != null && axis.datamin >= 0) + min = 0; + } + if (opts.max == null) { + max += delta * margin; + if (max > 0 && axis.datamax != null && axis.datamax <= 0) + max = 0; + } + } + } + axis.min = min; + axis.max = max; + } + + function setupTickGeneration(axis) { + var opts = axis.options; + + // estimate number of ticks + var noTicks; + if (typeof opts.ticks == "number" && opts.ticks > 0) + noTicks = opts.ticks; + else + // heuristic based on the model a*sqrt(x) fitted to + // some data points that seemed reasonable + noTicks = 0.3 * Math.sqrt(axis.direction == "x" ? 
surface.width : surface.height); + + var delta = (axis.max - axis.min) / noTicks, + dec = -Math.floor(Math.log(delta) / Math.LN10), + maxDec = opts.tickDecimals; + + if (maxDec != null && dec > maxDec) { + dec = maxDec; + } + + var magn = Math.pow(10, -dec), + norm = delta / magn, // norm is between 1.0 and 10.0 + size; + + if (norm < 1.5) { + size = 1; + } else if (norm < 3) { + size = 2; + // special case for 2.5, requires an extra decimal + if (norm > 2.25 && (maxDec == null || dec + 1 <= maxDec)) { + size = 2.5; + ++dec; + } + } else if (norm < 7.5) { + size = 5; + } else { + size = 10; + } + + size *= magn; + + if (opts.minTickSize != null && size < opts.minTickSize) { + size = opts.minTickSize; + } + + axis.delta = delta; + axis.tickDecimals = Math.max(0, maxDec != null ? maxDec : dec); + axis.tickSize = opts.tickSize || size; + + // Time mode was moved to a plug-in in 0.8, and since so many people use it + // we'll add an especially friendly reminder to make sure they included it. + + if (opts.mode == "time" && !axis.tickGenerator) { + throw new Error("Time mode requires the flot.time plugin."); + } + + // Flot supports base-10 axes; any other mode else is handled by a plug-in, + // like flot.time.js. + + if (!axis.tickGenerator) { + + axis.tickGenerator = function (axis) { + + var ticks = [], + start = floorInBase(axis.min, axis.tickSize), + i = 0, + v = Number.NaN, + prev; + + do { + prev = v; + v = start + i * axis.tickSize; + ticks.push(v); + ++i; + } while (v < axis.max && v != prev); + return ticks; + }; + + axis.tickFormatter = function (value, axis) { + + var factor = axis.tickDecimals ? Math.pow(10, axis.tickDecimals) : 1; + var formatted = "" + Math.round(value * factor) / factor; + + // If tickDecimals was specified, ensure that we have exactly that + // much precision; otherwise default to the value's own precision. + + if (axis.tickDecimals != null) { + var decimal = formatted.indexOf("."); + var precision = decimal == -1 ? 0 : formatted.length - decimal - 1; + if (precision < axis.tickDecimals) { + return (precision ? formatted : formatted + ".") + ("" + factor).substr(1, axis.tickDecimals - precision); + } + } + + return formatted; + }; + } + + if ($.isFunction(opts.tickFormatter)) + axis.tickFormatter = function (v, axis) { return "" + opts.tickFormatter(v, axis); }; + + if (opts.alignTicksWithAxis != null) { + var otherAxis = (axis.direction == "x" ? 
xaxes : yaxes)[opts.alignTicksWithAxis - 1]; + if (otherAxis && otherAxis.used && otherAxis != axis) { + // consider snapping min/max to outermost nice ticks + var niceTicks = axis.tickGenerator(axis); + if (niceTicks.length > 0) { + if (opts.min == null) + axis.min = Math.min(axis.min, niceTicks[0]); + if (opts.max == null && niceTicks.length > 1) + axis.max = Math.max(axis.max, niceTicks[niceTicks.length - 1]); + } + + axis.tickGenerator = function (axis) { + // copy ticks, scaled to this axis + var ticks = [], v, i; + for (i = 0; i < otherAxis.ticks.length; ++i) { + v = (otherAxis.ticks[i].v - otherAxis.min) / (otherAxis.max - otherAxis.min); + v = axis.min + v * (axis.max - axis.min); + ticks.push(v); + } + return ticks; + }; + + // we might need an extra decimal since forced + // ticks don't necessarily fit naturally + if (!axis.mode && opts.tickDecimals == null) { + var extraDec = Math.max(0, -Math.floor(Math.log(axis.delta) / Math.LN10) + 1), + ts = axis.tickGenerator(axis); + + // only proceed if the tick interval rounded + // with an extra decimal doesn't give us a + // zero at end + if (!(ts.length > 1 && /\..*0$/.test((ts[1] - ts[0]).toFixed(extraDec)))) + axis.tickDecimals = extraDec; + } + } + } + } + + function setTicks(axis) { + var oticks = axis.options.ticks, ticks = []; + if (oticks == null || (typeof oticks == "number" && oticks > 0)) + ticks = axis.tickGenerator(axis); + else if (oticks) { + if ($.isFunction(oticks)) + // generate the ticks + ticks = oticks(axis); + else + ticks = oticks; + } + + // clean up/labelify the supplied ticks, copy them over + var i, v; + axis.ticks = []; + for (i = 0; i < ticks.length; ++i) { + var label = null; + var t = ticks[i]; + if (typeof t == "object") { + v = +t[0]; + if (t.length > 1) + label = t[1]; + } + else + v = +t; + if (label == null) + label = axis.tickFormatter(v, axis); + if (!isNaN(v)) + axis.ticks.push({ v: v, label: label }); + } + } + + function snapRangeToTicks(axis, ticks) { + if (axis.options.autoscaleMargin && ticks.length > 0) { + // snap to ticks + if (axis.options.min == null) + axis.min = Math.min(axis.min, ticks[0].v); + if (axis.options.max == null && ticks.length > 1) + axis.max = Math.max(axis.max, ticks[ticks.length - 1].v); + } + } + + function draw() { + + surface.clear(); + + executeHooks(hooks.drawBackground, [ctx]); + + var grid = options.grid; + + // draw background, if any + if (grid.show && grid.backgroundColor) + drawBackground(); + + if (grid.show && !grid.aboveData) { + drawGrid(); + } + + for (var i = 0; i < series.length; ++i) { + executeHooks(hooks.drawSeries, [ctx, series[i]]); + drawSeries(series[i]); + } + + executeHooks(hooks.draw, [ctx]); + + if (grid.show && grid.aboveData) { + drawGrid(); + } + + surface.render(); + + // A draw implies that either the axes or data have changed, so we + // should probably update the overlay highlights as well. + + triggerRedrawOverlay(); + } + + function extractRange(ranges, coord) { + var axis, from, to, key, axes = allAxes(); + + for (var i = 0; i < axes.length; ++i) { + axis = axes[i]; + if (axis.direction == coord) { + key = coord + axis.n + "axis"; + if (!ranges[key] && axis.n == 1) + key = coord + "axis"; // support x1axis as xaxis + if (ranges[key]) { + from = ranges[key].from; + to = ranges[key].to; + break; + } + } + } + + // backwards-compat stuff - to be removed in future + if (!ranges[key]) { + axis = coord == "x" ? 
xaxes[0] : yaxes[0]; + from = ranges[coord + "1"]; + to = ranges[coord + "2"]; + } + + // auto-reverse as an added bonus + if (from != null && to != null && from > to) { + var tmp = from; + from = to; + to = tmp; + } + + return { from: from, to: to, axis: axis }; + } + + function drawBackground() { + ctx.save(); + ctx.translate(plotOffset.left, plotOffset.top); + + ctx.fillStyle = getColorOrGradient(options.grid.backgroundColor, plotHeight, 0, "rgba(255, 255, 255, 0)"); + ctx.fillRect(0, 0, plotWidth, plotHeight); + ctx.restore(); + } + + function drawGrid() { + var i, axes, bw, bc; + + ctx.save(); + ctx.translate(plotOffset.left, plotOffset.top); + + // draw markings + var markings = options.grid.markings; + if (markings) { + if ($.isFunction(markings)) { + axes = plot.getAxes(); + // xmin etc. is backwards compatibility, to be + // removed in the future + axes.xmin = axes.xaxis.min; + axes.xmax = axes.xaxis.max; + axes.ymin = axes.yaxis.min; + axes.ymax = axes.yaxis.max; + + markings = markings(axes); + } + + for (i = 0; i < markings.length; ++i) { + var m = markings[i], + xrange = extractRange(m, "x"), + yrange = extractRange(m, "y"); + + // fill in missing + if (xrange.from == null) + xrange.from = xrange.axis.min; + if (xrange.to == null) + xrange.to = xrange.axis.max; + if (yrange.from == null) + yrange.from = yrange.axis.min; + if (yrange.to == null) + yrange.to = yrange.axis.max; + + // clip + if (xrange.to < xrange.axis.min || xrange.from > xrange.axis.max || + yrange.to < yrange.axis.min || yrange.from > yrange.axis.max) + continue; + + xrange.from = Math.max(xrange.from, xrange.axis.min); + xrange.to = Math.min(xrange.to, xrange.axis.max); + yrange.from = Math.max(yrange.from, yrange.axis.min); + yrange.to = Math.min(yrange.to, yrange.axis.max); + + var xequal = xrange.from === xrange.to, + yequal = yrange.from === yrange.to; + + if (xequal && yequal) { + continue; + } + + // then draw + xrange.from = Math.floor(xrange.axis.p2c(xrange.from)); + xrange.to = Math.floor(xrange.axis.p2c(xrange.to)); + yrange.from = Math.floor(yrange.axis.p2c(yrange.from)); + yrange.to = Math.floor(yrange.axis.p2c(yrange.to)); + + if (xequal || yequal) { + var lineWidth = m.lineWidth || options.grid.markingsLineWidth, + subPixel = lineWidth % 2 ? 0.5 : 0; + ctx.beginPath(); + ctx.strokeStyle = m.color || options.grid.markingsColor; + ctx.lineWidth = lineWidth; + if (xequal) { + ctx.moveTo(xrange.to + subPixel, yrange.from); + ctx.lineTo(xrange.to + subPixel, yrange.to); + } else { + ctx.moveTo(xrange.from, yrange.to + subPixel); + ctx.lineTo(xrange.to, yrange.to + subPixel); + } + ctx.stroke(); + } else { + ctx.fillStyle = m.color || options.grid.markingsColor; + ctx.fillRect(xrange.from, yrange.to, + xrange.to - xrange.from, + yrange.from - yrange.to); + } + } + } + + // draw the ticks + axes = allAxes(); + bw = options.grid.borderWidth; + + for (var j = 0; j < axes.length; ++j) { + var axis = axes[j], box = axis.box, + t = axis.tickLength, x, y, xoff, yoff; + if (!axis.show || axis.ticks.length == 0) + continue; + + ctx.lineWidth = 1; + + // find the edges + if (axis.direction == "x") { + x = 0; + if (t == "full") + y = (axis.position == "top" ? 0 : plotHeight); + else + y = box.top - plotOffset.top + (axis.position == "top" ? box.height : 0); + } + else { + y = 0; + if (t == "full") + x = (axis.position == "left" ? 0 : plotWidth); + else + x = box.left - plotOffset.left + (axis.position == "left" ? 
box.width : 0); + } + + // draw tick bar + if (!axis.innermost) { + ctx.strokeStyle = axis.options.color; + ctx.beginPath(); + xoff = yoff = 0; + if (axis.direction == "x") + xoff = plotWidth + 1; + else + yoff = plotHeight + 1; + + if (ctx.lineWidth == 1) { + if (axis.direction == "x") { + y = Math.floor(y) + 0.5; + } else { + x = Math.floor(x) + 0.5; + } + } + + ctx.moveTo(x, y); + ctx.lineTo(x + xoff, y + yoff); + ctx.stroke(); + } + + // draw ticks + + ctx.strokeStyle = axis.options.tickColor; + + ctx.beginPath(); + for (i = 0; i < axis.ticks.length; ++i) { + var v = axis.ticks[i].v; + + xoff = yoff = 0; + + if (isNaN(v) || v < axis.min || v > axis.max + // skip those lying on the axes if we got a border + || (t == "full" + && ((typeof bw == "object" && bw[axis.position] > 0) || bw > 0) + && (v == axis.min || v == axis.max))) + continue; + + if (axis.direction == "x") { + x = axis.p2c(v); + yoff = t == "full" ? -plotHeight : t; + + if (axis.position == "top") + yoff = -yoff; + } + else { + y = axis.p2c(v); + xoff = t == "full" ? -plotWidth : t; + + if (axis.position == "left") + xoff = -xoff; + } + + if (ctx.lineWidth == 1) { + if (axis.direction == "x") + x = Math.floor(x) + 0.5; + else + y = Math.floor(y) + 0.5; + } + + ctx.moveTo(x, y); + ctx.lineTo(x + xoff, y + yoff); + } + + ctx.stroke(); + } + + + // draw border + if (bw) { + // If either borderWidth or borderColor is an object, then draw the border + // line by line instead of as one rectangle + bc = options.grid.borderColor; + if(typeof bw == "object" || typeof bc == "object") { + if (typeof bw !== "object") { + bw = {top: bw, right: bw, bottom: bw, left: bw}; + } + if (typeof bc !== "object") { + bc = {top: bc, right: bc, bottom: bc, left: bc}; + } + + if (bw.top > 0) { + ctx.strokeStyle = bc.top; + ctx.lineWidth = bw.top; + ctx.beginPath(); + ctx.moveTo(0 - bw.left, 0 - bw.top/2); + ctx.lineTo(plotWidth, 0 - bw.top/2); + ctx.stroke(); + } + + if (bw.right > 0) { + ctx.strokeStyle = bc.right; + ctx.lineWidth = bw.right; + ctx.beginPath(); + ctx.moveTo(plotWidth + bw.right / 2, 0 - bw.top); + ctx.lineTo(plotWidth + bw.right / 2, plotHeight); + ctx.stroke(); + } + + if (bw.bottom > 0) { + ctx.strokeStyle = bc.bottom; + ctx.lineWidth = bw.bottom; + ctx.beginPath(); + ctx.moveTo(plotWidth + bw.right, plotHeight + bw.bottom / 2); + ctx.lineTo(0, plotHeight + bw.bottom / 2); + ctx.stroke(); + } + + if (bw.left > 0) { + ctx.strokeStyle = bc.left; + ctx.lineWidth = bw.left; + ctx.beginPath(); + ctx.moveTo(0 - bw.left/2, plotHeight + bw.bottom); + ctx.lineTo(0- bw.left/2, 0); + ctx.stroke(); + } + } + else { + ctx.lineWidth = bw; + ctx.strokeStyle = options.grid.borderColor; + ctx.strokeRect(-bw/2, -bw/2, plotWidth + bw, plotHeight + bw); + } + } + + ctx.restore(); + } + + function drawAxisLabels() { + + $.each(allAxes(), function (_, axis) { + var box = axis.box, + legacyStyles = axis.direction + "Axis " + axis.direction + axis.n + "Axis", + layer = "flot-" + axis.direction + "-axis flot-" + axis.direction + axis.n + "-axis " + legacyStyles, + font = axis.options.font || "flot-tick-label tickLabel", + tick, x, y, halign, valign; + + // Remove text before checking for axis.show and ticks.length; + // otherwise plugins, like flot-tickrotor, that draw their own + // tick labels will end up with both theirs and the defaults. 
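// The labels drawn below honor the per-axis "font" option; a sketch of the
// object form from the defaults documented earlier (a plain string of CSS
// classes also works, as handled by getTextInfo):
//
//     xaxis: {
//         font: { size: 11, lineHeight: 13, style: "italic", weight: "bold",
//                 family: "sans-serif", variant: "small-caps", color: "#545454" }
//     }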
+ + surface.removeText(layer); + + if (!axis.show || axis.ticks.length == 0) + return; + + for (var i = 0; i < axis.ticks.length; ++i) { + + tick = axis.ticks[i]; + if (!tick.label || tick.v < axis.min || tick.v > axis.max) + continue; + + if (axis.direction == "x") { + halign = "center"; + x = plotOffset.left + axis.p2c(tick.v); + if (axis.position == "bottom") { + y = box.top + box.padding; + } else { + y = box.top + box.height - box.padding; + valign = "bottom"; + } + } else { + valign = "middle"; + y = plotOffset.top + axis.p2c(tick.v); + if (axis.position == "left") { + x = box.left + box.width - box.padding; + halign = "right"; + } else { + x = box.left + box.padding; + } + } + + surface.addText(layer, x, y, tick.label, font, null, null, halign, valign); + } + }); + } + + function drawSeries(series) { + if (series.lines.show) + drawSeriesLines(series); + if (series.bars.show) + drawSeriesBars(series); + if (series.points.show) + drawSeriesPoints(series); + } + + function drawSeriesLines(series) { + function plotLine(datapoints, xoffset, yoffset, axisx, axisy) { + var points = datapoints.points, + ps = datapoints.pointsize, + prevx = null, prevy = null; + + ctx.beginPath(); + for (var i = ps; i < points.length; i += ps) { + var x1 = points[i - ps], y1 = points[i - ps + 1], + x2 = points[i], y2 = points[i + 1]; + + if (x1 == null || x2 == null) + continue; + + // clip with ymin + if (y1 <= y2 && y1 < axisy.min) { + if (y2 < axisy.min) + continue; // line segment is outside + // compute new intersection point + x1 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1; + y1 = axisy.min; + } + else if (y2 <= y1 && y2 < axisy.min) { + if (y1 < axisy.min) + continue; + x2 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1; + y2 = axisy.min; + } + + // clip with ymax + if (y1 >= y2 && y1 > axisy.max) { + if (y2 > axisy.max) + continue; + x1 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1; + y1 = axisy.max; + } + else if (y2 >= y1 && y2 > axisy.max) { + if (y1 > axisy.max) + continue; + x2 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1; + y2 = axisy.max; + } + + // clip with xmin + if (x1 <= x2 && x1 < axisx.min) { + if (x2 < axisx.min) + continue; + y1 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1; + x1 = axisx.min; + } + else if (x2 <= x1 && x2 < axisx.min) { + if (x1 < axisx.min) + continue; + y2 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1; + x2 = axisx.min; + } + + // clip with xmax + if (x1 >= x2 && x1 > axisx.max) { + if (x2 > axisx.max) + continue; + y1 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1; + x1 = axisx.max; + } + else if (x2 >= x1 && x2 > axisx.max) { + if (x1 > axisx.max) + continue; + y2 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1; + x2 = axisx.max; + } + + if (x1 != prevx || y1 != prevy) + ctx.moveTo(axisx.p2c(x1) + xoffset, axisy.p2c(y1) + yoffset); + + prevx = x2; + prevy = y2; + ctx.lineTo(axisx.p2c(x2) + xoffset, axisy.p2c(y2) + yoffset); + } + ctx.stroke(); + } + + function plotLineArea(datapoints, axisx, axisy) { + var points = datapoints.points, + ps = datapoints.pointsize, + bottom = Math.min(Math.max(0, axisy.min), axisy.max), + i = 0, top, areaOpen = false, + ypos = 1, segmentStart = 0, segmentEnd = 0; + + // we process each segment in two turns, first forward + // direction to sketch out top, then once we hit the + // end we go backwards to sketch the bottom + while (true) { + if (ps > 0 && i > points.length + ps) + break; + + i += ps; // ps is negative if going backwards + + var x1 = points[i - ps], + y1 = points[i - ps + ypos], + x2 = points[i], y2 = 
points[i + ypos]; + + if (areaOpen) { + if (ps > 0 && x1 != null && x2 == null) { + // at turning point + segmentEnd = i; + ps = -ps; + ypos = 2; + continue; + } + + if (ps < 0 && i == segmentStart + ps) { + // done with the reverse sweep + ctx.fill(); + areaOpen = false; + ps = -ps; + ypos = 1; + i = segmentStart = segmentEnd + ps; + continue; + } + } + + if (x1 == null || x2 == null) + continue; + + // clip x values + + // clip with xmin + if (x1 <= x2 && x1 < axisx.min) { + if (x2 < axisx.min) + continue; + y1 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1; + x1 = axisx.min; + } + else if (x2 <= x1 && x2 < axisx.min) { + if (x1 < axisx.min) + continue; + y2 = (axisx.min - x1) / (x2 - x1) * (y2 - y1) + y1; + x2 = axisx.min; + } + + // clip with xmax + if (x1 >= x2 && x1 > axisx.max) { + if (x2 > axisx.max) + continue; + y1 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1; + x1 = axisx.max; + } + else if (x2 >= x1 && x2 > axisx.max) { + if (x1 > axisx.max) + continue; + y2 = (axisx.max - x1) / (x2 - x1) * (y2 - y1) + y1; + x2 = axisx.max; + } + + if (!areaOpen) { + // open area + ctx.beginPath(); + ctx.moveTo(axisx.p2c(x1), axisy.p2c(bottom)); + areaOpen = true; + } + + // now first check the case where both is outside + if (y1 >= axisy.max && y2 >= axisy.max) { + ctx.lineTo(axisx.p2c(x1), axisy.p2c(axisy.max)); + ctx.lineTo(axisx.p2c(x2), axisy.p2c(axisy.max)); + continue; + } + else if (y1 <= axisy.min && y2 <= axisy.min) { + ctx.lineTo(axisx.p2c(x1), axisy.p2c(axisy.min)); + ctx.lineTo(axisx.p2c(x2), axisy.p2c(axisy.min)); + continue; + } + + // else it's a bit more complicated, there might + // be a flat maxed out rectangle first, then a + // triangular cutout or reverse; to find these + // keep track of the current x values + var x1old = x1, x2old = x2; + + // clip the y values, without shortcutting, we + // go through all cases in turn + + // clip with ymin + if (y1 <= y2 && y1 < axisy.min && y2 >= axisy.min) { + x1 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1; + y1 = axisy.min; + } + else if (y2 <= y1 && y2 < axisy.min && y1 >= axisy.min) { + x2 = (axisy.min - y1) / (y2 - y1) * (x2 - x1) + x1; + y2 = axisy.min; + } + + // clip with ymax + if (y1 >= y2 && y1 > axisy.max && y2 <= axisy.max) { + x1 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1; + y1 = axisy.max; + } + else if (y2 >= y1 && y2 > axisy.max && y1 <= axisy.max) { + x2 = (axisy.max - y1) / (y2 - y1) * (x2 - x1) + x1; + y2 = axisy.max; + } + + // if the x value was changed we got a rectangle + // to fill + if (x1 != x1old) { + ctx.lineTo(axisx.p2c(x1old), axisy.p2c(y1)); + // it goes to (x1, y1), but we fill that below + } + + // fill triangular section, this sometimes result + // in redundant points if (x1, y1) hasn't changed + // from previous line to, but we just ignore that + ctx.lineTo(axisx.p2c(x1), axisy.p2c(y1)); + ctx.lineTo(axisx.p2c(x2), axisy.p2c(y2)); + + // fill the other rectangle if it's there + if (x2 != x2old) { + ctx.lineTo(axisx.p2c(x2), axisy.p2c(y2)); + ctx.lineTo(axisx.p2c(x2old), axisy.p2c(y2)); + } + } + } + + ctx.save(); + ctx.translate(plotOffset.left, plotOffset.top); + ctx.lineJoin = "round"; + + var lw = series.lines.lineWidth, + sw = series.shadowSize; + // FIXME: consider another form of shadow when filling is turned on + if (lw > 0 && sw > 0) { + // draw shadow as a thick and thin line with transparency + ctx.lineWidth = sw; + ctx.strokeStyle = "rgba(0,0,0,0.1)"; + // position shadow at angle from the mid of line + var angle = Math.PI/18; + plotLine(series.datapoints, Math.sin(angle) * 
(lw/2 + sw/2), Math.cos(angle) * (lw/2 + sw/2), series.xaxis, series.yaxis); + ctx.lineWidth = sw/2; + plotLine(series.datapoints, Math.sin(angle) * (lw/2 + sw/4), Math.cos(angle) * (lw/2 + sw/4), series.xaxis, series.yaxis); + } + + ctx.lineWidth = lw; + ctx.strokeStyle = series.color; + var fillStyle = getFillStyle(series.lines, series.color, 0, plotHeight); + if (fillStyle) { + ctx.fillStyle = fillStyle; + plotLineArea(series.datapoints, series.xaxis, series.yaxis); + } + + if (lw > 0) + plotLine(series.datapoints, 0, 0, series.xaxis, series.yaxis); + ctx.restore(); + } + + function drawSeriesPoints(series) { + function plotPoints(datapoints, radius, fillStyle, offset, shadow, axisx, axisy, symbol) { + var points = datapoints.points, ps = datapoints.pointsize; + + for (var i = 0; i < points.length; i += ps) { + var x = points[i], y = points[i + 1]; + if (x == null || x < axisx.min || x > axisx.max || y < axisy.min || y > axisy.max) + continue; + + ctx.beginPath(); + x = axisx.p2c(x); + y = axisy.p2c(y) + offset; + if (symbol == "circle") + ctx.arc(x, y, radius, 0, shadow ? Math.PI : Math.PI * 2, false); + else + symbol(ctx, x, y, radius, shadow); + ctx.closePath(); + + if (fillStyle) { + ctx.fillStyle = fillStyle; + ctx.fill(); + } + ctx.stroke(); + } + } + + ctx.save(); + ctx.translate(plotOffset.left, plotOffset.top); + + var lw = series.points.lineWidth, + sw = series.shadowSize, + radius = series.points.radius, + symbol = series.points.symbol; + + // If the user sets the line width to 0, we change it to a very + // small value. A line width of 0 seems to force the default of 1. + // Doing the conditional here allows the shadow setting to still be + // optional even with a lineWidth of 0. + + if( lw == 0 ) + lw = 0.0001; + + if (lw > 0 && sw > 0) { + // draw shadow in two steps + var w = sw / 2; + ctx.lineWidth = w; + ctx.strokeStyle = "rgba(0,0,0,0.1)"; + plotPoints(series.datapoints, radius, null, w + w/2, true, + series.xaxis, series.yaxis, symbol); + + ctx.strokeStyle = "rgba(0,0,0,0.2)"; + plotPoints(series.datapoints, radius, null, w/2, true, + series.xaxis, series.yaxis, symbol); + } + + ctx.lineWidth = lw; + ctx.strokeStyle = series.color; + plotPoints(series.datapoints, radius, + getFillStyle(series.points, series.color), 0, false, + series.xaxis, series.yaxis, symbol); + ctx.restore(); + } + + function drawBar(x, y, b, barLeft, barRight, fillStyleCallback, axisx, axisy, c, horizontal, lineWidth) { + var left, right, bottom, top, + drawLeft, drawRight, drawTop, drawBottom, + tmp; + + // in horizontal mode, we start the bar from the left + // instead of from the bottom so it appears to be + // horizontal rather than vertical + if (horizontal) { + drawBottom = drawRight = drawTop = true; + drawLeft = false; + left = b; + right = x; + top = y + barLeft; + bottom = y + barRight; + + // account for negative bars + if (right < left) { + tmp = right; + right = left; + left = tmp; + drawLeft = true; + drawRight = false; + } + } + else { + drawLeft = drawRight = drawTop = true; + drawBottom = false; + left = x + barLeft; + right = x + barRight; + bottom = b; + top = y; + + // account for negative bars + if (top < bottom) { + tmp = top; + top = bottom; + bottom = tmp; + drawBottom = true; + drawTop = false; + } + } + + // clip + if (right < axisx.min || left > axisx.max || + top < axisy.min || bottom > axisy.max) + return; + + if (left < axisx.min) { + left = axisx.min; + drawLeft = false; + } + + if (right > axisx.max) { + right = axisx.max; + drawRight = false; + } + + if (bottom < 
axisy.min) { + bottom = axisy.min; + drawBottom = false; + } + + if (top > axisy.max) { + top = axisy.max; + drawTop = false; + } + + left = axisx.p2c(left); + bottom = axisy.p2c(bottom); + right = axisx.p2c(right); + top = axisy.p2c(top); + + // fill the bar + if (fillStyleCallback) { + c.fillStyle = fillStyleCallback(bottom, top); + c.fillRect(left, top, right - left, bottom - top) + } + + // draw outline + if (lineWidth > 0 && (drawLeft || drawRight || drawTop || drawBottom)) { + c.beginPath(); + + // FIXME: inline moveTo is buggy with excanvas + c.moveTo(left, bottom); + if (drawLeft) + c.lineTo(left, top); + else + c.moveTo(left, top); + if (drawTop) + c.lineTo(right, top); + else + c.moveTo(right, top); + if (drawRight) + c.lineTo(right, bottom); + else + c.moveTo(right, bottom); + if (drawBottom) + c.lineTo(left, bottom); + else + c.moveTo(left, bottom); + c.stroke(); + } + } + + function drawSeriesBars(series) { + function plotBars(datapoints, barLeft, barRight, fillStyleCallback, axisx, axisy) { + var points = datapoints.points, ps = datapoints.pointsize; + + for (var i = 0; i < points.length; i += ps) { + if (points[i] == null) + continue; + drawBar(points[i], points[i + 1], points[i + 2], barLeft, barRight, fillStyleCallback, axisx, axisy, ctx, series.bars.horizontal, series.bars.lineWidth); + } + } + + ctx.save(); + ctx.translate(plotOffset.left, plotOffset.top); + + // FIXME: figure out a way to add shadows (for instance along the right edge) + ctx.lineWidth = series.bars.lineWidth; + ctx.strokeStyle = series.color; + + var barLeft; + + switch (series.bars.align) { + case "left": + barLeft = 0; + break; + case "right": + barLeft = -series.bars.barWidth; + break; + default: + barLeft = -series.bars.barWidth / 2; + } + + var fillStyleCallback = series.bars.fill ? function (bottom, top) { return getFillStyle(series.bars, series.color, bottom, top); } : null; + plotBars(series.datapoints, barLeft, barLeft + series.bars.barWidth, fillStyleCallback, series.xaxis, series.yaxis); + ctx.restore(); + } + + function getFillStyle(filloptions, seriesColor, bottom, top) { + var fill = filloptions.fill; + if (!fill) + return null; + + if (filloptions.fillColor) + return getColorOrGradient(filloptions.fillColor, bottom, top, seriesColor); + + var c = $.color.parse(seriesColor); + c.a = typeof fill == "number" ? fill : 0.4; + c.normalize(); + return c.toString(); + } + + function insertLegend() { + + if (options.legend.container != null) { + $(options.legend.container).html(""); + } else { + placeholder.find(".legend").remove(); + } + + if (!options.legend.show) { + return; + } + + var fragments = [], entries = [], rowStarted = false, + lf = options.legend.labelFormatter, s, label; + + // Build a list of legend entries, with each having a label and a color + + for (var i = 0; i < series.length; ++i) { + s = series[i]; + if (s.label) { + label = lf ? lf(s.label, s) : s.label; + if (label) { + entries.push({ + label: label, + color: s.color + }); + } + } + } + + // Sort the legend using either the default or a custom comparator + + if (options.legend.sorted) { + if ($.isFunction(options.legend.sorted)) { + entries.sort(options.legend.sorted); + } else if (options.legend.sorted == "reverse") { + entries.reverse(); + } else { + var ascending = options.legend.sorted != "descending"; + entries.sort(function(a, b) { + return a.label == b.label ? 0 : ( + (a.label < b.label) != ascending ? 
1 : -1 // Logical XOR + ); + }); + } + } + + // Generate markup for the list of entries, in their final order + + for (var i = 0; i < entries.length; ++i) { + + var entry = entries[i]; + + if (i % options.legend.noColumns == 0) { + if (rowStarted) + fragments.push(''); + fragments.push(''); + rowStarted = true; + } + + fragments.push( + '
<td class="legendColorBox"><div style="border:1px solid ' + options.legend.labelBoxBorderColor + ';padding:1px"><div style="width:4px;height:0;border:5px solid ' + entry.color + ';overflow:hidden"></div></div></td>' + + '<td class="legendLabel">' + entry.label + '</td>' + ); + + if (rowStarted) + fragments.push('</tr>'); + + if (fragments.length == 0) + return; + + var table = '<table style="font-size:smaller;color:' + options.grid.color + '">' + fragments.join("") + '</table>
'; + if (options.legend.container != null) + $(options.legend.container).html(table); + else { + var pos = "", + p = options.legend.position, + m = options.legend.margin; + if (m[0] == null) + m = [m, m]; + if (p.charAt(0) == "n") + pos += 'top:' + (m[1] + plotOffset.top) + 'px;'; + else if (p.charAt(0) == "s") + pos += 'bottom:' + (m[1] + plotOffset.bottom) + 'px;'; + if (p.charAt(1) == "e") + pos += 'right:' + (m[0] + plotOffset.right) + 'px;'; + else if (p.charAt(1) == "w") + pos += 'left:' + (m[0] + plotOffset.left) + 'px;'; + var legend = $('
<div class="legend">' + table.replace('style="', 'style="position:absolute;' + pos +';') + '</div>
').appendTo(placeholder); + if (options.legend.backgroundOpacity != 0.0) { + // put in the transparent background + // separately to avoid blended labels and + // label boxes + var c = options.legend.backgroundColor; + if (c == null) { + c = options.grid.backgroundColor; + if (c && typeof c == "string") + c = $.color.parse(c); + else + c = $.color.extract(legend, 'background-color'); + c.a = 1; + c = c.toString(); + } + var div = legend.children(); + $('<div style="position:absolute;width:' + div.width() + 'px;height:' + div.height() + 'px;' + pos +'background-color:' + c + ';"> </div>
').prependTo(legend).css('opacity', options.legend.backgroundOpacity); + } + } + } + + + // interactive features + + var highlights = [], + redrawTimeout = null; + + // returns the data item the mouse is over, or null if none is found + function findNearbyItem(mouseX, mouseY, seriesFilter) { + var maxDistance = options.grid.mouseActiveRadius, + smallestDistance = maxDistance * maxDistance + 1, + item = null, foundPoint = false, i, j, ps; + + for (i = series.length - 1; i >= 0; --i) { + if (!seriesFilter(series[i])) + continue; + + var s = series[i], + axisx = s.xaxis, + axisy = s.yaxis, + points = s.datapoints.points, + mx = axisx.c2p(mouseX), // precompute some stuff to make the loop faster + my = axisy.c2p(mouseY), + maxx = maxDistance / axisx.scale, + maxy = maxDistance / axisy.scale; + + ps = s.datapoints.pointsize; + // with inverse transforms, we can't use the maxx/maxy + // optimization, sadly + if (axisx.options.inverseTransform) + maxx = Number.MAX_VALUE; + if (axisy.options.inverseTransform) + maxy = Number.MAX_VALUE; + + if (s.lines.show || s.points.show) { + for (j = 0; j < points.length; j += ps) { + var x = points[j], y = points[j + 1]; + if (x == null) + continue; + + // For points and lines, the cursor must be within a + // certain distance to the data point + if (x - mx > maxx || x - mx < -maxx || + y - my > maxy || y - my < -maxy) + continue; + + // We have to calculate distances in pixels, not in + // data units, because the scales of the axes may be different + var dx = Math.abs(axisx.p2c(x) - mouseX), + dy = Math.abs(axisy.p2c(y) - mouseY), + dist = dx * dx + dy * dy; // we save the sqrt + + // use <= to ensure last point takes precedence + // (last generally means on top of) + if (dist < smallestDistance) { + smallestDistance = dist; + item = [i, j / ps]; + } + } + } + + if (s.bars.show && !item) { // no other point can be nearby + + var barLeft, barRight; + + switch (s.bars.align) { + case "left": + barLeft = 0; + break; + case "right": + barLeft = -s.bars.barWidth; + break; + default: + barLeft = -s.bars.barWidth / 2; + } + + barRight = barLeft + s.bars.barWidth; + + for (j = 0; j < points.length; j += ps) { + var x = points[j], y = points[j + 1], b = points[j + 2]; + if (x == null) + continue; + + // for a bar graph, the cursor must be inside the bar + if (series[i].bars.horizontal ? 
+ (mx <= Math.max(b, x) && mx >= Math.min(b, x) && + my >= y + barLeft && my <= y + barRight) : + (mx >= x + barLeft && mx <= x + barRight && + my >= Math.min(b, y) && my <= Math.max(b, y))) + item = [i, j / ps]; + } + } + } + + if (item) { + i = item[0]; + j = item[1]; + ps = series[i].datapoints.pointsize; + + return { datapoint: series[i].datapoints.points.slice(j * ps, (j + 1) * ps), + dataIndex: j, + series: series[i], + seriesIndex: i }; + } + + return null; + } + + function onMouseMove(e) { + if (options.grid.hoverable) + triggerClickHoverEvent("plothover", e, + function (s) { return s["hoverable"] != false; }); + } + + function onMouseLeave(e) { + if (options.grid.hoverable) + triggerClickHoverEvent("plothover", e, + function (s) { return false; }); + } + + function onClick(e) { + triggerClickHoverEvent("plotclick", e, + function (s) { return s["clickable"] != false; }); + } + + // trigger click or hover event (they send the same parameters + // so we share their code) + function triggerClickHoverEvent(eventname, event, seriesFilter) { + var offset = eventHolder.offset(), + canvasX = event.pageX - offset.left - plotOffset.left, + canvasY = event.pageY - offset.top - plotOffset.top, + pos = canvasToAxisCoords({ left: canvasX, top: canvasY }); + + pos.pageX = event.pageX; + pos.pageY = event.pageY; + + var item = findNearbyItem(canvasX, canvasY, seriesFilter); + + if (item) { + // fill in mouse pos for any listeners out there + item.pageX = parseInt(item.series.xaxis.p2c(item.datapoint[0]) + offset.left + plotOffset.left, 10); + item.pageY = parseInt(item.series.yaxis.p2c(item.datapoint[1]) + offset.top + plotOffset.top, 10); + } + + if (options.grid.autoHighlight) { + // clear auto-highlights + for (var i = 0; i < highlights.length; ++i) { + var h = highlights[i]; + if (h.auto == eventname && + !(item && h.series == item.series && + h.point[0] == item.datapoint[0] && + h.point[1] == item.datapoint[1])) + unhighlight(h.series, h.point); + } + + if (item) + highlight(item.series, item.datapoint, eventname); + } + + placeholder.trigger(eventname, [ pos, item ]); + } + + function triggerRedrawOverlay() { + var t = options.interaction.redrawOverlayInterval; + if (t == -1) { // skip event queue + drawOverlay(); + return; + } + + if (!redrawTimeout) + redrawTimeout = setTimeout(drawOverlay, t); + } + + function drawOverlay() { + redrawTimeout = null; + + // draw highlights + octx.save(); + overlay.clear(); + octx.translate(plotOffset.left, plotOffset.top); + + var i, hi; + for (i = 0; i < highlights.length; ++i) { + hi = highlights[i]; + + if (hi.series.bars.show) + drawBarHighlight(hi.series, hi.point); + else + drawPointHighlight(hi.series, hi.point); + } + octx.restore(); + + executeHooks(hooks.drawOverlay, [octx]); + } + + function highlight(s, point, auto) { + if (typeof s == "number") + s = series[s]; + + if (typeof point == "number") { + var ps = s.datapoints.pointsize; + point = s.datapoints.points.slice(ps * point, ps * (point + 1)); + } + + var i = indexOfHighlight(s, point); + if (i == -1) { + highlights.push({ series: s, point: point, auto: auto }); + + triggerRedrawOverlay(); + } + else if (!auto) + highlights[i].auto = false; + } + + function unhighlight(s, point) { + if (s == null && point == null) { + highlights = []; + triggerRedrawOverlay(); + return; + } + + if (typeof s == "number") + s = series[s]; + + if (typeof point == "number") { + var ps = s.datapoints.pointsize; + point = s.datapoints.points.slice(ps * point, ps * (point + 1)); + } + + var i = indexOfHighlight(s, 
point); + if (i != -1) { + highlights.splice(i, 1); + + triggerRedrawOverlay(); + } + } + + function indexOfHighlight(s, p) { + for (var i = 0; i < highlights.length; ++i) { + var h = highlights[i]; + if (h.series == s && h.point[0] == p[0] + && h.point[1] == p[1]) + return i; + } + return -1; + } + + function drawPointHighlight(series, point) { + var x = point[0], y = point[1], + axisx = series.xaxis, axisy = series.yaxis, + highlightColor = (typeof series.highlightColor === "string") ? series.highlightColor : $.color.parse(series.color).scale('a', 0.5).toString(); + + if (x < axisx.min || x > axisx.max || y < axisy.min || y > axisy.max) + return; + + var pointRadius = series.points.radius + series.points.lineWidth / 2; + octx.lineWidth = pointRadius; + octx.strokeStyle = highlightColor; + var radius = 1.5 * pointRadius; + x = axisx.p2c(x); + y = axisy.p2c(y); + + octx.beginPath(); + if (series.points.symbol == "circle") + octx.arc(x, y, radius, 0, 2 * Math.PI, false); + else + series.points.symbol(octx, x, y, radius, false); + octx.closePath(); + octx.stroke(); + } + + function drawBarHighlight(series, point) { + var highlightColor = (typeof series.highlightColor === "string") ? series.highlightColor : $.color.parse(series.color).scale('a', 0.5).toString(), + fillStyle = highlightColor, + barLeft; + + switch (series.bars.align) { + case "left": + barLeft = 0; + break; + case "right": + barLeft = -series.bars.barWidth; + break; + default: + barLeft = -series.bars.barWidth / 2; + } + + octx.lineWidth = series.bars.lineWidth; + octx.strokeStyle = highlightColor; + + drawBar(point[0], point[1], point[2] || 0, barLeft, barLeft + series.bars.barWidth, + function () { return fillStyle; }, series.xaxis, series.yaxis, octx, series.bars.horizontal, series.bars.lineWidth); + } + + function getColorOrGradient(spec, bottom, top, defaultColor) { + if (typeof spec == "string") + return spec; + else { + // assume this is a gradient spec; IE currently only + // supports a simple vertical gradient properly, so that's + // what we support too + var gradient = ctx.createLinearGradient(0, top, 0, bottom); + + for (var i = 0, l = spec.colors.length; i < l; ++i) { + var c = spec.colors[i]; + if (typeof c != "string") { + var co = $.color.parse(defaultColor); + if (c.brightness != null) + co = co.scale('rgb', c.brightness); + if (c.opacity != null) + co.a *= c.opacity; + c = co.toString(); + } + gradient.addColorStop(i / (l - 1), c); + } + + return gradient; + } + } + } + + // Add the plot function to the top level of the jQuery object + + $.plot = function(placeholder, data, options) { + //var t0 = new Date(); + var plot = new Plot($(placeholder), data, options, $.plot.plugins); + //(window.console ? console.log : alert)("time used (msecs): " + ((new Date()).getTime() - t0.getTime())); + return plot; + }; + + $.plot.version = "0.8.3"; + + $.plot.plugins = []; + + // Also add the plot function as a chainable property + + $.fn.plot = function(data, options) { + return this.each(function() { + $.plot(this, data, options); + }); + }; + + // round to nearby lower multiple of base + function floorInBase(n, base) { + return base * Math.floor(n / base); + } + +})(jQuery); diff --git a/qa/workunits/erasure-code/jquery.js b/qa/workunits/erasure-code/jquery.js new file mode 100644 index 00000000..8c24ffc6 --- /dev/null +++ b/qa/workunits/erasure-code/jquery.js @@ -0,0 +1,9472 @@ +/*! 
+ * jQuery JavaScript Library v1.8.3 + * http://jquery.com/ + * + * Includes Sizzle.js + * http://sizzlejs.com/ + * + * Copyright 2012 jQuery Foundation and other contributors + * Released under the MIT license + * http://jquery.org/license + * + * Date: Tue Nov 13 2012 08:20:33 GMT-0500 (Eastern Standard Time) + */ +(function( window, undefined ) { +var + // A central reference to the root jQuery(document) + rootjQuery, + + // The deferred used on DOM ready + readyList, + + // Use the correct document accordingly with window argument (sandbox) + document = window.document, + location = window.location, + navigator = window.navigator, + + // Map over jQuery in case of overwrite + _jQuery = window.jQuery, + + // Map over the $ in case of overwrite + _$ = window.$, + + // Save a reference to some core methods + core_push = Array.prototype.push, + core_slice = Array.prototype.slice, + core_indexOf = Array.prototype.indexOf, + core_toString = Object.prototype.toString, + core_hasOwn = Object.prototype.hasOwnProperty, + core_trim = String.prototype.trim, + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + // The jQuery object is actually just the init constructor 'enhanced' + return new jQuery.fn.init( selector, context, rootjQuery ); + }, + + // Used for matching numbers + core_pnum = /[\-+]?(?:\d*\.|)\d+(?:[eE][\-+]?\d+|)/.source, + + // Used for detecting and trimming whitespace + core_rnotwhite = /\S/, + core_rspace = /\s+/, + + // Make sure we trim BOM and NBSP (here's looking at you, Safari 5.0 and IE) + rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, + + // A simple way to check for HTML strings + // Prioritize #id over to avoid XSS via location.hash (#9521) + rquickExpr = /^(?:[^#<]*(<[\w\W]+>)[^>]*$|#([\w\-]*)$)/, + + // Match a standalone tag + rsingleTag = /^<(\w+)\s*\/?>(?:<\/\1>|)$/, + + // JSON RegExp + rvalidchars = /^[\],:{}\s]*$/, + rvalidbraces = /(?:^|:|,)(?:\s*\[)+/g, + rvalidescape = /\\(?:["\\\/bfnrt]|u[\da-fA-F]{4})/g, + rvalidtokens = /"[^"\\\r\n]*"|true|false|null|-?(?:\d\d*\.|)\d+(?:[eE][\-+]?\d+|)/g, + + // Matches dashed string for camelizing + rmsPrefix = /^-ms-/, + rdashAlpha = /-([\da-z])/gi, + + // Used by jQuery.camelCase as callback to replace() + fcamelCase = function( all, letter ) { + return ( letter + "" ).toUpperCase(); + }, + + // The ready event handler and self cleanup method + DOMContentLoaded = function() { + if ( document.addEventListener ) { + document.removeEventListener( "DOMContentLoaded", DOMContentLoaded, false ); + jQuery.ready(); + } else if ( document.readyState === "complete" ) { + // we're here because readyState === "complete" in oldIE + // which is good enough for us to call the dom ready! 
+ document.detachEvent( "onreadystatechange", DOMContentLoaded ); + jQuery.ready(); + } + }, + + // [[Class]] -> type pairs + class2type = {}; + +jQuery.fn = jQuery.prototype = { + constructor: jQuery, + init: function( selector, context, rootjQuery ) { + var match, elem, ret, doc; + + // Handle $(""), $(null), $(undefined), $(false) + if ( !selector ) { + return this; + } + + // Handle $(DOMElement) + if ( selector.nodeType ) { + this.context = this[0] = selector; + this.length = 1; + return this; + } + + // Handle HTML strings + if ( typeof selector === "string" ) { + if ( selector.charAt(0) === "<" && selector.charAt( selector.length - 1 ) === ">" && selector.length >= 3 ) { + // Assume that strings that start and end with <> are HTML and skip the regex check + match = [ null, selector, null ]; + + } else { + match = rquickExpr.exec( selector ); + } + + // Match html or make sure no context is specified for #id + if ( match && (match[1] || !context) ) { + + // HANDLE: $(html) -> $(array) + if ( match[1] ) { + context = context instanceof jQuery ? context[0] : context; + doc = ( context && context.nodeType ? context.ownerDocument || context : document ); + + // scripts is true for back-compat + selector = jQuery.parseHTML( match[1], doc, true ); + if ( rsingleTag.test( match[1] ) && jQuery.isPlainObject( context ) ) { + this.attr.call( selector, context, true ); + } + + return jQuery.merge( this, selector ); + + // HANDLE: $(#id) + } else { + elem = document.getElementById( match[2] ); + + // Check parentNode to catch when Blackberry 4.6 returns + // nodes that are no longer in the document #6963 + if ( elem && elem.parentNode ) { + // Handle the case where IE and Opera return items + // by name instead of ID + if ( elem.id !== match[2] ) { + return rootjQuery.find( selector ); + } + + // Otherwise, we inject the element directly into the jQuery object + this.length = 1; + this[0] = elem; + } + + this.context = document; + this.selector = selector; + return this; + } + + // HANDLE: $(expr, $(...)) + } else if ( !context || context.jquery ) { + return ( context || rootjQuery ).find( selector ); + + // HANDLE: $(expr, context) + // (which is just equivalent to: $(context).find(expr) + } else { + return this.constructor( context ).find( selector ); + } + + // HANDLE: $(function) + // Shortcut for document ready + } else if ( jQuery.isFunction( selector ) ) { + return rootjQuery.ready( selector ); + } + + if ( selector.selector !== undefined ) { + this.selector = selector.selector; + this.context = selector.context; + } + + return jQuery.makeArray( selector, this ); + }, + + // Start with an empty selector + selector: "", + + // The current version of jQuery being used + jquery: "1.8.3", + + // The default length of a jQuery object is 0 + length: 0, + + // The number of elements contained in the matched element set + size: function() { + return this.length; + }, + + toArray: function() { + return core_slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + return num == null ? + + // Return a 'clean' array + this.toArray() : + + // Return just the object + ( num < 0 ? 
this[ this.length + num ] : this[ num ] ); + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems, name, selector ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + + ret.context = this.context; + + if ( name === "find" ) { + ret.selector = this.selector + ( this.selector ? " " : "" ) + selector; + } else if ( name ) { + ret.selector = this.selector + "." + name + "(" + selector + ")"; + } + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + // (You can seed the arguments with an array of args, but this is + // only used internally.) + each: function( callback, args ) { + return jQuery.each( this, callback, args ); + }, + + ready: function( fn ) { + // Add the callback + jQuery.ready.promise().done( fn ); + + return this; + }, + + eq: function( i ) { + i = +i; + return i === -1 ? + this.slice( i ) : + this.slice( i, i + 1 ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + slice: function() { + return this.pushStack( core_slice.apply( this, arguments ), + "slice", core_slice.call(arguments).join(",") ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map(this, function( elem, i ) { + return callback.call( elem, i, elem ); + })); + }, + + end: function() { + return this.prevObject || this.constructor(null); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. + push: core_push, + sort: [].sort, + splice: [].splice +}; + +// Give the init function the jQuery prototype for later instantiation +jQuery.fn.init.prototype = jQuery.fn; + +jQuery.extend = jQuery.fn.extend = function() { + var options, name, src, copy, copyIsArray, clone, + target = arguments[0] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + target = arguments[1] || {}; + // skip the boolean and the target + i = 2; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !jQuery.isFunction(target) ) { + target = {}; + } + + // extend jQuery itself if only one argument is passed + if ( length === i ) { + target = this; + --i; + } + + for ( ; i < length; i++ ) { + // Only deal with non-null/undefined values + if ( (options = arguments[ i ]) != null ) { + // Extend the base object + for ( name in options ) { + src = target[ name ]; + copy = options[ name ]; + + // Prevent never-ending loop + if ( target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject(copy) || (copyIsArray = jQuery.isArray(copy)) ) ) { + if ( copyIsArray ) { + copyIsArray = false; + clone = src && jQuery.isArray(src) ? src : []; + + } else { + clone = src && jQuery.isPlainObject(src) ? 
src : {}; + } + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend({ + noConflict: function( deep ) { + if ( window.$ === jQuery ) { + window.$ = _$; + } + + if ( deep && window.jQuery === jQuery ) { + window.jQuery = _jQuery; + } + + return jQuery; + }, + + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Hold (or release) the ready event + holdReady: function( hold ) { + if ( hold ) { + jQuery.readyWait++; + } else { + jQuery.ready( true ); + } + }, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443). + if ( !document.body ) { + return setTimeout( jQuery.ready, 1 ); + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + + // Trigger any bound ready events + if ( jQuery.fn.trigger ) { + jQuery( document ).trigger("ready").off("ready"); + } + }, + + // See test/unit/core.js for details concerning isFunction. + // Since version 1.3, DOM methods and functions like alert + // aren't supported. They return false on IE (#2968). + isFunction: function( obj ) { + return jQuery.type(obj) === "function"; + }, + + isArray: Array.isArray || function( obj ) { + return jQuery.type(obj) === "array"; + }, + + isWindow: function( obj ) { + return obj != null && obj == obj.window; + }, + + isNumeric: function( obj ) { + return !isNaN( parseFloat(obj) ) && isFinite( obj ); + }, + + type: function( obj ) { + return obj == null ? + String( obj ) : + class2type[ core_toString.call(obj) ] || "object"; + }, + + isPlainObject: function( obj ) { + // Must be an Object. + // Because of IE, we also have to check the presence of the constructor property. + // Make sure that DOM nodes and window objects don't pass through, as well + if ( !obj || jQuery.type(obj) !== "object" || obj.nodeType || jQuery.isWindow( obj ) ) { + return false; + } + + try { + // Not own constructor property must be Object + if ( obj.constructor && + !core_hasOwn.call(obj, "constructor") && + !core_hasOwn.call(obj.constructor.prototype, "isPrototypeOf") ) { + return false; + } + } catch ( e ) { + // IE8,9 Will throw exceptions on certain host objects #9897 + return false; + } + + // Own properties are enumerated firstly, so to speed up, + // if last one is own, then all properties are own. 
+ + var key; + for ( key in obj ) {} + + return key === undefined || core_hasOwn.call( obj, key ); + }, + + isEmptyObject: function( obj ) { + var name; + for ( name in obj ) { + return false; + } + return true; + }, + + error: function( msg ) { + throw new Error( msg ); + }, + + // data: string of html + // context (optional): If specified, the fragment will be created in this context, defaults to document + // scripts (optional): If true, will include scripts passed in the html string + parseHTML: function( data, context, scripts ) { + var parsed; + if ( !data || typeof data !== "string" ) { + return null; + } + if ( typeof context === "boolean" ) { + scripts = context; + context = 0; + } + context = context || document; + + // Single tag + if ( (parsed = rsingleTag.exec( data )) ) { + return [ context.createElement( parsed[1] ) ]; + } + + parsed = jQuery.buildFragment( [ data ], context, scripts ? null : [] ); + return jQuery.merge( [], + (parsed.cacheable ? jQuery.clone( parsed.fragment ) : parsed.fragment).childNodes ); + }, + + parseJSON: function( data ) { + if ( !data || typeof data !== "string") { + return null; + } + + // Make sure leading/trailing whitespace is removed (IE can't handle it) + data = jQuery.trim( data ); + + // Attempt to parse using the native JSON parser first + if ( window.JSON && window.JSON.parse ) { + return window.JSON.parse( data ); + } + + // Make sure the incoming data is actual JSON + // Logic borrowed from http://json.org/json2.js + if ( rvalidchars.test( data.replace( rvalidescape, "@" ) + .replace( rvalidtokens, "]" ) + .replace( rvalidbraces, "")) ) { + + return ( new Function( "return " + data ) )(); + + } + jQuery.error( "Invalid JSON: " + data ); + }, + + // Cross-browser xml parsing + parseXML: function( data ) { + var xml, tmp; + if ( !data || typeof data !== "string" ) { + return null; + } + try { + if ( window.DOMParser ) { // Standard + tmp = new DOMParser(); + xml = tmp.parseFromString( data , "text/xml" ); + } else { // IE + xml = new ActiveXObject( "Microsoft.XMLDOM" ); + xml.async = "false"; + xml.loadXML( data ); + } + } catch( e ) { + xml = undefined; + } + if ( !xml || !xml.documentElement || xml.getElementsByTagName( "parsererror" ).length ) { + jQuery.error( "Invalid XML: " + data ); + } + return xml; + }, + + noop: function() {}, + + // Evaluates a script in a global context + // Workarounds based on findings by Jim Driscoll + // http://weblogs.java.net/blog/driscoll/archive/2009/09/08/eval-javascript-global-context + globalEval: function( data ) { + if ( data && core_rnotwhite.test( data ) ) { + // We use execScript on Internet Explorer + // We use an anonymous function so that context is window + // rather than jQuery in Firefox + ( window.execScript || function( data ) { + window[ "eval" ].call( window, data ); + } )( data ); + } + }, + + // Convert dashed to camelCase; used by the css and data modules + // Microsoft forgot to hump their vendor prefix (#9572) + camelCase: function( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); + }, + + nodeName: function( elem, name ) { + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + }, + + // args is for internal usage only + each: function( obj, callback, args ) { + var name, + i = 0, + length = obj.length, + isObj = length === undefined || jQuery.isFunction( obj ); + + if ( args ) { + if ( isObj ) { + for ( name in obj ) { + if ( callback.apply( obj[ name ], args ) === false ) { + break; + } + } + } else { + for ( ; i 
< length; ) { + if ( callback.apply( obj[ i++ ], args ) === false ) { + break; + } + } + } + + // A special, fast, case for the most common use of each + } else { + if ( isObj ) { + for ( name in obj ) { + if ( callback.call( obj[ name ], name, obj[ name ] ) === false ) { + break; + } + } + } else { + for ( ; i < length; ) { + if ( callback.call( obj[ i ], i, obj[ i++ ] ) === false ) { + break; + } + } + } + } + + return obj; + }, + + // Use native String.trim function wherever possible + trim: core_trim && !core_trim.call("\uFEFF\xA0") ? + function( text ) { + return text == null ? + "" : + core_trim.call( text ); + } : + + // Otherwise use our own trimming functionality + function( text ) { + return text == null ? + "" : + ( text + "" ).replace( rtrim, "" ); + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var type, + ret = results || []; + + if ( arr != null ) { + // The window, strings (and functions) also have 'length' + // Tweaked logic slightly to handle Blackberry 4.7 RegExp issues #6930 + type = jQuery.type( arr ); + + if ( arr.length == null || type === "string" || type === "function" || type === "regexp" || jQuery.isWindow( arr ) ) { + core_push.call( ret, arr ); + } else { + jQuery.merge( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + var len; + + if ( arr ) { + if ( core_indexOf ) { + return core_indexOf.call( arr, elem, i ); + } + + len = arr.length; + i = i ? i < 0 ? Math.max( 0, len + i ) : i : 0; + + for ( ; i < len; i++ ) { + // Skip accessing in sparse arrays + if ( i in arr && arr[ i ] === elem ) { + return i; + } + } + } + + return -1; + }, + + merge: function( first, second ) { + var l = second.length, + i = first.length, + j = 0; + + if ( typeof l === "number" ) { + for ( ; j < l; j++ ) { + first[ i++ ] = second[ j ]; + } + + } else { + while ( second[j] !== undefined ) { + first[ i++ ] = second[ j++ ]; + } + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, inv ) { + var retVal, + ret = [], + i = 0, + length = elems.length; + inv = !!inv; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + retVal = !!callback( elems[ i ], i ); + if ( inv !== retVal ) { + ret.push( elems[ i ] ); + } + } + + return ret; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var value, key, + ret = [], + i = 0, + length = elems.length, + // jquery objects are treated as arrays + isArray = elems instanceof jQuery || length !== undefined && typeof length === "number" && ( ( length > 0 && elems[ 0 ] && elems[ length -1 ] ) || length === 0 || jQuery.isArray( elems ) ) ; + + // Go through the array, translating each of the items to their + if ( isArray ) { + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret[ ret.length ] = value; + } + } + + // Go through every key on the object, + } else { + for ( key in elems ) { + value = callback( elems[ key ], key, arg ); + + if ( value != null ) { + ret[ ret.length ] = value; + } + } + } + + // Flatten any nested arrays + return ret.concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // Bind a function to a context, optionally partially applying any + // arguments. 
+ proxy: function( fn, context ) { + var tmp, args, proxy; + + if ( typeof context === "string" ) { + tmp = fn[ context ]; + context = fn; + fn = tmp; + } + + // Quick check to determine if target is callable, in the spec + // this throws a TypeError, but we will just return undefined. + if ( !jQuery.isFunction( fn ) ) { + return undefined; + } + + // Simulated bind + args = core_slice.call( arguments, 2 ); + proxy = function() { + return fn.apply( context, args.concat( core_slice.call( arguments ) ) ); + }; + + // Set the guid of unique handler to the same of original handler, so it can be removed + proxy.guid = fn.guid = fn.guid || jQuery.guid++; + + return proxy; + }, + + // Multifunctional method to get and set values of a collection + // The value/s can optionally be executed if it's a function + access: function( elems, fn, key, value, chainable, emptyGet, pass ) { + var exec, + bulk = key == null, + i = 0, + length = elems.length; + + // Sets many values + if ( key && typeof key === "object" ) { + for ( i in key ) { + jQuery.access( elems, fn, i, key[i], 1, emptyGet, value ); + } + chainable = 1; + + // Sets one value + } else if ( value !== undefined ) { + // Optionally, function values get executed if exec is true + exec = pass === undefined && jQuery.isFunction( value ); + + if ( bulk ) { + // Bulk operations only iterate when executing function values + if ( exec ) { + exec = fn; + fn = function( elem, key, value ) { + return exec.call( jQuery( elem ), value ); + }; + + // Otherwise they run against the entire set + } else { + fn.call( elems, value ); + fn = null; + } + } + + if ( fn ) { + for (; i < length; i++ ) { + fn( elems[i], key, exec ? value.call( elems[i], i, fn( elems[i], key ) ) : value, pass ); + } + } + + chainable = 1; + } + + return chainable ? + elems : + + // Gets + bulk ? + fn.call( elems ) : + length ? fn( elems[0], key ) : emptyGet; + }, + + now: function() { + return ( new Date() ).getTime(); + } +}); + +jQuery.ready.promise = function( obj ) { + if ( !readyList ) { + + readyList = jQuery.Deferred(); + + // Catch cases where $(document).ready() is called after the browser event has already occurred. 
+ // we once tried to use readyState "interactive" here, but it caused issues like the one + // discovered by ChrisS here: http://bugs.jquery.com/ticket/12282#comment:15 + if ( document.readyState === "complete" ) { + // Handle it asynchronously to allow scripts the opportunity to delay ready + setTimeout( jQuery.ready, 1 ); + + // Standards-based browsers support DOMContentLoaded + } else if ( document.addEventListener ) { + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", DOMContentLoaded, false ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", jQuery.ready, false ); + + // If IE event model is used + } else { + // Ensure firing before onload, maybe late but safe also for iframes + document.attachEvent( "onreadystatechange", DOMContentLoaded ); + + // A fallback to window.onload, that will always work + window.attachEvent( "onload", jQuery.ready ); + + // If IE and not a frame + // continually check to see if the document is ready + var top = false; + + try { + top = window.frameElement == null && document.documentElement; + } catch(e) {} + + if ( top && top.doScroll ) { + (function doScrollCheck() { + if ( !jQuery.isReady ) { + + try { + // Use the trick by Diego Perini + // http://javascript.nwbox.com/IEContentLoaded/ + top.doScroll("left"); + } catch(e) { + return setTimeout( doScrollCheck, 50 ); + } + + // and execute any waiting functions + jQuery.ready(); + } + })(); + } + } + } + return readyList.promise( obj ); +}; + +// Populate the class2type map +jQuery.each("Boolean Number String Function Array Date RegExp Object".split(" "), function(i, name) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +}); + +// All jQuery objects should point back to these +rootjQuery = jQuery(document); +// String to Object options format cache +var optionsCache = {}; + +// Convert String-formatted options into Object-formatted ones and store in cache +function createOptions( options ) { + var object = optionsCache[ options ] = {}; + jQuery.each( options.split( core_rspace ), function( _, flag ) { + object[ flag ] = true; + }); + return object; +} + +/* + * Create a callback list using the following parameters: + * + * options: an optional list of space-separated options that will change how + * the callback list behaves or a more traditional option object + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. + * + * Possible options: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( options ) { + + // Convert options from String-formatted to Object-formatted if needed + // (we check in cache first) + options = typeof options === "string" ? 
+ ( optionsCache[ options ] || createOptions( options ) ) : + jQuery.extend( {}, options ); + + var // Last fire value (for non-forgettable lists) + memory, + // Flag to know if list was already fired + fired, + // Flag to know if list is currently firing + firing, + // First callback to fire (used internally by add and fireWith) + firingStart, + // End of the loop when firing + firingLength, + // Index of currently firing callback (modified by remove if needed) + firingIndex, + // Actual callback list + list = [], + // Stack of fire calls for repeatable lists + stack = !options.once && [], + // Fire callbacks + fire = function( data ) { + memory = options.memory && data; + fired = true; + firingIndex = firingStart || 0; + firingStart = 0; + firingLength = list.length; + firing = true; + for ( ; list && firingIndex < firingLength; firingIndex++ ) { + if ( list[ firingIndex ].apply( data[ 0 ], data[ 1 ] ) === false && options.stopOnFalse ) { + memory = false; // To prevent further calls using add + break; + } + } + firing = false; + if ( list ) { + if ( stack ) { + if ( stack.length ) { + fire( stack.shift() ); + } + } else if ( memory ) { + list = []; + } else { + self.disable(); + } + } + }, + // Actual Callbacks object + self = { + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + // First, we save the current length + var start = list.length; + (function add( args ) { + jQuery.each( args, function( _, arg ) { + var type = jQuery.type( arg ); + if ( type === "function" ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && type !== "string" ) { + // Inspect recursively + add( arg ); + } + }); + })( arguments ); + // Do we need to add the callbacks to the + // current firing batch? + if ( firing ) { + firingLength = list.length; + // With memory, if we're not firing then + // we should call right away + } else if ( memory ) { + firingStart = start; + fire( memory ); + } + } + return this; + }, + // Remove a callback from the list + remove: function() { + if ( list ) { + jQuery.each( arguments, function( _, arg ) { + var index; + while( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + // Handle firing indexes + if ( firing ) { + if ( index <= firingLength ) { + firingLength--; + } + if ( index <= firingIndex ) { + firingIndex--; + } + } + } + }); + } + return this; + }, + // Control if a given callback is in the list + has: function( fn ) { + return jQuery.inArray( fn, list ) > -1; + }, + // Remove all callbacks from the list + empty: function() { + list = []; + return this; + }, + // Have the list do nothing anymore + disable: function() { + list = stack = memory = undefined; + return this; + }, + // Is it disabled? + disabled: function() { + return !list; + }, + // Lock the list in its current state + lock: function() { + stack = undefined; + if ( !memory ) { + self.disable(); + } + return this; + }, + // Is it locked? + locked: function() { + return !stack; + }, + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + args = args || []; + args = [ context, args.slice ? 
args.slice() : args ]; + if ( list && ( !fired || stack ) ) { + if ( firing ) { + stack.push( args ); + } else { + fire( args ); + } + } + return this; + }, + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; +jQuery.extend({ + + Deferred: function( func ) { + var tuples = [ + // action, add listener, listener list, final state + [ "resolve", "done", jQuery.Callbacks("once memory"), "resolved" ], + [ "reject", "fail", jQuery.Callbacks("once memory"), "rejected" ], + [ "notify", "progress", jQuery.Callbacks("memory") ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + then: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + return jQuery.Deferred(function( newDefer ) { + jQuery.each( tuples, function( i, tuple ) { + var action = tuple[ 0 ], + fn = fns[ i ]; + // deferred[ done | fail | progress ] for forwarding actions to newDefer + deferred[ tuple[1] ]( jQuery.isFunction( fn ) ? + function() { + var returned = fn.apply( this, arguments ); + if ( returned && jQuery.isFunction( returned.promise ) ) { + returned.promise() + .done( newDefer.resolve ) + .fail( newDefer.reject ) + .progress( newDefer.notify ); + } else { + newDefer[ action + "With" ]( this === deferred ? newDefer : this, [ returned ] ); + } + } : + newDefer[ action ] + ); + }); + fns = null; + }).promise(); + }, + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Keep pipe for back-compat + promise.pipe = promise.then; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 3 ]; + + // promise[ done | fail | progress ] = list.add + promise[ tuple[1] ] = list.add; + + // Handle state + if ( stateString ) { + list.add(function() { + // state = [ resolved | rejected ] + state = stateString; + + // [ reject_list | resolve_list ].disable; progress_list.lock + }, tuples[ i ^ 1 ][ 2 ].disable, tuples[ 2 ][ 2 ].lock ); + } + + // deferred[ resolve | reject | notify ] = list.fire + deferred[ tuple[0] ] = list.fire; + deferred[ tuple[0] + "With" ] = list.fireWith; + }); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! + return deferred; + }, + + // Deferred helper + when: function( subordinate /* , ..., subordinateN */ ) { + var i = 0, + resolveValues = core_slice.call( arguments ), + length = resolveValues.length, + + // the count of uncompleted subordinates + remaining = length !== 1 || ( subordinate && jQuery.isFunction( subordinate.promise ) ) ? length : 0, + + // the master Deferred. If resolveValues consist of only a single Deferred, just use that. + deferred = remaining === 1 ? subordinate : jQuery.Deferred(), + + // Update function for both resolve and progress values + updateFunc = function( i, contexts, values ) { + return function( value ) { + contexts[ i ] = this; + values[ i ] = arguments.length > 1 ? 
core_slice.call( arguments ) : value; + if( values === progressValues ) { + deferred.notifyWith( contexts, values ); + } else if ( !( --remaining ) ) { + deferred.resolveWith( contexts, values ); + } + }; + }, + + progressValues, progressContexts, resolveContexts; + + // add listeners to Deferred subordinates; treat others as resolved + if ( length > 1 ) { + progressValues = new Array( length ); + progressContexts = new Array( length ); + resolveContexts = new Array( length ); + for ( ; i < length; i++ ) { + if ( resolveValues[ i ] && jQuery.isFunction( resolveValues[ i ].promise ) ) { + resolveValues[ i ].promise() + .done( updateFunc( i, resolveContexts, resolveValues ) ) + .fail( deferred.reject ) + .progress( updateFunc( i, progressContexts, progressValues ) ); + } else { + --remaining; + } + } + } + + // if we're not waiting on anything, resolve the master + if ( !remaining ) { + deferred.resolveWith( resolveContexts, resolveValues ); + } + + return deferred.promise(); + } +}); +jQuery.support = (function() { + + var support, + all, + a, + select, + opt, + input, + fragment, + eventName, + i, + isSupported, + clickFn, + div = document.createElement("div"); + + // Setup + div.setAttribute( "className", "t" ); + div.innerHTML = "
a"; + + // Support tests won't run in some limited or non-browser environments + all = div.getElementsByTagName("*"); + a = div.getElementsByTagName("a")[ 0 ]; + if ( !all || !a || !all.length ) { + return {}; + } + + // First batch of tests + select = document.createElement("select"); + opt = select.appendChild( document.createElement("option") ); + input = div.getElementsByTagName("input")[ 0 ]; + + a.style.cssText = "top:1px;float:left;opacity:.5"; + support = { + // IE strips leading whitespace when .innerHTML is used + leadingWhitespace: ( div.firstChild.nodeType === 3 ), + + // Make sure that tbody elements aren't automatically inserted + // IE will insert them into empty tables + tbody: !div.getElementsByTagName("tbody").length, + + // Make sure that link elements get serialized correctly by innerHTML + // This requires a wrapper element in IE + htmlSerialize: !!div.getElementsByTagName("link").length, + + // Get the style information from getAttribute + // (IE uses .cssText instead) + style: /top/.test( a.getAttribute("style") ), + + // Make sure that URLs aren't manipulated + // (IE normalizes it by default) + hrefNormalized: ( a.getAttribute("href") === "/a" ), + + // Make sure that element opacity exists + // (IE uses filter instead) + // Use a regex to work around a WebKit issue. See #5145 + opacity: /^0.5/.test( a.style.opacity ), + + // Verify style float existence + // (IE uses styleFloat instead of cssFloat) + cssFloat: !!a.style.cssFloat, + + // Make sure that if no value is specified for a checkbox + // that it defaults to "on". + // (WebKit defaults to "" instead) + checkOn: ( input.value === "on" ), + + // Make sure that a selected-by-default option has a working selected property. + // (WebKit defaults to false instead of true, IE too, if it's in an optgroup) + optSelected: opt.selected, + + // Test setAttribute on camelCase class. 
If it works, we need attrFixes when doing get/setAttribute (ie6/7) + getSetAttribute: div.className !== "t", + + // Tests for enctype support on a form (#6743) + enctype: !!document.createElement("form").enctype, + + // Makes sure cloning an html5 element does not cause problems + // Where outerHTML is undefined, this still works + html5Clone: document.createElement("nav").cloneNode( true ).outerHTML !== "<:nav>", + + // jQuery.support.boxModel DEPRECATED in 1.8 since we don't support Quirks Mode + boxModel: ( document.compatMode === "CSS1Compat" ), + + // Will be defined later + submitBubbles: true, + changeBubbles: true, + focusinBubbles: false, + deleteExpando: true, + noCloneEvent: true, + inlineBlockNeedsLayout: false, + shrinkWrapBlocks: false, + reliableMarginRight: true, + boxSizingReliable: true, + pixelPosition: false + }; + + // Make sure checked status is properly cloned + input.checked = true; + support.noCloneChecked = input.cloneNode( true ).checked; + + // Make sure that the options inside disabled selects aren't marked as disabled + // (WebKit marks them as disabled) + select.disabled = true; + support.optDisabled = !opt.disabled; + + // Test to see if it's possible to delete an expando from an element + // Fails in Internet Explorer + try { + delete div.test; + } catch( e ) { + support.deleteExpando = false; + } + + if ( !div.addEventListener && div.attachEvent && div.fireEvent ) { + div.attachEvent( "onclick", clickFn = function() { + // Cloning a node shouldn't copy over any + // bound event handlers (IE does this) + support.noCloneEvent = false; + }); + div.cloneNode( true ).fireEvent("onclick"); + div.detachEvent( "onclick", clickFn ); + } + + // Check if a radio maintains its value + // after being appended to the DOM + input = document.createElement("input"); + input.value = "t"; + input.setAttribute( "type", "radio" ); + support.radioValue = input.value === "t"; + + input.setAttribute( "checked", "checked" ); + + // #11217 - WebKit loses check when the name is after the checked attribute + input.setAttribute( "name", "t" ); + + div.appendChild( input ); + fragment = document.createDocumentFragment(); + fragment.appendChild( div.lastChild ); + + // WebKit doesn't clone checked state correctly in fragments + support.checkClone = fragment.cloneNode( true ).cloneNode( true ).lastChild.checked; + + // Check if a disconnected checkbox will retain its checked + // value of true after appended to the DOM (IE6/7) + support.appendChecked = input.checked; + + fragment.removeChild( input ); + fragment.appendChild( div ); + + // Technique from Juriy Zaytsev + // http://perfectionkills.com/detecting-event-support-without-browser-sniffing/ + // We only care about the case where non-standard event systems + // are used, namely in IE. Short-circuiting here helps us to + // avoid an eval call (in setAttribute) which can cause CSP + // to go haywire. 
See: https://developer.mozilla.org/en/Security/CSP + if ( div.attachEvent ) { + for ( i in { + submit: true, + change: true, + focusin: true + }) { + eventName = "on" + i; + isSupported = ( eventName in div ); + if ( !isSupported ) { + div.setAttribute( eventName, "return;" ); + isSupported = ( typeof div[ eventName ] === "function" ); + } + support[ i + "Bubbles" ] = isSupported; + } + } + + // Run tests that need a body at doc ready + jQuery(function() { + var container, div, tds, marginDiv, + divReset = "padding:0;margin:0;border:0;display:block;overflow:hidden;", + body = document.getElementsByTagName("body")[0]; + + if ( !body ) { + // Return for frameset docs that don't have a body + return; + } + + container = document.createElement("div"); + container.style.cssText = "visibility:hidden;border:0;width:0;height:0;position:static;top:0;margin-top:1px"; + body.insertBefore( container, body.firstChild ); + + // Construct the test element + div = document.createElement("div"); + container.appendChild( div ); + + // Check if table cells still have offsetWidth/Height when they are set + // to display:none and there are still other visible table cells in a + // table row; if so, offsetWidth/Height are not reliable for use when + // determining if an element has been hidden directly using + // display:none (it is still safe to use offsets if a parent element is + // hidden; don safety goggles and see bug #4512 for more information). + // (only IE 8 fails this test) + div.innerHTML = "
<table><tr><td></td><td>t</td></tr></table>
"; + tds = div.getElementsByTagName("td"); + tds[ 0 ].style.cssText = "padding:0;margin:0;border:0;display:none"; + isSupported = ( tds[ 0 ].offsetHeight === 0 ); + + tds[ 0 ].style.display = ""; + tds[ 1 ].style.display = "none"; + + // Check if empty table cells still have offsetWidth/Height + // (IE <= 8 fail this test) + support.reliableHiddenOffsets = isSupported && ( tds[ 0 ].offsetHeight === 0 ); + + // Check box-sizing and margin behavior + div.innerHTML = ""; + div.style.cssText = "box-sizing:border-box;-moz-box-sizing:border-box;-webkit-box-sizing:border-box;padding:1px;border:1px;display:block;width:4px;margin-top:1%;position:absolute;top:1%;"; + support.boxSizing = ( div.offsetWidth === 4 ); + support.doesNotIncludeMarginInBodyOffset = ( body.offsetTop !== 1 ); + + // NOTE: To any future maintainer, we've window.getComputedStyle + // because jsdom on node.js will break without it. + if ( window.getComputedStyle ) { + support.pixelPosition = ( window.getComputedStyle( div, null ) || {} ).top !== "1%"; + support.boxSizingReliable = ( window.getComputedStyle( div, null ) || { width: "4px" } ).width === "4px"; + + // Check if div with explicit width and no margin-right incorrectly + // gets computed margin-right based on width of container. For more + // info see bug #3333 + // Fails in WebKit before Feb 2011 nightlies + // WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right + marginDiv = document.createElement("div"); + marginDiv.style.cssText = div.style.cssText = divReset; + marginDiv.style.marginRight = marginDiv.style.width = "0"; + div.style.width = "1px"; + div.appendChild( marginDiv ); + support.reliableMarginRight = + !parseFloat( ( window.getComputedStyle( marginDiv, null ) || {} ).marginRight ); + } + + if ( typeof div.style.zoom !== "undefined" ) { + // Check if natively block-level elements act like inline-block + // elements when setting their display to 'inline' and giving + // them layout + // (IE < 8 does this) + div.innerHTML = ""; + div.style.cssText = divReset + "width:1px;padding:1px;display:inline;zoom:1"; + support.inlineBlockNeedsLayout = ( div.offsetWidth === 3 ); + + // Check if elements with layout shrink-wrap their children + // (IE 6 does this) + div.style.display = "block"; + div.style.overflow = "visible"; + div.innerHTML = "
"; + div.firstChild.style.width = "5px"; + support.shrinkWrapBlocks = ( div.offsetWidth !== 3 ); + + container.style.zoom = 1; + } + + // Null elements to avoid leaks in IE + body.removeChild( container ); + container = div = tds = marginDiv = null; + }); + + // Null elements to avoid leaks in IE + fragment.removeChild( div ); + all = a = select = opt = input = fragment = div = null; + + return support; +})(); +var rbrace = /(?:\{[\s\S]*\}|\[[\s\S]*\])$/, + rmultiDash = /([A-Z])/g; + +jQuery.extend({ + cache: {}, + + deletedIds: [], + + // Remove at next major release (1.9/2.0) + uuid: 0, + + // Unique for each copy of jQuery on the page + // Non-digits removed to match rinlinejQuery + expando: "jQuery" + ( jQuery.fn.jquery + Math.random() ).replace( /\D/g, "" ), + + // The following elements throw uncatchable exceptions if you + // attempt to add expando properties to them. + noData: { + "embed": true, + // Ban all objects except for Flash (which handle expandos) + "object": "clsid:D27CDB6E-AE6D-11cf-96B8-444553540000", + "applet": true + }, + + hasData: function( elem ) { + elem = elem.nodeType ? jQuery.cache[ elem[jQuery.expando] ] : elem[ jQuery.expando ]; + return !!elem && !isEmptyDataObject( elem ); + }, + + data: function( elem, name, data, pvt /* Internal Use Only */ ) { + if ( !jQuery.acceptData( elem ) ) { + return; + } + + var thisCache, ret, + internalKey = jQuery.expando, + getByName = typeof name === "string", + + // We have to handle DOM nodes and JS objects differently because IE6-7 + // can't GC object references properly across the DOM-JS boundary + isNode = elem.nodeType, + + // Only DOM nodes need the global jQuery cache; JS object data is + // attached directly to the object so GC can occur automatically + cache = isNode ? jQuery.cache : elem, + + // Only defining an ID for JS objects if its cache already exists allows + // the code to shortcut on the same path as a DOM node with no cache + id = isNode ? elem[ internalKey ] : elem[ internalKey ] && internalKey; + + // Avoid doing any more work than we need to when trying to get data on an + // object that has no data at all + if ( (!id || !cache[id] || (!pvt && !cache[id].data)) && getByName && data === undefined ) { + return; + } + + if ( !id ) { + // Only DOM nodes need a new unique ID for each element since their data + // ends up in the global cache + if ( isNode ) { + elem[ internalKey ] = id = jQuery.deletedIds.pop() || jQuery.guid++; + } else { + id = internalKey; + } + } + + if ( !cache[ id ] ) { + cache[ id ] = {}; + + // Avoids exposing jQuery metadata on plain JS objects when the object + // is serialized using JSON.stringify + if ( !isNode ) { + cache[ id ].toJSON = jQuery.noop; + } + } + + // An object can be passed to jQuery.data instead of a key/value pair; this gets + // shallow copied over onto the existing cache + if ( typeof name === "object" || typeof name === "function" ) { + if ( pvt ) { + cache[ id ] = jQuery.extend( cache[ id ], name ); + } else { + cache[ id ].data = jQuery.extend( cache[ id ].data, name ); + } + } + + thisCache = cache[ id ]; + + // jQuery data() is stored in a separate object inside the object's internal data + // cache in order to avoid key collisions between internal data and user-defined + // data. 
+ if ( !pvt ) { + if ( !thisCache.data ) { + thisCache.data = {}; + } + + thisCache = thisCache.data; + } + + if ( data !== undefined ) { + thisCache[ jQuery.camelCase( name ) ] = data; + } + + // Check for both converted-to-camel and non-converted data property names + // If a data property was specified + if ( getByName ) { + + // First Try to find as-is property data + ret = thisCache[ name ]; + + // Test for null|undefined property data + if ( ret == null ) { + + // Try to find the camelCased property + ret = thisCache[ jQuery.camelCase( name ) ]; + } + } else { + ret = thisCache; + } + + return ret; + }, + + removeData: function( elem, name, pvt /* Internal Use Only */ ) { + if ( !jQuery.acceptData( elem ) ) { + return; + } + + var thisCache, i, l, + + isNode = elem.nodeType, + + // See jQuery.data for more information + cache = isNode ? jQuery.cache : elem, + id = isNode ? elem[ jQuery.expando ] : jQuery.expando; + + // If there is already no cache entry for this object, there is no + // purpose in continuing + if ( !cache[ id ] ) { + return; + } + + if ( name ) { + + thisCache = pvt ? cache[ id ] : cache[ id ].data; + + if ( thisCache ) { + + // Support array or space separated string names for data keys + if ( !jQuery.isArray( name ) ) { + + // try the string as a key before any manipulation + if ( name in thisCache ) { + name = [ name ]; + } else { + + // split the camel cased version by spaces unless a key with the spaces exists + name = jQuery.camelCase( name ); + if ( name in thisCache ) { + name = [ name ]; + } else { + name = name.split(" "); + } + } + } + + for ( i = 0, l = name.length; i < l; i++ ) { + delete thisCache[ name[i] ]; + } + + // If there is no data left in the cache, we want to continue + // and let the cache object itself get destroyed + if ( !( pvt ? isEmptyDataObject : jQuery.isEmptyObject )( thisCache ) ) { + return; + } + } + } + + // See jQuery.data for more information + if ( !pvt ) { + delete cache[ id ].data; + + // Don't destroy the parent cache unless the internal data object + // had been the only thing left in it + if ( !isEmptyDataObject( cache[ id ] ) ) { + return; + } + } + + // Destroy the cache + if ( isNode ) { + jQuery.cleanData( [ elem ], true ); + + // Use delete when supported for expandos or `cache` is not a window per isWindow (#10080) + } else if ( jQuery.support.deleteExpando || cache != cache.window ) { + delete cache[ id ]; + + // When all else fails, null + } else { + cache[ id ] = null; + } + }, + + // For internal use only. 
+ _data: function( elem, name, data ) { + return jQuery.data( elem, name, data, true ); + }, + + // A method for determining if a DOM node can handle the data expando + acceptData: function( elem ) { + var noData = elem.nodeName && jQuery.noData[ elem.nodeName.toLowerCase() ]; + + // nodes accept data unless otherwise specified; rejection can be conditional + return !noData || noData !== true && elem.getAttribute("classid") === noData; + } +}); + +jQuery.fn.extend({ + data: function( key, value ) { + var parts, part, attr, name, l, + elem = this[0], + i = 0, + data = null; + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = jQuery.data( elem ); + + if ( elem.nodeType === 1 && !jQuery._data( elem, "parsedAttrs" ) ) { + attr = elem.attributes; + for ( l = attr.length; i < l; i++ ) { + name = attr[i].name; + + if ( !name.indexOf( "data-" ) ) { + name = jQuery.camelCase( name.substring(5) ); + + dataAttr( elem, name, data[ name ] ); + } + } + jQuery._data( elem, "parsedAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each(function() { + jQuery.data( this, key ); + }); + } + + parts = key.split( ".", 2 ); + parts[1] = parts[1] ? "." + parts[1] : ""; + part = parts[1] + "!"; + + return jQuery.access( this, function( value ) { + + if ( value === undefined ) { + data = this.triggerHandler( "getData" + part, [ parts[0] ] ); + + // Try to fetch any internally stored data first + if ( data === undefined && elem ) { + data = jQuery.data( elem, key ); + data = dataAttr( elem, key, data ); + } + + return data === undefined && parts[1] ? + this.data( parts[0] ) : + data; + } + + parts[1] = value; + this.each(function() { + var self = jQuery( this ); + + self.triggerHandler( "setData" + part, parts ); + jQuery.data( this, key, value ); + self.triggerHandler( "changeData" + part, parts ); + }); + }, null, value, arguments.length > 1, null, false ); + }, + + removeData: function( key ) { + return this.each(function() { + jQuery.removeData( this, key ); + }); + } +}); + +function dataAttr( elem, key, data ) { + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + + var name = "data-" + key.replace( rmultiDash, "-$1" ).toLowerCase(); + + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = data === "true" ? true : + data === "false" ? false : + data === "null" ? null : + // Only convert to a number if it doesn't change the string + +data + "" === data ? +data : + rbrace.test( data ) ? 
jQuery.parseJSON( data ) : + data; + } catch( e ) {} + + // Make sure we set the data so it isn't changed later + jQuery.data( elem, key, data ); + + } else { + data = undefined; + } + } + + return data; +} + +// checks a cache object for emptiness +function isEmptyDataObject( obj ) { + var name; + for ( name in obj ) { + + // if the public data object is empty, the private is still empty + if ( name === "data" && jQuery.isEmptyObject( obj[name] ) ) { + continue; + } + if ( name !== "toJSON" ) { + return false; + } + } + + return true; +} +jQuery.extend({ + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = jQuery._data( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || jQuery.isArray(data) ) { + queue = jQuery._data( elem, type, jQuery.makeArray(data) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // not intended for public consumption - generates a queueHooks object, or returns the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return jQuery._data( elem, key ) || jQuery._data( elem, key, { + empty: jQuery.Callbacks("once memory").add(function() { + jQuery.removeData( elem, type + "queue", true ); + jQuery.removeData( elem, key, true ); + }) + }); + } +}); + +jQuery.fn.extend({ + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( arguments.length < setter ) { + return jQuery.queue( this[0], type ); + } + + return data === undefined ? + this : + this.each(function() { + var queue = jQuery.queue( this, type, data ); + + // ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[0] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + }); + }, + dequeue: function( type ) { + return this.each(function() { + jQuery.dequeue( this, type ); + }); + }, + // Based off of the plugin by Clint Helfers, with permission. + // http://blindsignals.com/index.php/2009/07/jquery-delay/ + delay: function( time, type ) { + time = jQuery.fx ? 
jQuery.fx.speeds[ time ] || time : time; + type = type || "fx"; + + return this.queue( type, function( next, hooks ) { + var timeout = setTimeout( next, time ); + hooks.stop = function() { + clearTimeout( timeout ); + }; + }); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while( i-- ) { + tmp = jQuery._data( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +}); +var nodeHook, boolHook, fixSpecified, + rclass = /[\t\r\n]/g, + rreturn = /\r/g, + rtype = /^(?:button|input)$/i, + rfocusable = /^(?:button|input|object|select|textarea)$/i, + rclickable = /^a(?:rea|)$/i, + rboolean = /^(?:autofocus|autoplay|async|checked|controls|defer|disabled|hidden|loop|multiple|open|readonly|required|scoped|selected)$/i, + getSetAttribute = jQuery.support.getSetAttribute; + +jQuery.fn.extend({ + attr: function( name, value ) { + return jQuery.access( this, jQuery.attr, name, value, arguments.length > 1 ); + }, + + removeAttr: function( name ) { + return this.each(function() { + jQuery.removeAttr( this, name ); + }); + }, + + prop: function( name, value ) { + return jQuery.access( this, jQuery.prop, name, value, arguments.length > 1 ); + }, + + removeProp: function( name ) { + name = jQuery.propFix[ name ] || name; + return this.each(function() { + // try/catch handles cases where IE balks (such as removing a property on window) + try { + this[ name ] = undefined; + delete this[ name ]; + } catch( e ) {} + }); + }, + + addClass: function( value ) { + var classNames, i, l, elem, + setClass, c, cl; + + if ( jQuery.isFunction( value ) ) { + return this.each(function( j ) { + jQuery( this ).addClass( value.call(this, j, this.className) ); + }); + } + + if ( value && typeof value === "string" ) { + classNames = value.split( core_rspace ); + + for ( i = 0, l = this.length; i < l; i++ ) { + elem = this[ i ]; + + if ( elem.nodeType === 1 ) { + if ( !elem.className && classNames.length === 1 ) { + elem.className = value; + + } else { + setClass = " " + elem.className + " "; + + for ( c = 0, cl = classNames.length; c < cl; c++ ) { + if ( setClass.indexOf( " " + classNames[ c ] + " " ) < 0 ) { + setClass += classNames[ c ] + " "; + } + } + elem.className = jQuery.trim( setClass ); + } + } + } + } + + return this; + }, + + removeClass: function( value ) { + var removes, className, elem, c, cl, i, l; + + if ( jQuery.isFunction( value ) ) { + return this.each(function( j ) { + jQuery( this ).removeClass( value.call(this, j, this.className) ); + }); + } + if ( (value && typeof value === "string") || value === undefined ) { + removes = ( value || "" ).split( core_rspace ); + + for ( i = 0, l = this.length; i < l; i++ ) { + elem = this[ i ]; + if ( elem.nodeType === 1 && elem.className ) { + + className = (" " + elem.className + " ").replace( rclass, " " ); + + // loop over each item in the removal list + for ( c = 0, cl = removes.length; c < cl; c++ ) { + // Remove until there is nothing to remove, + while ( className.indexOf(" " + removes[ c ] + " 
") >= 0 ) { + className = className.replace( " " + removes[ c ] + " " , " " ); + } + } + elem.className = value ? jQuery.trim( className ) : ""; + } + } + } + + return this; + }, + + toggleClass: function( value, stateVal ) { + var type = typeof value, + isBool = typeof stateVal === "boolean"; + + if ( jQuery.isFunction( value ) ) { + return this.each(function( i ) { + jQuery( this ).toggleClass( value.call(this, i, this.className, stateVal), stateVal ); + }); + } + + return this.each(function() { + if ( type === "string" ) { + // toggle individual class names + var className, + i = 0, + self = jQuery( this ), + state = stateVal, + classNames = value.split( core_rspace ); + + while ( (className = classNames[ i++ ]) ) { + // check each className given, space separated list + state = isBool ? state : !self.hasClass( className ); + self[ state ? "addClass" : "removeClass" ]( className ); + } + + } else if ( type === "undefined" || type === "boolean" ) { + if ( this.className ) { + // store className if set + jQuery._data( this, "__className__", this.className ); + } + + // toggle whole className + this.className = this.className || value === false ? "" : jQuery._data( this, "__className__" ) || ""; + } + }); + }, + + hasClass: function( selector ) { + var className = " " + selector + " ", + i = 0, + l = this.length; + for ( ; i < l; i++ ) { + if ( this[i].nodeType === 1 && (" " + this[i].className + " ").replace(rclass, " ").indexOf( className ) >= 0 ) { + return true; + } + } + + return false; + }, + + val: function( value ) { + var hooks, ret, isFunction, + elem = this[0]; + + if ( !arguments.length ) { + if ( elem ) { + hooks = jQuery.valHooks[ elem.type ] || jQuery.valHooks[ elem.nodeName.toLowerCase() ]; + + if ( hooks && "get" in hooks && (ret = hooks.get( elem, "value" )) !== undefined ) { + return ret; + } + + ret = elem.value; + + return typeof ret === "string" ? + // handle most common string cases + ret.replace(rreturn, "") : + // handle cases where value is null/undef or number + ret == null ? "" : ret; + } + + return; + } + + isFunction = jQuery.isFunction( value ); + + return this.each(function( i ) { + var val, + self = jQuery(this); + + if ( this.nodeType !== 1 ) { + return; + } + + if ( isFunction ) { + val = value.call( this, i, self.val() ); + } else { + val = value; + } + + // Treat null/undefined as ""; convert numbers to string + if ( val == null ) { + val = ""; + } else if ( typeof val === "number" ) { + val += ""; + } else if ( jQuery.isArray( val ) ) { + val = jQuery.map(val, function ( value ) { + return value == null ? "" : value + ""; + }); + } + + hooks = jQuery.valHooks[ this.type ] || jQuery.valHooks[ this.nodeName.toLowerCase() ]; + + // If set returns undefined, fall back to normal setting + if ( !hooks || !("set" in hooks) || hooks.set( this, val, "value" ) === undefined ) { + this.value = val; + } + }); + } +}); + +jQuery.extend({ + valHooks: { + option: { + get: function( elem ) { + // attributes.value is undefined in Blackberry 4.7 but + // uses .value. See #6932 + var val = elem.attributes.value; + return !val || val.specified ? elem.value : elem.text; + } + }, + select: { + get: function( elem ) { + var value, option, + options = elem.options, + index = elem.selectedIndex, + one = elem.type === "select-one" || index < 0, + values = one ? null : [], + max = one ? index + 1 : options.length, + i = index < 0 ? + max : + one ? 
index : 0; + + // Loop through all the selected options + for ( ; i < max; i++ ) { + option = options[ i ]; + + // oldIE doesn't update selected after form reset (#2551) + if ( ( option.selected || i === index ) && + // Don't return options that are disabled or in a disabled optgroup + ( jQuery.support.optDisabled ? !option.disabled : option.getAttribute("disabled") === null ) && + ( !option.parentNode.disabled || !jQuery.nodeName( option.parentNode, "optgroup" ) ) ) { + + // Get the specific value for the option + value = jQuery( option ).val(); + + // We don't need an array for one selects + if ( one ) { + return value; + } + + // Multi-Selects return an array + values.push( value ); + } + } + + return values; + }, + + set: function( elem, value ) { + var values = jQuery.makeArray( value ); + + jQuery(elem).find("option").each(function() { + this.selected = jQuery.inArray( jQuery(this).val(), values ) >= 0; + }); + + if ( !values.length ) { + elem.selectedIndex = -1; + } + return values; + } + } + }, + + // Unused in 1.8, left in so attrFn-stabbers won't die; remove in 1.9 + attrFn: {}, + + attr: function( elem, name, value, pass ) { + var ret, hooks, notxml, + nType = elem.nodeType; + + // don't get/set attributes on text, comment and attribute nodes + if ( !elem || nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + if ( pass && jQuery.isFunction( jQuery.fn[ name ] ) ) { + return jQuery( elem )[ name ]( value ); + } + + // Fallback to prop when attributes are not supported + if ( typeof elem.getAttribute === "undefined" ) { + return jQuery.prop( elem, name, value ); + } + + notxml = nType !== 1 || !jQuery.isXMLDoc( elem ); + + // All attributes are lowercase + // Grab necessary hook if one is defined + if ( notxml ) { + name = name.toLowerCase(); + hooks = jQuery.attrHooks[ name ] || ( rboolean.test( name ) ? boolHook : nodeHook ); + } + + if ( value !== undefined ) { + + if ( value === null ) { + jQuery.removeAttr( elem, name ); + return; + + } else if ( hooks && "set" in hooks && notxml && (ret = hooks.set( elem, value, name )) !== undefined ) { + return ret; + + } else { + elem.setAttribute( name, value + "" ); + return value; + } + + } else if ( hooks && "get" in hooks && notxml && (ret = hooks.get( elem, name )) !== null ) { + return ret; + + } else { + + ret = elem.getAttribute( name ); + + // Non-existent attributes return null, we normalize to undefined + return ret === null ? + undefined : + ret; + } + }, + + removeAttr: function( elem, value ) { + var propName, attrNames, name, isBool, + i = 0; + + if ( value && elem.nodeType === 1 ) { + + attrNames = value.split( core_rspace ); + + for ( ; i < attrNames.length; i++ ) { + name = attrNames[ i ]; + + if ( name ) { + propName = jQuery.propFix[ name ] || name; + isBool = rboolean.test( name ); + + // See #9699 for explanation of this approach (setting first, then removal) + // Do not do this for boolean attributes (see #10870) + if ( !isBool ) { + jQuery.attr( elem, name, "" ); + } + elem.removeAttribute( getSetAttribute ? 
name : propName ); + + // Set corresponding property to false for boolean attributes + if ( isBool && propName in elem ) { + elem[ propName ] = false; + } + } + } + } + }, + + attrHooks: { + type: { + set: function( elem, value ) { + // We can't allow the type property to be changed (since it causes problems in IE) + if ( rtype.test( elem.nodeName ) && elem.parentNode ) { + jQuery.error( "type property can't be changed" ); + } else if ( !jQuery.support.radioValue && value === "radio" && jQuery.nodeName(elem, "input") ) { + // Setting the type on a radio button after the value resets the value in IE6-9 + // Reset value to it's default in case type is set after value + // This is for element creation + var val = elem.value; + elem.setAttribute( "type", value ); + if ( val ) { + elem.value = val; + } + return value; + } + } + }, + // Use the value property for back compat + // Use the nodeHook for button elements in IE6/7 (#1954) + value: { + get: function( elem, name ) { + if ( nodeHook && jQuery.nodeName( elem, "button" ) ) { + return nodeHook.get( elem, name ); + } + return name in elem ? + elem.value : + null; + }, + set: function( elem, value, name ) { + if ( nodeHook && jQuery.nodeName( elem, "button" ) ) { + return nodeHook.set( elem, value, name ); + } + // Does not return so that setAttribute is also used + elem.value = value; + } + } + }, + + propFix: { + tabindex: "tabIndex", + readonly: "readOnly", + "for": "htmlFor", + "class": "className", + maxlength: "maxLength", + cellspacing: "cellSpacing", + cellpadding: "cellPadding", + rowspan: "rowSpan", + colspan: "colSpan", + usemap: "useMap", + frameborder: "frameBorder", + contenteditable: "contentEditable" + }, + + prop: function( elem, name, value ) { + var ret, hooks, notxml, + nType = elem.nodeType; + + // don't get/set properties on text, comment and attribute nodes + if ( !elem || nType === 3 || nType === 8 || nType === 2 ) { + return; + } + + notxml = nType !== 1 || !jQuery.isXMLDoc( elem ); + + if ( notxml ) { + // Fix name and attach hooks + name = jQuery.propFix[ name ] || name; + hooks = jQuery.propHooks[ name ]; + } + + if ( value !== undefined ) { + if ( hooks && "set" in hooks && (ret = hooks.set( elem, value, name )) !== undefined ) { + return ret; + + } else { + return ( elem[ name ] = value ); + } + + } else { + if ( hooks && "get" in hooks && (ret = hooks.get( elem, name )) !== null ) { + return ret; + + } else { + return elem[ name ]; + } + } + }, + + propHooks: { + tabIndex: { + get: function( elem ) { + // elem.tabIndex doesn't always return the correct value when it hasn't been explicitly set + // http://fluidproject.org/blog/2008/01/09/getting-setting-and-removing-tabindex-values-with-javascript/ + var attributeNode = elem.getAttributeNode("tabindex"); + + return attributeNode && attributeNode.specified ? + parseInt( attributeNode.value, 10 ) : + rfocusable.test( elem.nodeName ) || rclickable.test( elem.nodeName ) && elem.href ? + 0 : + undefined; + } + } + } +}); + +// Hook for boolean attributes +boolHook = { + get: function( elem, name ) { + // Align boolean attributes with corresponding properties + // Fall back to attribute presence where some booleans are not supported + var attrNode, + property = jQuery.prop( elem, name ); + return property === true || typeof property !== "boolean" && ( attrNode = elem.getAttributeNode(name) ) && attrNode.nodeValue !== false ? 
+ name.toLowerCase() : + undefined; + }, + set: function( elem, value, name ) { + var propName; + if ( value === false ) { + // Remove boolean attributes when set to false + jQuery.removeAttr( elem, name ); + } else { + // value is true since we know at this point it's type boolean and not false + // Set boolean attributes to the same name and set the DOM property + propName = jQuery.propFix[ name ] || name; + if ( propName in elem ) { + // Only set the IDL specifically if it already exists on the element + elem[ propName ] = true; + } + + elem.setAttribute( name, name.toLowerCase() ); + } + return name; + } +}; + +// IE6/7 do not support getting/setting some attributes with get/setAttribute +if ( !getSetAttribute ) { + + fixSpecified = { + name: true, + id: true, + coords: true + }; + + // Use this for any attribute in IE6/7 + // This fixes almost every IE6/7 issue + nodeHook = jQuery.valHooks.button = { + get: function( elem, name ) { + var ret; + ret = elem.getAttributeNode( name ); + return ret && ( fixSpecified[ name ] ? ret.value !== "" : ret.specified ) ? + ret.value : + undefined; + }, + set: function( elem, value, name ) { + // Set the existing or create a new attribute node + var ret = elem.getAttributeNode( name ); + if ( !ret ) { + ret = document.createAttribute( name ); + elem.setAttributeNode( ret ); + } + return ( ret.value = value + "" ); + } + }; + + // Set width and height to auto instead of 0 on empty string( Bug #8150 ) + // This is for removals + jQuery.each([ "width", "height" ], function( i, name ) { + jQuery.attrHooks[ name ] = jQuery.extend( jQuery.attrHooks[ name ], { + set: function( elem, value ) { + if ( value === "" ) { + elem.setAttribute( name, "auto" ); + return value; + } + } + }); + }); + + // Set contenteditable to false on removals(#10429) + // Setting to empty string throws an error as an invalid value + jQuery.attrHooks.contenteditable = { + get: nodeHook.get, + set: function( elem, value, name ) { + if ( value === "" ) { + value = "false"; + } + nodeHook.set( elem, value, name ); + } + }; +} + + +// Some attributes require a special call on IE +if ( !jQuery.support.hrefNormalized ) { + jQuery.each([ "href", "src", "width", "height" ], function( i, name ) { + jQuery.attrHooks[ name ] = jQuery.extend( jQuery.attrHooks[ name ], { + get: function( elem ) { + var ret = elem.getAttribute( name, 2 ); + return ret === null ? 
undefined : ret; + } + }); + }); +} + +if ( !jQuery.support.style ) { + jQuery.attrHooks.style = { + get: function( elem ) { + // Return undefined in the case of empty string + // Normalize to lowercase since IE uppercases css property names + return elem.style.cssText.toLowerCase() || undefined; + }, + set: function( elem, value ) { + return ( elem.style.cssText = value + "" ); + } + }; +} + +// Safari mis-reports the default selected property of an option +// Accessing the parent's selectedIndex property fixes it +if ( !jQuery.support.optSelected ) { + jQuery.propHooks.selected = jQuery.extend( jQuery.propHooks.selected, { + get: function( elem ) { + var parent = elem.parentNode; + + if ( parent ) { + parent.selectedIndex; + + // Make sure that it also works with optgroups, see #5701 + if ( parent.parentNode ) { + parent.parentNode.selectedIndex; + } + } + return null; + } + }); +} + +// IE6/7 call enctype encoding +if ( !jQuery.support.enctype ) { + jQuery.propFix.enctype = "encoding"; +} + +// Radios and checkboxes getter/setter +if ( !jQuery.support.checkOn ) { + jQuery.each([ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = { + get: function( elem ) { + // Handle the case where in Webkit "" is returned instead of "on" if a value isn't specified + return elem.getAttribute("value") === null ? "on" : elem.value; + } + }; + }); +} +jQuery.each([ "radio", "checkbox" ], function() { + jQuery.valHooks[ this ] = jQuery.extend( jQuery.valHooks[ this ], { + set: function( elem, value ) { + if ( jQuery.isArray( value ) ) { + return ( elem.checked = jQuery.inArray( jQuery(elem).val(), value ) >= 0 ); + } + } + }); +}); +var rformElems = /^(?:textarea|input|select)$/i, + rtypenamespace = /^([^\.]*|)(?:\.(.+)|)$/, + rhoverHack = /(?:^|\s)hover(\.\S+|)\b/, + rkeyEvent = /^key/, + rmouseEvent = /^(?:mouse|contextmenu)|click/, + rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, + hoverHack = function( events ) { + return jQuery.event.special.hover ? events : events.replace( rhoverHack, "mouseenter$1 mouseleave$1" ); + }; + +/* + * Helper functions for managing events -- not part of the public interface. + * Props to Dean Edwards' addEvent library for many of the ideas. + */ +jQuery.event = { + + add: function( elem, types, handler, data, selector ) { + + var elemData, eventHandle, events, + t, tns, type, namespaces, handleObj, + handleObjIn, handlers, special; + + // Don't attach events to noData or text/comment nodes (allow plain objects tho) + if ( elem.nodeType === 3 || elem.nodeType === 8 || !types || !handler || !(elemData = jQuery._data( elem )) ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + events = elemData.events; + if ( !events ) { + elemData.events = events = {}; + } + eventHandle = elemData.handle; + if ( !eventHandle ) { + elemData.handle = eventHandle = function( e ) { + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== "undefined" && (!e || jQuery.event.triggered !== e.type) ? 
+ jQuery.event.dispatch.apply( eventHandle.elem, arguments ) : + undefined; + }; + // Add elem as a property of the handle fn to prevent a memory leak with IE non-native events + eventHandle.elem = elem; + } + + // Handle multiple events separated by a space + // jQuery(...).bind("mouseover mouseout", fn); + types = jQuery.trim( hoverHack(types) ).split( " " ); + for ( t = 0; t < types.length; t++ ) { + + tns = rtypenamespace.exec( types[t] ) || []; + type = tns[1]; + namespaces = ( tns[2] || "" ).split( "." ).sort(); + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend({ + type: type, + origType: tns[1], + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join(".") + }, handleObjIn ); + + // Init the event handler queue if we're the first + handlers = events[ type ]; + if ( !handlers ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener/attachEvent if the special events handler returns false + if ( !special.setup || special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + // Bind the global event handler to the element + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle, false ); + + } else if ( elem.attachEvent ) { + elem.attachEvent( "on" + type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + // Nullify elem to prevent memory leaks in IE + elem = null; + }, + + global: {}, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + + var t, tns, type, origType, namespaces, origCount, + j, events, special, eventType, handleObj, + elemData = jQuery.hasData( elem ) && jQuery._data( elem ); + + if ( !elemData || !(events = elemData.events) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = jQuery.trim( hoverHack( types || "" ) ).split(" "); + for ( t = 0; t < types.length; t++ ) { + tns = rtypenamespace.exec( types[t] ) || []; + type = origType = tns[1]; + namespaces = tns[2]; + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector? special.delegateType : special.bindType ) || type; + eventType = events[ type ] || []; + origCount = eventType.length; + namespaces = namespaces ? 
new RegExp("(^|\\.)" + namespaces.split(".").sort().join("\\.(?:.*\\.|)") + "(\\.|$)") : null; + + // Remove matching events + for ( j = 0; j < eventType.length; j++ ) { + handleObj = eventType[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !namespaces || namespaces.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || selector === "**" && handleObj.selector ) ) { + eventType.splice( j--, 1 ); + + if ( handleObj.selector ) { + eventType.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( eventType.length === 0 && origCount !== eventType.length ) { + if ( !special.teardown || special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + delete elemData.handle; + + // removeData also checks for emptiness and clears the expando if empty + // so use it instead of delete + jQuery.removeData( elem, "events", true ); + } + }, + + // Events that are safe to short-circuit if no handlers are attached. + // Native DOM events should not be added, they may have inline handlers. + customEvent: { + "getData": true, + "setData": true, + "changeData": true + }, + + trigger: function( event, data, elem, onlyHandlers ) { + // Don't do events on text and comment nodes + if ( elem && (elem.nodeType === 3 || elem.nodeType === 8) ) { + return; + } + + // Event object or event type + var cache, exclusive, i, cur, old, ontype, special, handle, eventPath, bubbleType, + type = event.type || event, + namespaces = []; + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf( "!" ) >= 0 ) { + // Exclusive events trigger only for the exact event (no namespaces) + type = type.slice(0, -1); + exclusive = true; + } + + if ( type.indexOf( "." ) >= 0 ) { + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split("."); + type = namespaces.shift(); + namespaces.sort(); + } + + if ( (!elem || jQuery.event.customEvent[ type ]) && !jQuery.event.global[ type ] ) { + // No jQuery handlers for this event type, and it can't have inline handlers + return; + } + + // Caller can pass in an Event, Object, or just an event type string + event = typeof event === "object" ? + // jQuery.Event object + event[ jQuery.expando ] ? event : + // Object literal + new jQuery.Event( type, event ) : + // Just the event type (string) + new jQuery.Event( type ); + + event.type = type; + event.isTrigger = true; + event.exclusive = exclusive; + event.namespace = namespaces.join( "." ); + event.namespace_re = event.namespace? new RegExp("(^|\\.)" + namespaces.join("\\.(?:.*\\.|)") + "(\\.|$)") : null; + ontype = type.indexOf( ":" ) < 0 ? 
"on" + type : ""; + + // Handle a global trigger + if ( !elem ) { + + // TODO: Stop taunting the data cache; remove global events and always attach to document + cache = jQuery.cache; + for ( i in cache ) { + if ( cache[ i ].events && cache[ i ].events[ type ] ) { + jQuery.event.trigger( event, data, cache[ i ].handle.elem, true ); + } + } + return; + } + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data != null ? jQuery.makeArray( data ) : []; + data.unshift( event ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + eventPath = [[ elem, special.bindType || type ]]; + if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + cur = rfocusMorph.test( bubbleType + type ) ? elem : elem.parentNode; + for ( old = elem; cur; cur = cur.parentNode ) { + eventPath.push([ cur, bubbleType ]); + old = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( old === (elem.ownerDocument || document) ) { + eventPath.push([ old.defaultView || old.parentWindow || window, bubbleType ]); + } + } + + // Fire handlers on the event path + for ( i = 0; i < eventPath.length && !event.isPropagationStopped(); i++ ) { + + cur = eventPath[i][0]; + event.type = eventPath[i][1]; + + handle = ( jQuery._data( cur, "events" ) || {} )[ event.type ] && jQuery._data( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + // Note that this is a bare JS function and not a jQuery handler + handle = ontype && cur[ ontype ]; + if ( handle && jQuery.acceptData( cur ) && handle.apply && handle.apply( cur, data ) === false ) { + event.preventDefault(); + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( (!special._default || special._default.apply( elem.ownerDocument, data ) === false) && + !(type === "click" && jQuery.nodeName( elem, "a" )) && jQuery.acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name name as the event. + // Can't use an .isFunction() check here because IE6/7 fails that test. 
+ // Don't do default actions on window, that's where global variables be (#6170) + // IE<9 dies on focus/blur to hidden element (#1486) + if ( ontype && elem[ type ] && ((type !== "focus" && type !== "blur") || event.target.offsetWidth !== 0) && !jQuery.isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + old = elem[ ontype ]; + + if ( old ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + elem[ type ](); + jQuery.event.triggered = undefined; + + if ( old ) { + elem[ ontype ] = old; + } + } + } + } + + return event.result; + }, + + dispatch: function( event ) { + + // Make a writable jQuery.Event from the native event object + event = jQuery.event.fix( event || window.event ); + + var i, j, cur, ret, selMatch, matched, matches, handleObj, sel, related, + handlers = ( (jQuery._data( this, "events" ) || {} )[ event.type ] || []), + delegateCount = handlers.delegateCount, + args = core_slice.call( arguments ), + run_all = !event.exclusive && !event.namespace, + special = jQuery.event.special[ event.type ] || {}, + handlerQueue = []; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[0] = event; + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers that should run if there are delegated events + // Avoid non-left-click bubbling in Firefox (#3861) + if ( delegateCount && !(event.button && event.type === "click") ) { + + for ( cur = event.target; cur != this; cur = cur.parentNode || this ) { + + // Don't process clicks (ONLY) on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.disabled !== true || event.type !== "click" ) { + selMatch = {}; + matches = []; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + sel = handleObj.selector; + + if ( selMatch[ sel ] === undefined ) { + selMatch[ sel ] = handleObj.needsContext ? + jQuery( sel, this ).index( cur ) >= 0 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( selMatch[ sel ] ) { + matches.push( handleObj ); + } + } + if ( matches.length ) { + handlerQueue.push({ elem: cur, matches: matches }); + } + } + } + } + + // Add the remaining (directly-bound) handlers + if ( handlers.length > delegateCount ) { + handlerQueue.push({ elem: this, matches: handlers.slice( delegateCount ) }); + } + + // Run delegates first; they may want to stop propagation beneath us + for ( i = 0; i < handlerQueue.length && !event.isPropagationStopped(); i++ ) { + matched = handlerQueue[ i ]; + event.currentTarget = matched.elem; + + for ( j = 0; j < matched.matches.length && !event.isImmediatePropagationStopped(); j++ ) { + handleObj = matched.matches[ j ]; + + // Triggered event must either 1) be non-exclusive and have no namespace, or + // 2) have namespace(s) a subset or equal to those in the bound event (both can have no namespace). 
+ if ( run_all || (!event.namespace && !handleObj.namespace) || event.namespace_re && event.namespace_re.test( handleObj.namespace ) ) { + + event.data = handleObj.data; + event.handleObj = handleObj; + + ret = ( (jQuery.event.special[ handleObj.origType ] || {}).handle || handleObj.handler ) + .apply( matched.elem, args ); + + if ( ret !== undefined ) { + event.result = ret; + if ( ret === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + // Includes some event props shared by KeyEvent and MouseEvent + // *** attrChange attrName relatedNode srcElement are not normalized, non-W3C, deprecated, will be removed in 1.8 *** + props: "attrChange attrName relatedNode srcElement altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "), + + fixHooks: {}, + + keyHooks: { + props: "char charCode key keyCode".split(" "), + filter: function( event, original ) { + + // Add which for key events + if ( event.which == null ) { + event.which = original.charCode != null ? original.charCode : original.keyCode; + } + + return event; + } + }, + + mouseHooks: { + props: "button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "), + filter: function( event, original ) { + var eventDoc, doc, body, + button = original.button, + fromElement = original.fromElement; + + // Calculate pageX/Y if missing and clientX/Y available + if ( event.pageX == null && original.clientX != null ) { + eventDoc = event.target.ownerDocument || document; + doc = eventDoc.documentElement; + body = eventDoc.body; + + event.pageX = original.clientX + ( doc && doc.scrollLeft || body && body.scrollLeft || 0 ) - ( doc && doc.clientLeft || body && body.clientLeft || 0 ); + event.pageY = original.clientY + ( doc && doc.scrollTop || body && body.scrollTop || 0 ) - ( doc && doc.clientTop || body && body.clientTop || 0 ); + } + + // Add relatedTarget, if necessary + if ( !event.relatedTarget && fromElement ) { + event.relatedTarget = fromElement === event.target ? original.toElement : fromElement; + } + + // Add which for click: 1 === left; 2 === middle; 3 === right + // Note: button is not normalized, so don't use it + if ( !event.which && button !== undefined ) { + event.which = ( button & 1 ? 1 : ( button & 2 ? 3 : ( button & 4 ? 2 : 0 ) ) ); + } + + return event; + } + }, + + fix: function( event ) { + if ( event[ jQuery.expando ] ) { + return event; + } + + // Create a writable copy of the event object and normalize some properties + var i, prop, + originalEvent = event, + fixHook = jQuery.event.fixHooks[ event.type ] || {}, + copy = fixHook.props ? this.props.concat( fixHook.props ) : this.props; + + event = jQuery.Event( originalEvent ); + + for ( i = copy.length; i; ) { + prop = copy[ --i ]; + event[ prop ] = originalEvent[ prop ]; + } + + // Fix target property, if necessary (#1925, IE 6/7/8 & Safari2) + if ( !event.target ) { + event.target = originalEvent.srcElement || document; + } + + // Target should not be a text node (#504, Safari) + if ( event.target.nodeType === 3 ) { + event.target = event.target.parentNode; + } + + // For mouse/key events, metaKey==false if it's undefined (#3368, #11328; IE6/7/8) + event.metaKey = !!event.metaKey; + + return fixHook.filter? 
fixHook.filter( event, originalEvent ) : event; + }, + + special: { + load: { + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + + focus: { + delegateType: "focusin" + }, + blur: { + delegateType: "focusout" + }, + + beforeunload: { + setup: function( data, namespaces, eventHandle ) { + // We only want to do this special case on windows + if ( jQuery.isWindow( this ) ) { + this.onbeforeunload = eventHandle; + } + }, + + teardown: function( namespaces, eventHandle ) { + if ( this.onbeforeunload === eventHandle ) { + this.onbeforeunload = null; + } + } + } + }, + + simulate: function( type, elem, event, bubble ) { + // Piggyback on a donor event to simulate a different one. + // Fake originalEvent to avoid donor's stopPropagation, but if the + // simulated event prevents default then we do the same on the donor. + var e = jQuery.extend( + new jQuery.Event(), + event, + { type: type, + isSimulated: true, + originalEvent: {} + } + ); + if ( bubble ) { + jQuery.event.trigger( e, null, elem ); + } else { + jQuery.event.dispatch.call( elem, e ); + } + if ( e.isDefaultPrevented() ) { + event.preventDefault(); + } + } +}; + +// Some plugins are using, but it's undocumented/deprecated and will be removed. +// The 1.7 special event interface should provide all the hooks needed now. +jQuery.event.handle = jQuery.event.dispatch; + +jQuery.removeEvent = document.removeEventListener ? + function( elem, type, handle ) { + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle, false ); + } + } : + function( elem, type, handle ) { + var name = "on" + type; + + if ( elem.detachEvent ) { + + // #8545, #7054, preventing memory leaks for custom events in IE6-8 + // detachEvent needed property on element, by name of that event, to properly expose it to GC + if ( typeof elem[ name ] === "undefined" ) { + elem[ name ] = null; + } + + elem.detachEvent( name, handle ); + } + }; + +jQuery.Event = function( src, props ) { + // Allow instantiation without the 'new' keyword + if ( !(this instanceof jQuery.Event) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = ( src.defaultPrevented || src.returnValue === false || + src.getPreventDefault && src.getPreventDefault() ) ? 
returnTrue : returnFalse; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || jQuery.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +function returnFalse() { + return false; +} +function returnTrue() { + return true; +} + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// http://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + preventDefault: function() { + this.isDefaultPrevented = returnTrue; + + var e = this.originalEvent; + if ( !e ) { + return; + } + + // if preventDefault exists run it on the original event + if ( e.preventDefault ) { + e.preventDefault(); + + // otherwise set the returnValue property of the original event to false (IE) + } else { + e.returnValue = false; + } + }, + stopPropagation: function() { + this.isPropagationStopped = returnTrue; + + var e = this.originalEvent; + if ( !e ) { + return; + } + // if stopPropagation exists run it on the original event + if ( e.stopPropagation ) { + e.stopPropagation(); + } + // otherwise set the cancelBubble property of the original event to true (IE) + e.cancelBubble = true; + }, + stopImmediatePropagation: function() { + this.isImmediatePropagationStopped = returnTrue; + this.stopPropagation(); + }, + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse +}; + +// Create mouseenter/leave events using mouseover/out and event-time checks +jQuery.each({ + mouseenter: "mouseover", + mouseleave: "mouseout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, + + handle: function( event ) { + var ret, + target = this, + related = event.relatedTarget, + handleObj = event.handleObj, + selector = handleObj.selector; + + // For mousenter/leave call the handler if related is outside the target. + // NB: No relatedTarget if the mouse left/entered the browser window + if ( !related || (related !== target && !jQuery.contains( target, related )) ) { + event.type = handleObj.origType; + ret = handleObj.handler.apply( this, arguments ); + event.type = fix; + } + return ret; + } + }; +}); + +// IE submit delegation +if ( !jQuery.support.submitBubbles ) { + + jQuery.event.special.submit = { + setup: function() { + // Only need this for delegated form submit events + if ( jQuery.nodeName( this, "form" ) ) { + return false; + } + + // Lazy-add a submit handler when a descendant form may potentially be submitted + jQuery.event.add( this, "click._submit keypress._submit", function( e ) { + // Node name check avoids a VML-related crash in IE (#9807) + var elem = e.target, + form = jQuery.nodeName( elem, "input" ) || jQuery.nodeName( elem, "button" ) ? 
elem.form : undefined; + if ( form && !jQuery._data( form, "_submit_attached" ) ) { + jQuery.event.add( form, "submit._submit", function( event ) { + event._submit_bubble = true; + }); + jQuery._data( form, "_submit_attached", true ); + } + }); + // return undefined since we don't need an event listener + }, + + postDispatch: function( event ) { + // If form was submitted by the user, bubble the event up the tree + if ( event._submit_bubble ) { + delete event._submit_bubble; + if ( this.parentNode && !event.isTrigger ) { + jQuery.event.simulate( "submit", this.parentNode, event, true ); + } + } + }, + + teardown: function() { + // Only need this for delegated form submit events + if ( jQuery.nodeName( this, "form" ) ) { + return false; + } + + // Remove delegated handlers; cleanData eventually reaps submit handlers attached above + jQuery.event.remove( this, "._submit" ); + } + }; +} + +// IE change delegation and checkbox/radio fix +if ( !jQuery.support.changeBubbles ) { + + jQuery.event.special.change = { + + setup: function() { + + if ( rformElems.test( this.nodeName ) ) { + // IE doesn't fire change on a check/radio until blur; trigger it on click + // after a propertychange. Eat the blur-change in special.change.handle. + // This still fires onchange a second time for check/radio after blur. + if ( this.type === "checkbox" || this.type === "radio" ) { + jQuery.event.add( this, "propertychange._change", function( event ) { + if ( event.originalEvent.propertyName === "checked" ) { + this._just_changed = true; + } + }); + jQuery.event.add( this, "click._change", function( event ) { + if ( this._just_changed && !event.isTrigger ) { + this._just_changed = false; + } + // Allow triggered, simulated change events (#11500) + jQuery.event.simulate( "change", this, event, true ); + }); + } + return false; + } + // Delegated event; lazy-add a change handler on descendant inputs + jQuery.event.add( this, "beforeactivate._change", function( e ) { + var elem = e.target; + + if ( rformElems.test( elem.nodeName ) && !jQuery._data( elem, "_change_attached" ) ) { + jQuery.event.add( elem, "change._change", function( event ) { + if ( this.parentNode && !event.isSimulated && !event.isTrigger ) { + jQuery.event.simulate( "change", this.parentNode, event, true ); + } + }); + jQuery._data( elem, "_change_attached", true ); + } + }); + }, + + handle: function( event ) { + var elem = event.target; + + // Swallow native change events from checkbox/radio, we already triggered them above + if ( this !== elem || event.isSimulated || event.isTrigger || (elem.type !== "radio" && elem.type !== "checkbox") ) { + return event.handleObj.handler.apply( this, arguments ); + } + }, + + teardown: function() { + jQuery.event.remove( this, "._change" ); + + return !rformElems.test( this.nodeName ); + } + }; +} + +// Create "bubbling" focus and blur events +if ( !jQuery.support.focusinBubbles ) { + jQuery.each({ focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler while someone wants focusin/focusout + var attaches = 0, + handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ), true ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + if ( attaches++ === 0 ) { + document.addEventListener( orig, handler, true ); + } + }, + teardown: function() { + if ( --attaches === 0 ) { + document.removeEventListener( orig, handler, true ); + } + } + }; + }); +} + +jQuery.fn.extend({ + + on: function( types, selector, data, fn, 
/*INTERNAL*/ one ) { + var origFn, type; + + // Types can be a map of types/handlers + if ( typeof types === "object" ) { + // ( types-Object, selector, data ) + if ( typeof selector !== "string" ) { // && selector != null + // ( types-Object, data ) + data = data || selector; + selector = undefined; + } + for ( type in types ) { + this.on( type, selector, data, types[ type ], one ); + } + return this; + } + + if ( data == null && fn == null ) { + // ( types, fn ) + fn = selector; + data = selector = undefined; + } else if ( fn == null ) { + if ( typeof selector === "string" ) { + // ( types, selector, fn ) + fn = data; + data = undefined; + } else { + // ( types, data, fn ) + fn = data; + data = selector; + selector = undefined; + } + } + if ( fn === false ) { + fn = returnFalse; + } else if ( !fn ) { + return this; + } + + if ( one === 1 ) { + origFn = fn; + fn = function( event ) { + // Can use an empty set, since event contains the info + jQuery().off( event ); + return origFn.apply( this, arguments ); + }; + // Use same guid so caller can remove using origFn + fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ ); + } + return this.each( function() { + jQuery.event.add( this, types, fn, data, selector ); + }); + }, + one: function( types, selector, data, fn ) { + return this.on( types, selector, data, fn, 1 ); + }, + off: function( types, selector, fn ) { + var handleObj, type; + if ( types && types.preventDefault && types.handleObj ) { + // ( event ) dispatched jQuery.Event + handleObj = types.handleObj; + jQuery( types.delegateTarget ).off( + handleObj.namespace ? handleObj.origType + "." + handleObj.namespace : handleObj.origType, + handleObj.selector, + handleObj.handler + ); + return this; + } + if ( typeof types === "object" ) { + // ( types-object [, selector] ) + for ( type in types ) { + this.off( type, selector, types[ type ] ); + } + return this; + } + if ( selector === false || typeof selector === "function" ) { + // ( types [, fn] ) + fn = selector; + selector = undefined; + } + if ( fn === false ) { + fn = returnFalse; + } + return this.each(function() { + jQuery.event.remove( this, types, fn, selector ); + }); + }, + + bind: function( types, data, fn ) { + return this.on( types, null, data, fn ); + }, + unbind: function( types, fn ) { + return this.off( types, null, fn ); + }, + + live: function( types, data, fn ) { + jQuery( this.context ).on( types, this.selector, data, fn ); + return this; + }, + die: function( types, fn ) { + jQuery( this.context ).off( types, this.selector || "**", fn ); + return this; + }, + + delegate: function( selector, types, data, fn ) { + return this.on( types, selector, data, fn ); + }, + undelegate: function( selector, types, fn ) { + // ( namespace ) or ( selector, types [, fn] ) + return arguments.length === 1 ? 
this.off( selector, "**" ) : this.off( types, selector || "**", fn ); + }, + + trigger: function( type, data ) { + return this.each(function() { + jQuery.event.trigger( type, data, this ); + }); + }, + triggerHandler: function( type, data ) { + if ( this[0] ) { + return jQuery.event.trigger( type, data, this[0], true ); + } + }, + + toggle: function( fn ) { + // Save reference to arguments for access in closure + var args = arguments, + guid = fn.guid || jQuery.guid++, + i = 0, + toggler = function( event ) { + // Figure out which function to execute + var lastToggle = ( jQuery._data( this, "lastToggle" + fn.guid ) || 0 ) % i; + jQuery._data( this, "lastToggle" + fn.guid, lastToggle + 1 ); + + // Make sure that clicks stop + event.preventDefault(); + + // and execute the function + return args[ lastToggle ].apply( this, arguments ) || false; + }; + + // link all the functions, so any of them can unbind this click handler + toggler.guid = guid; + while ( i < args.length ) { + args[ i++ ].guid = guid; + } + + return this.click( toggler ); + }, + + hover: function( fnOver, fnOut ) { + return this.mouseenter( fnOver ).mouseleave( fnOut || fnOver ); + } +}); + +jQuery.each( ("blur focus focusin focusout load resize scroll unload click dblclick " + + "mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave " + + "change select submit keydown keypress keyup error contextmenu").split(" "), function( i, name ) { + + // Handle event binding + jQuery.fn[ name ] = function( data, fn ) { + if ( fn == null ) { + fn = data; + data = null; + } + + return arguments.length > 0 ? + this.on( name, null, data, fn ) : + this.trigger( name ); + }; + + if ( rkeyEvent.test( name ) ) { + jQuery.event.fixHooks[ name ] = jQuery.event.keyHooks; + } + + if ( rmouseEvent.test( name ) ) { + jQuery.event.fixHooks[ name ] = jQuery.event.mouseHooks; + } +}); +/*! 
+ * Sizzle CSS Selector Engine + * Copyright 2012 jQuery Foundation and other contributors + * Released under the MIT license + * http://sizzlejs.com/ + */ +(function( window, undefined ) { + +var cachedruns, + assertGetIdNotName, + Expr, + getText, + isXML, + contains, + compile, + sortOrder, + hasDuplicate, + outermostContext, + + baseHasDuplicate = true, + strundefined = "undefined", + + expando = ( "sizcache" + Math.random() ).replace( ".", "" ), + + Token = String, + document = window.document, + docElem = document.documentElement, + dirruns = 0, + done = 0, + pop = [].pop, + push = [].push, + slice = [].slice, + // Use a stripped-down indexOf if a native one is unavailable + indexOf = [].indexOf || function( elem ) { + var i = 0, + len = this.length; + for ( ; i < len; i++ ) { + if ( this[i] === elem ) { + return i; + } + } + return -1; + }, + + // Augment a function for special use by Sizzle + markFunction = function( fn, value ) { + fn[ expando ] = value == null || value; + return fn; + }, + + createCache = function() { + var cache = {}, + keys = []; + + return markFunction(function( key, value ) { + // Only keep the most recent entries + if ( keys.push( key ) > Expr.cacheLength ) { + delete cache[ keys.shift() ]; + } + + // Retrieve with (key + " ") to avoid collision with native Object.prototype properties (see Issue #157) + return (cache[ key + " " ] = value); + }, cache ); + }, + + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + + // Regex + + // Whitespace characters http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + // http://www.w3.org/TR/css3-syntax/#characters + characterEncoding = "(?:\\\\.|[-\\w]|[^\\x00-\\xa0])+", + + // Loosely modeled on CSS identifier characters + // An unquoted value should be a CSS identifier (http://www.w3.org/TR/css3-selectors/#attribute-selectors) + // Proper syntax: http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier + identifier = characterEncoding.replace( "w", "w#" ), + + // Acceptable operators http://www.w3.org/TR/selectors/#attribute-selectors + operators = "([*^$|!~]?=)", + attributes = "\\[" + whitespace + "*(" + characterEncoding + ")" + whitespace + + "*(?:" + operators + whitespace + "*(?:(['\"])((?:\\\\.|[^\\\\])*?)\\3|(" + identifier + ")|)|)" + whitespace + "*\\]", + + // Prefer arguments not in parens/brackets, + // then attribute selectors and non-pseudos (denoted by :), + // then anything else + // These preferences are here to reduce the number of selectors + // needing tokenize in the PSEUDO preFilter + pseudos = ":(" + characterEncoding + ")(?:\\((?:(['\"])((?:\\\\.|[^\\\\])*?)\\2|([^()[\\]]*|(?:(?:" + attributes + ")|[^:]|\\\\.)*|.*))\\)|)", + + // For matchExpr.POS and matchExpr.needsContext + pos = ":(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + whitespace + + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([\\x20\\t\\r\\n\\f>+~])" + whitespace + "*" ), + rpseudo = new RegExp( pseudos ), + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w\-]+)|(\w+)|\.([\w\-]+))$/, + + rnot = /^:not/, + rsibling = /[\x20\t\r\n\f]*[+~]/, + rendsWithNot = /:not\($/, + + rheader = 
/h\d/i, + rinputs = /input|select|textarea|button/i, + + rbackslash = /\\(?!\\)/g, + + matchExpr = { + "ID": new RegExp( "^#(" + characterEncoding + ")" ), + "CLASS": new RegExp( "^\\.(" + characterEncoding + ")" ), + "NAME": new RegExp( "^\\[name=['\"]?(" + characterEncoding + ")['\"]?\\]" ), + "TAG": new RegExp( "^(" + characterEncoding.replace( "w", "w*" ) + ")" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "POS": new RegExp( pos, "i" ), + "CHILD": new RegExp( "^:(only|nth|first|last)-child(?:\\(" + whitespace + + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + // For use in libraries implementing .is() + "needsContext": new RegExp( "^" + whitespace + "*[>+~]|" + pos, "i" ) + }, + + // Support + + // Used for testing something on an element + assert = function( fn ) { + var div = document.createElement("div"); + + try { + return fn( div ); + } catch (e) { + return false; + } finally { + // release memory in IE + div = null; + } + }, + + // Check if getElementsByTagName("*") returns only elements + assertTagNameNoComments = assert(function( div ) { + div.appendChild( document.createComment("") ); + return !div.getElementsByTagName("*").length; + }), + + // Check if getAttribute returns normalized href attributes + assertHrefNotNormalized = assert(function( div ) { + div.innerHTML = ""; + return div.firstChild && typeof div.firstChild.getAttribute !== strundefined && + div.firstChild.getAttribute("href") === "#"; + }), + + // Check if attributes should be retrieved by attribute nodes + assertAttributes = assert(function( div ) { + div.innerHTML = ""; + var type = typeof div.lastChild.getAttribute("multiple"); + // IE8 returns a string for some attributes even when not present + return type !== "boolean" && type !== "string"; + }), + + // Check if getElementsByClassName can be trusted + assertUsableClassName = assert(function( div ) { + // Opera can't find a second classname (in 9.6) + div.innerHTML = ""; + if ( !div.getElementsByClassName || !div.getElementsByClassName("e").length ) { + return false; + } + + // Safari 3.2 caches class attributes and doesn't catch changes + div.lastChild.className = "e"; + return div.getElementsByClassName("e").length === 2; + }), + + // Check if getElementById returns elements by name + // Check if getElementsByName privileges form controls or returns elements by ID + assertUsableName = assert(function( div ) { + // Inject content + div.id = expando + 0; + div.innerHTML = "
"; + docElem.insertBefore( div, docElem.firstChild ); + + // Test + var pass = document.getElementsByName && + // buggy browsers will return fewer than the correct 2 + document.getElementsByName( expando ).length === 2 + + // buggy browsers will return more than the correct 0 + document.getElementsByName( expando + 0 ).length; + assertGetIdNotName = !document.getElementById( expando ); + + // Cleanup + docElem.removeChild( div ); + + return pass; + }); + +// If slice is not available, provide a backup +try { + slice.call( docElem.childNodes, 0 )[0].nodeType; +} catch ( e ) { + slice = function( i ) { + var elem, + results = []; + for ( ; (elem = this[i]); i++ ) { + results.push( elem ); + } + return results; + }; +} + +function Sizzle( selector, context, results, seed ) { + results = results || []; + context = context || document; + var match, elem, xml, m, + nodeType = context.nodeType; + + if ( !selector || typeof selector !== "string" ) { + return results; + } + + if ( nodeType !== 1 && nodeType !== 9 ) { + return []; + } + + xml = isXML( context ); + + if ( !xml && !seed ) { + if ( (match = rquickExpr.exec( selector )) ) { + // Speed-up: Sizzle("#ID") + if ( (m = match[1]) ) { + if ( nodeType === 9 ) { + elem = context.getElementById( m ); + // Check parentNode to catch when Blackberry 4.6 returns + // nodes that are no longer in the document #6963 + if ( elem && elem.parentNode ) { + // Handle the case where IE, Opera, and Webkit return items + // by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + } else { + // Context is not a document + if ( context.ownerDocument && (elem = context.ownerDocument.getElementById( m )) && + contains( context, elem ) && elem.id === m ) { + results.push( elem ); + return results; + } + } + + // Speed-up: Sizzle("TAG") + } else if ( match[2] ) { + push.apply( results, slice.call(context.getElementsByTagName( selector ), 0) ); + return results; + + // Speed-up: Sizzle(".CLASS") + } else if ( (m = match[3]) && assertUsableClassName && context.getElementsByClassName ) { + push.apply( results, slice.call(context.getElementsByClassName( m ), 0) ); + return results; + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed, xml ); +} + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + return Sizzle( expr, null, null, [ elem ] ).length > 0; +}; + +// Returns a function to use in pseudos for input types +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +// Returns a function to use in pseudos for buttons +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return (name === "input" || name === "button") && elem.type === type; + }; +} + +// Returns a function to use in pseudos for positionals +function createPositionalPseudo( fn ) { + return markFunction(function( argument ) { + argument = +argument; + return markFunction(function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ (j = matchIndexes[i]) ] ) { + seed[j] = !(matches[j] = seed[j]); + } + } + }); + }); +} + +/** + * Utility function for retrieving the text value of an array 
of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( nodeType ) { + if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + // Use textContent for elements + // innerText usage removed for consistency of new lines (see #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + // Do not include comment or processing instruction nodes + } else { + + // If no nodeType, this is expected to be an array + for ( ; (node = elem[i]); i++ ) { + // Do not traverse comment nodes + ret += getText( node ); + } + } + return ret; +}; + +isXML = Sizzle.isXML = function( elem ) { + // documentElement is verified for cases where it doesn't yet exist + // (such as loading iframes in IE - #4833) + var documentElement = elem && (elem.ownerDocument || elem).documentElement; + return documentElement ? documentElement.nodeName !== "HTML" : false; +}; + +// Element contains another +contains = Sizzle.contains = docElem.contains ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && adown.contains && adown.contains(bup) ); + } : + docElem.compareDocumentPosition ? + function( a, b ) { + return b && !!( a.compareDocumentPosition( b ) & 16 ); + } : + function( a, b ) { + while ( (b = b.parentNode) ) { + if ( b === a ) { + return true; + } + } + return false; + }; + +Sizzle.attr = function( elem, name ) { + var val, + xml = isXML( elem ); + + if ( !xml ) { + name = name.toLowerCase(); + } + if ( (val = Expr.attrHandle[ name ]) ) { + return val( elem ); + } + if ( xml || assertAttributes ) { + return elem.getAttribute( name ); + } + val = elem.getAttributeNode( name ); + return val ? + typeof elem[ name ] === "boolean" ? + elem[ name ] ? name : null : + val.specified ? val.value : null : + null; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + // IE6/7 return a modified href + attrHandle: assertHrefNotNormalized ? + {} : + { + "href": function( elem ) { + return elem.getAttribute( "href", 2 ); + }, + "type": function( elem ) { + return elem.getAttribute("type"); + } + }, + + find: { + "ID": assertGetIdNotName ? + function( id, context, xml ) { + if ( typeof context.getElementById !== strundefined && !xml ) { + var m = context.getElementById( id ); + // Check parentNode to catch when Blackberry 4.6 returns + // nodes that are no longer in the document #6963 + return m && m.parentNode ? [m] : []; + } + } : + function( id, context, xml ) { + if ( typeof context.getElementById !== strundefined && !xml ) { + var m = context.getElementById( id ); + + return m ? + m.id === id || typeof m.getAttributeNode !== strundefined && m.getAttributeNode("id").value === id ? + [m] : + undefined : + []; + } + }, + + "TAG": assertTagNameNoComments ? 
+ function( tag, context ) { + if ( typeof context.getElementsByTagName !== strundefined ) { + return context.getElementsByTagName( tag ); + } + } : + function( tag, context ) { + var results = context.getElementsByTagName( tag ); + + // Filter out possible comments + if ( tag === "*" ) { + var elem, + tmp = [], + i = 0; + + for ( ; (elem = results[i]); i++ ) { + if ( elem.nodeType === 1 ) { + tmp.push( elem ); + } + } + + return tmp; + } + return results; + }, + + "NAME": assertUsableName && function( tag, context ) { + if ( typeof context.getElementsByName !== strundefined ) { + return context.getElementsByName( name ); + } + }, + + "CLASS": assertUsableClassName && function( className, context, xml ) { + if ( typeof context.getElementsByClassName !== strundefined && !xml ) { + return context.getElementsByClassName( className ); + } + } + }, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[1] = match[1].replace( rbackslash, "" ); + + // Move the given value to match[3] whether quoted or unquoted + match[3] = ( match[4] || match[5] || "" ).replace( rbackslash, "" ); + + if ( match[2] === "~=" ) { + match[3] = " " + match[3] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 3 xn-component of xn+y argument ([+-]?\d*n|) + 4 sign of xn-component + 5 x of xn-component + 6 sign of y-component + 7 y of y-component + */ + match[1] = match[1].toLowerCase(); + + if ( match[1] === "nth" ) { + // nth-child requires argument + if ( !match[2] ) { + Sizzle.error( match[0] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[3] = +( match[3] ? match[4] + (match[5] || 1) : 2 * ( match[2] === "even" || match[2] === "odd" ) ); + match[4] = +( ( match[6] + match[7] ) || match[2] === "odd" ); + + // other types prohibit arguments + } else if ( match[2] ) { + Sizzle.error( match[0] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var unquoted, excess; + if ( matchExpr["CHILD"].test( match[0] ) ) { + return null; + } + + if ( match[3] ) { + match[2] = match[3]; + } else if ( (unquoted = match[4]) ) { + // Only check arguments that contain a pseudo + if ( rpseudo.test(unquoted) && + // Get excess from tokenize (recursively) + (excess = tokenize( unquoted, true )) && + // advance to the next closing parenthesis + (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { + + // excess is a negative index + unquoted = unquoted.slice( 0, excess ); + match[0] = match[0].slice( 0, excess ); + } + match[2] = unquoted; + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + "ID": assertGetIdNotName ? 
+ function( id ) { + id = id.replace( rbackslash, "" ); + return function( elem ) { + return elem.getAttribute("id") === id; + }; + } : + function( id ) { + id = id.replace( rbackslash, "" ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== strundefined && elem.getAttributeNode("id"); + return node && node.value === id; + }; + }, + + "TAG": function( nodeName ) { + if ( nodeName === "*" ) { + return function() { return true; }; + } + nodeName = nodeName.replace( rbackslash, "" ).toLowerCase(); + + return function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ expando ][ className + " " ]; + + return pattern || + (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && + classCache( className, function( elem ) { + return pattern.test( elem.className || (typeof elem.getAttribute !== strundefined && elem.getAttribute("class")) || "" ); + }); + }, + + "ATTR": function( name, operator, check ) { + return function( elem, context ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.substr( result.length - check.length ) === check : + operator === "~=" ? ( " " + result + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.substr( 0, check.length + 1 ) === check + "-" : + false; + }; + }, + + "CHILD": function( type, argument, first, last ) { + + if ( type === "nth" ) { + return function( elem ) { + var node, diff, + parent = elem.parentNode; + + if ( first === 1 && last === 0 ) { + return true; + } + + if ( parent ) { + diff = 0; + for ( node = parent.firstChild; node; node = node.nextSibling ) { + if ( node.nodeType === 1 ) { + diff++; + if ( elem === node ) { + break; + } + } + } + } + + // Incorporate the offset (or cast to NaN), then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + }; + } + + return function( elem ) { + var node = elem; + + switch ( type ) { + case "only": + case "first": + while ( (node = node.previousSibling) ) { + if ( node.nodeType === 1 ) { + return false; + } + } + + if ( type === "first" ) { + return true; + } + + node = elem; + + /* falls through */ + case "last": + while ( (node = node.nextSibling) ) { + if ( node.nodeType === 1 ) { + return false; + } + } + + return true; + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( 
pseudo.toLowerCase() ) ? + markFunction(function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf.call( seed, matched[i] ); + seed[ idx ] = !( matches[ idx ] = matched[i] ); + } + }) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + "not": markFunction(function( selector ) { + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? + markFunction(function( seed, matches, context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( (elem = unmatched[i]) ) { + seed[i] = !(matches[i] = elem); + } + } + }) : + function( elem, context, xml ) { + input[0] = elem; + matcher( input, null, xml, results ); + return !results.pop(); + }; + }), + + "has": markFunction(function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + }), + + "contains": markFunction(function( text ) { + return function( elem ) { + return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; + }; + }), + + "enabled": function( elem ) { + return elem.disabled === false; + }, + + "disabled": function( elem ) { + return elem.disabled === true; + }, + + "checked": function( elem ) { + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); + }, + + "selected": function( elem ) { + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + "parent": function( elem ) { + return !Expr.pseudos["empty"]( elem ); + }, + + "empty": function( elem ) { + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is only affected by element nodes and content nodes(including text(3), cdata(4)), + // not comment, processing instructions, or others + // Thanks to Diego Perini for the nodeName shortcut + // Greater than "@" means alpha characters (specifically not starting with "#" or "?") + var nodeType; + elem = elem.firstChild; + while ( elem ) { + if ( elem.nodeName > "@" || (nodeType = elem.nodeType) === 3 || nodeType === 4 ) { + return false; + } + elem = elem.nextSibling; + } + return true; + }, + + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "text": function( elem ) { + var type, attr; + // IE6 and 7 will map elem.type to 'text' for new HTML5 types (search, etc) + // use getAttribute instead to test this case + return elem.nodeName.toLowerCase() === "input" && + (type = elem.type) === "text" && + ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === type ); + }, + + // Input types + "radio": createInputPseudo("radio"), + "checkbox": createInputPseudo("checkbox"), + "file": createInputPseudo("file"), + "password": createInputPseudo("password"), + "image": createInputPseudo("image"), + + "submit": createButtonPseudo("submit"), + "reset": createButtonPseudo("reset"), + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" 
&& elem.type === "button" || name === "button"; + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "focus": function( elem ) { + var doc = elem.ownerDocument; + return elem === doc.activeElement && (!doc.hasFocus || doc.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); + }, + + "active": function( elem ) { + return elem === elem.ownerDocument.activeElement; + }, + + // Positional types + "first": createPositionalPseudo(function() { + return [ 0 ]; + }), + + "last": createPositionalPseudo(function( matchIndexes, length ) { + return [ length - 1 ]; + }), + + "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + }), + + "even": createPositionalPseudo(function( matchIndexes, length ) { + for ( var i = 0; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "odd": createPositionalPseudo(function( matchIndexes, length ) { + for ( var i = 1; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { + for ( var i = argument < 0 ? argument + length : argument; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { + for ( var i = argument < 0 ? argument + length : argument; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }) + } +}; + +function siblingCheck( a, b, ret ) { + if ( a === b ) { + return ret; + } + + var cur = a.nextSibling; + + while ( cur ) { + if ( cur === b ) { + return -1; + } + + cur = cur.nextSibling; + } + + return 1; +} + +sortOrder = docElem.compareDocumentPosition ? + function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + return ( !a.compareDocumentPosition || !b.compareDocumentPosition ? + a.compareDocumentPosition : + a.compareDocumentPosition(b) & 4 + ) ? -1 : 1; + } : + function( a, b ) { + // The nodes are identical, we can exit early + if ( a === b ) { + hasDuplicate = true; + return 0; + + // Fallback to using sourceIndex (in IE) if it's available on both nodes + } else if ( a.sourceIndex && b.sourceIndex ) { + return a.sourceIndex - b.sourceIndex; + } + + var al, bl, + ap = [], + bp = [], + aup = a.parentNode, + bup = b.parentNode, + cur = aup; + + // If the nodes are siblings (or identical) we can do a quick check + if ( aup === bup ) { + return siblingCheck( a, b ); + + // If no parents were found then the nodes are disconnected + } else if ( !aup ) { + return -1; + + } else if ( !bup ) { + return 1; + } + + // Otherwise they're somewhere else in the tree so we need + // to build up a full list of the parentNodes for comparison + while ( cur ) { + ap.unshift( cur ); + cur = cur.parentNode; + } + + cur = bup; + + while ( cur ) { + bp.unshift( cur ); + cur = cur.parentNode; + } + + al = ap.length; + bl = bp.length; + + // Start walking down the tree looking for a discrepancy + for ( var i = 0; i < al && i < bl; i++ ) { + if ( ap[i] !== bp[i] ) { + return siblingCheck( ap[i], bp[i] ); + } + } + + // We ended someplace up the tree so do a sibling check + return i === al ? + siblingCheck( a, bp[i], -1 ) : + siblingCheck( ap[i], b, 1 ); + }; + +// Always assume the presence of duplicates if sort doesn't +// pass them to our comparison function (as in Google Chrome). 
+[0, 0].sort( sortOrder ); +baseHasDuplicate = !hasDuplicate; + +// Document sorting and removing duplicates +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + i = 1, + j = 0; + + hasDuplicate = baseHasDuplicate; + results.sort( sortOrder ); + + if ( hasDuplicate ) { + for ( ; (elem = results[i]); i++ ) { + if ( elem === results[ i - 1 ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + return results; +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +function tokenize( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ expando ][ selector + " " ]; + + if ( cached ) { + return parseOnly ? 0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || (match = rcomma.exec( soFar )) ) { + if ( match ) { + // Don't consume trailing commas as valid + soFar = soFar.slice( match[0].length ) || soFar; + } + groups.push( tokens = [] ); + } + + matched = false; + + // Combinators + if ( (match = rcombinators.exec( soFar )) ) { + tokens.push( matched = new Token( match.shift() ) ); + soFar = soFar.slice( matched.length ); + + // Cast descendant combinators to space + matched.type = match[0].replace( rtrim, " " ); + } + + // Filters + for ( type in Expr.filter ) { + if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || + (match = preFilters[ type ]( match ))) ) { + + tokens.push( matched = new Token( match.shift() ) ); + soFar = soFar.slice( matched.length ); + matched.type = type; + matched.matches = match; + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + checkNonElements = base && combinator.dir === "parentNode", + doneName = done++; + + return combinator.first ? + // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( (elem = elem[ dir ]) ) { + if ( checkNonElements || elem.nodeType === 1 ) { + return matcher( elem, context, xml ); + } + } + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + // We can't set arbitrary data on XML nodes, so they don't benefit from dir caching + if ( !xml ) { + var cache, + dirkey = dirruns + " " + doneName + " ", + cachedkey = dirkey + cachedruns; + while ( (elem = elem[ dir ]) ) { + if ( checkNonElements || elem.nodeType === 1 ) { + if ( (cache = elem[ expando ]) === cachedkey ) { + return elem.sizset; + } else if ( typeof cache === "string" && cache.indexOf(dirkey) === 0 ) { + if ( elem.sizset ) { + return elem; + } + } else { + elem[ expando ] = cachedkey; + if ( matcher( elem, context, xml ) ) { + elem.sizset = true; + return elem; + } + elem.sizset = false; + } + } + } + } else { + while ( (elem = elem[ dir ]) ) { + if ( checkNonElements || elem.nodeType === 1 ) { + if ( matcher( elem, context, xml ) ) { + return elem; + } + } + } + } + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? 
+ function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[i]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[0]; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( (elem = unmatched[i]) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction(function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? + + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( (elem = temp[i]) ) { + matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) ) { + // Restore matcherIn since elem is not yet a final match + temp.push( (matcherIn[i] = elem) ); + } + } + postFinder( null, (matcherOut = []), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) && + (temp = postFinder ? indexOf.call( seed, elem ) : preMap[i]) > -1 ) { + + seed[temp] = !(results[temp] = elem); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? + matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + }); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[0].type ], + implicitRelative = leadingRelative || Expr.relative[" "], + i = leadingRelative ? 
1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf.call( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + return ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + (checkContext = context).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + } ]; + + for ( ; i < len; i++ ) { + if ( (matcher = Expr.relative[ tokens[i].type ]) ) { + matchers = [ addCombinator( elementMatcher( matchers ), matcher ) ]; + } else { + matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[j].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && tokens.slice( 0, i - 1 ).join("").replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), + j < len && tokens.join("") + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, expandContext ) { + var elem, j, matcher, + setMatched = [], + matchedCount = 0, + i = "0", + unmatched = seed && [], + outermost = expandContext != null, + contextBackup = outermostContext, + // We must always have either seed elements or context + elems = seed || byElement && Expr.find["TAG"]( "*", expandContext && context.parentNode || context ), + // Nested matchers should use non-integer dirruns + dirrunsUnique = (dirruns += contextBackup == null ? 
1 : Math.E); + + if ( outermost ) { + outermostContext = context !== document && context; + cachedruns = superMatcher.el; + } + + // Add elements passing elementMatchers directly to results + for ( ; (elem = elems[i]) != null; i++ ) { + if ( byElement && elem ) { + for ( j = 0; (matcher = elementMatchers[j]); j++ ) { + if ( matcher( elem, context, xml ) ) { + results.push( elem ); + break; + } + } + if ( outermost ) { + dirruns = dirrunsUnique; + cachedruns = ++superMatcher.el; + } + } + + // Track unmatched elements for set filters + if ( bySet ) { + // They will have gone through all possible matchers + if ( (elem = !matcher && elem) ) { + matchedCount--; + } + + // Lengthen the array for every element, matched or not + if ( seed ) { + unmatched.push( elem ); + } + } + } + + // Apply set filters to unmatched elements + matchedCount += i; + if ( bySet && i !== matchedCount ) { + for ( j = 0; (matcher = setMatchers[j]); j++ ) { + matcher( unmatched, setMatched, context, xml ); + } + + if ( seed ) { + // Reintegrate element matches to eliminate the need for sorting + if ( matchedCount > 0 ) { + while ( i-- ) { + if ( !(unmatched[i] || setMatched[i]) ) { + setMatched[i] = pop.call( results ); + } + } + } + + // Discard index placeholder values to get only actual matches + setMatched = condense( setMatched ); + } + + // Add matches to results + push.apply( results, setMatched ); + + // Seedless set matches succeeding multiple successful matchers stipulate sorting + if ( outermost && !seed && setMatched.length > 0 && + ( matchedCount + setMatchers.length ) > 1 ) { + + Sizzle.uniqueSort( results ); + } + } + + // Override manipulation of globals by nested matchers + if ( outermost ) { + dirruns = dirrunsUnique; + outermostContext = contextBackup; + } + + return unmatched; + }; + + superMatcher.el = 0; + return bySet ? 
+ markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, group /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ expando ][ selector + " " ]; + + if ( !cached ) { + // Generate a function of recursive functions that can be used to check each element + if ( !group ) { + group = tokenize( selector ); + } + i = group.length; + while ( i-- ) { + cached = matcherFromTokens( group[i] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); + } + return cached; +}; + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[i], results ); + } + return results; +} + +function select( selector, context, results, seed, xml ) { + var i, tokens, token, type, find, + match = tokenize( selector ), + j = match.length; + + if ( !seed ) { + // Try to minimize operations if there is only one group + if ( match.length === 1 ) { + + // Take a shortcut and set the context if the root selector is an ID + tokens = match[0] = match[0].slice( 0 ); + if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && + context.nodeType === 9 && !xml && + Expr.relative[ tokens[1].type ] ) { + + context = Expr.find["ID"]( token.matches[0].replace( rbackslash, "" ), context, xml )[0]; + if ( !context ) { + return results; + } + + selector = selector.slice( tokens.shift().length ); + } + + // Fetch a seed set for right-to-left matching + for ( i = matchExpr["POS"].test( selector ) ? -1 : tokens.length - 1; i >= 0; i-- ) { + token = tokens[i]; + + // Abort if we hit a combinator + if ( Expr.relative[ (type = token.type) ] ) { + break; + } + if ( (find = Expr.find[ type ]) ) { + // Search, expanding context for leading sibling combinators + if ( (seed = find( + token.matches[0].replace( rbackslash, "" ), + rsibling.test( tokens[0].type ) && context.parentNode || context, + xml + )) ) { + + // If seed is empty or no tokens remain, we can return early + tokens.splice( i, 1 ); + selector = seed.length && tokens.join(""); + if ( !selector ) { + push.apply( results, slice.call( seed, 0 ) ); + return results; + } + + break; + } + } + } + } + } + + // Compile and execute a filtering function + // Provide `match` to avoid retokenization if we modified the selector above + compile( selector, match )( + seed, + context, + xml, + results, + rsibling.test( selector ) + ); + return results; +} + +if ( document.querySelectorAll ) { + (function() { + var disconnectedMatch, + oldSelect = select, + rescape = /'|\\/g, + rattributeQuotes = /\=[\x20\t\r\n\f]*([^'"\]]*)[\x20\t\r\n\f]*\]/g, + + // qSa(:focus) reports false when true (Chrome 21), no need to also add to buggyMatches since matches checks buggyQSA + // A support test would require too much code (would include document ready) + rbuggyQSA = [ ":focus" ], + + // matchesSelector(:active) reports false when true (IE9/Opera 11.5) + // A support test would require too much code (would include document ready) + // just skip matchesSelector for :active + rbuggyMatches = [ ":active" ], + matches = docElem.matchesSelector || + docElem.mozMatchesSelector || + docElem.webkitMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector; + + // Build QSA regex + // Regex strategy adopted from Diego Perini 
+ assert(function( div ) { + // Select is set to empty string on purpose + // This is to test IE's treatment of not explictly + // setting a boolean content attribute, + // since its presence should be enough + // http://bugs.jquery.com/ticket/12359 + div.innerHTML = ""; + + // IE8 - Some boolean attributes are not treated correctly + if ( !div.querySelectorAll("[selected]").length ) { + rbuggyQSA.push( "\\[" + whitespace + "*(?:checked|disabled|ismap|multiple|readonly|selected|value)" ); + } + + // Webkit/Opera - :checked should return selected option elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + // IE8 throws error here (do not put tests after this one) + if ( !div.querySelectorAll(":checked").length ) { + rbuggyQSA.push(":checked"); + } + }); + + assert(function( div ) { + + // Opera 10-12/IE9 - ^= $= *= and empty values + // Should not select anything + div.innerHTML = "
<p test=''></p>
"; + if ( div.querySelectorAll("[test^='']").length ) { + rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:\"\"|'')" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here (do not put tests after this one) + div.innerHTML = ""; + if ( !div.querySelectorAll(":enabled").length ) { + rbuggyQSA.push(":enabled", ":disabled"); + } + }); + + // rbuggyQSA always contains :focus, so no need for a length check + rbuggyQSA = /* rbuggyQSA.length && */ new RegExp( rbuggyQSA.join("|") ); + + select = function( selector, context, results, seed, xml ) { + // Only use querySelectorAll when not filtering, + // when this is not xml, + // and when no QSA bugs apply + if ( !seed && !xml && !rbuggyQSA.test( selector ) ) { + var groups, i, + old = true, + nid = expando, + newContext = context, + newSelector = context.nodeType === 9 && selector; + + // qSA works strangely on Element-rooted queries + // We can work around this by specifying an extra ID on the root + // and working up from there (Thanks to Andrew Dupont for the technique) + // IE 8 doesn't work on object elements + if ( context.nodeType === 1 && context.nodeName.toLowerCase() !== "object" ) { + groups = tokenize( selector ); + + if ( (old = context.getAttribute("id")) ) { + nid = old.replace( rescape, "\\$&" ); + } else { + context.setAttribute( "id", nid ); + } + nid = "[id='" + nid + "'] "; + + i = groups.length; + while ( i-- ) { + groups[i] = nid + groups[i].join(""); + } + newContext = rsibling.test( selector ) && context.parentNode || context; + newSelector = groups.join(","); + } + + if ( newSelector ) { + try { + push.apply( results, slice.call( newContext.querySelectorAll( + newSelector + ), 0 ) ); + return results; + } catch(qsaError) { + } finally { + if ( !old ) { + context.removeAttribute("id"); + } + } + } + } + + return oldSelect( selector, context, results, seed, xml ); + }; + + if ( matches ) { + assert(function( div ) { + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + disconnectedMatch = matches.call( div, "div" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + try { + matches.call( div, "[test!='']:sizzle" ); + rbuggyMatches.push( "!=", pseudos ); + } catch ( e ) {} + }); + + // rbuggyMatches always contains :active and :focus, so no need for a length check + rbuggyMatches = /* rbuggyMatches.length && */ new RegExp( rbuggyMatches.join("|") ); + + Sizzle.matchesSelector = function( elem, expr ) { + // Make sure that attribute selectors are quoted + expr = expr.replace( rattributeQuotes, "='$1']" ); + + // rbuggyMatches always contains :active, so no need for an existence check + if ( !isXML( elem ) && !rbuggyMatches.test( expr ) && !rbuggyQSA.test( expr ) ) { + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || disconnectedMatch || + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch(e) {} + } + + return Sizzle( expr, null, null, [ elem ] ).length > 0; + }; + } + })(); +} + +// Deprecated +Expr.pseudos["nth"] = Expr.pseudos["eq"]; + +// Back-compat +function setFilters() {} +Expr.filters = setFilters.prototype = Expr.pseudos; +Expr.setFilters = new setFilters(); + +// Override sizzle attribute retrieval +Sizzle.attr = jQuery.attr; +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; 
+jQuery.expr[":"] = jQuery.expr.pseudos; +jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; + + +})( window ); +var runtil = /Until$/, + rparentsprev = /^(?:parents|prev(?:Until|All))/, + isSimple = /^.[^:#\[\.,]*$/, + rneedsContext = jQuery.expr.match.needsContext, + // methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.fn.extend({ + find: function( selector ) { + var i, l, length, n, r, ret, + self = this; + + if ( typeof selector !== "string" ) { + return jQuery( selector ).filter(function() { + for ( i = 0, l = self.length; i < l; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + }); + } + + ret = this.pushStack( "", "find", selector ); + + for ( i = 0, l = this.length; i < l; i++ ) { + length = ret.length; + jQuery.find( selector, this[i], ret ); + + if ( i > 0 ) { + // Make sure that the results are unique + for ( n = length; n < ret.length; n++ ) { + for ( r = 0; r < length; r++ ) { + if ( ret[r] === ret[n] ) { + ret.splice(n--, 1); + break; + } + } + } + } + } + + return ret; + }, + + has: function( target ) { + var i, + targets = jQuery( target, this ), + len = targets.length; + + return this.filter(function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( this, targets[i] ) ) { + return true; + } + } + }); + }, + + not: function( selector ) { + return this.pushStack( winnow(this, selector, false), "not", selector); + }, + + filter: function( selector ) { + return this.pushStack( winnow(this, selector, true), "filter", selector ); + }, + + is: function( selector ) { + return !!selector && ( + typeof selector === "string" ? + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + rneedsContext.test( selector ) ? + jQuery( selector, this.context ).index( this[0] ) >= 0 : + jQuery.filter( selector, this ).length > 0 : + this.filter( selector ).length > 0 ); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + ret = [], + pos = rneedsContext.test( selectors ) || typeof selectors !== "string" ? + jQuery( selectors, context || this.context ) : + 0; + + for ( ; i < l; i++ ) { + cur = this[i]; + + while ( cur && cur.ownerDocument && cur !== context && cur.nodeType !== 11 ) { + if ( pos ? pos.index(cur) > -1 : jQuery.find.matchesSelector(cur, selectors) ) { + ret.push( cur ); + break; + } + cur = cur.parentNode; + } + } + + ret = ret.length > 1 ? jQuery.unique( ret ) : ret; + + return this.pushStack( ret, "closest", selectors ); + }, + + // Determine the position of an element within + // the matched set of elements + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[0] && this[0].parentNode ) ? this.prevAll().length : -1; + } + + // index in selector + if ( typeof elem === "string" ) { + return jQuery.inArray( this[0], jQuery( elem ) ); + } + + // Locate the position of the desired element + return jQuery.inArray( + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[0] : elem, this ); + }, + + add: function( selector, context ) { + var set = typeof selector === "string" ? + jQuery( selector, context ) : + jQuery.makeArray( selector && selector.nodeType ? 
[ selector ] : selector ), + all = jQuery.merge( this.get(), set ); + + return this.pushStack( isDisconnected( set[0] ) || isDisconnected( all[0] ) ? + all : + jQuery.unique( all ) ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter(selector) + ); + } +}); + +jQuery.fn.andSelf = jQuery.fn.addBack; + +// A painfully simple check to see if an element is disconnected +// from a document (should be improved, where feasible). +function isDisconnected( node ) { + return !node || !node.parentNode || node.parentNode.nodeType === 11; +} + +function sibling( cur, dir ) { + do { + cur = cur[ dir ]; + } while ( cur && cur.nodeType !== 1 ); + + return cur; +} + +jQuery.each({ + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? parent : null; + }, + parents: function( elem ) { + return jQuery.dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, i, until ) { + return jQuery.dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return jQuery.dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return jQuery.dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, i, until ) { + return jQuery.dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, i, until ) { + return jQuery.dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return jQuery.sibling( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return jQuery.sibling( elem.firstChild ); + }, + contents: function( elem ) { + return jQuery.nodeName( elem, "iframe" ) ? + elem.contentDocument || elem.contentWindow.document : + jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var ret = jQuery.map( this, fn, until ); + + if ( !runtil.test( name ) ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + ret = jQuery.filter( selector, ret ); + } + + ret = this.length > 1 && !guaranteedUnique[ name ] ? jQuery.unique( ret ) : ret; + + if ( this.length > 1 && rparentsprev.test( name ) ) { + ret = ret.reverse(); + } + + return this.pushStack( ret, name, core_slice.call( arguments ).join(",") ); + }; +}); + +jQuery.extend({ + filter: function( expr, elems, not ) { + if ( not ) { + expr = ":not(" + expr + ")"; + } + + return elems.length === 1 ? + jQuery.find.matchesSelector(elems[0], expr) ? 
[ elems[0] ] : [] : + jQuery.find.matches(expr, elems); + }, + + dir: function( elem, dir, until ) { + var matched = [], + cur = elem[ dir ]; + + while ( cur && cur.nodeType !== 9 && (until === undefined || cur.nodeType !== 1 || !jQuery( cur ).is( until )) ) { + if ( cur.nodeType === 1 ) { + matched.push( cur ); + } + cur = cur[dir]; + } + return matched; + }, + + sibling: function( n, elem ) { + var r = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + r.push( n ); + } + } + + return r; + } +}); + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, keep ) { + + // Can't pass null or undefined to indexOf in Firefox 4 + // Set to 0 to skip string check + qualifier = qualifier || 0; + + if ( jQuery.isFunction( qualifier ) ) { + return jQuery.grep(elements, function( elem, i ) { + var retVal = !!qualifier.call( elem, i, elem ); + return retVal === keep; + }); + + } else if ( qualifier.nodeType ) { + return jQuery.grep(elements, function( elem, i ) { + return ( elem === qualifier ) === keep; + }); + + } else if ( typeof qualifier === "string" ) { + var filtered = jQuery.grep(elements, function( elem ) { + return elem.nodeType === 1; + }); + + if ( isSimple.test( qualifier ) ) { + return jQuery.filter(qualifier, filtered, !keep); + } else { + qualifier = jQuery.filter( qualifier, filtered ); + } + } + + return jQuery.grep(elements, function( elem, i ) { + return ( jQuery.inArray( elem, qualifier ) >= 0 ) === keep; + }); +} +function createSafeFragment( document ) { + var list = nodeNames.split( "|" ), + safeFrag = document.createDocumentFragment(); + + if ( safeFrag.createElement ) { + while ( list.length ) { + safeFrag.createElement( + list.pop() + ); + } + } + return safeFrag; +} + +var nodeNames = "abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|" + + "header|hgroup|mark|meter|nav|output|progress|section|summary|time|video", + rinlinejQuery = / jQuery\d+="(?:null|\d+)"/g, + rleadingWhitespace = /^\s+/, + rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi, + rtagName = /<([\w:]+)/, + rtbody = /]", "i"), + rcheckableType = /^(?:checkbox|radio)$/, + // checked="checked" or checked + rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i, + rscriptType = /\/(java|ecma)script/i, + rcleanScript = /^\s*\s*$/g, + wrapMap = { + option: [ 1, "" ], + legend: [ 1, "
", "
" ], + thead: [ 1, "", "
" ], + tr: [ 2, "", "
" ], + td: [ 3, "", "
" ], + col: [ 2, "", "
" ], + area: [ 1, "", "" ], + _default: [ 0, "", "" ] + }, + safeFragment = createSafeFragment( document ), + fragmentDiv = safeFragment.appendChild( document.createElement("div") ); + +wrapMap.optgroup = wrapMap.option; +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + +// IE6-8 can't serialize link, script, style, or any html5 (NoScope) tags, +// unless wrapped in a div with non-breaking characters in front of it. +if ( !jQuery.support.htmlSerialize ) { + wrapMap._default = [ 1, "X
", "
" ]; +} + +jQuery.fn.extend({ + text: function( value ) { + return jQuery.access( this, function( value ) { + return value === undefined ? + jQuery.text( this ) : + this.empty().append( ( this[0] && this[0].ownerDocument || document ).createTextNode( value ) ); + }, null, value, arguments.length ); + }, + + wrapAll: function( html ) { + if ( jQuery.isFunction( html ) ) { + return this.each(function(i) { + jQuery(this).wrapAll( html.call(this, i) ); + }); + } + + if ( this[0] ) { + // The elements to wrap the target around + var wrap = jQuery( html, this[0].ownerDocument ).eq(0).clone(true); + + if ( this[0].parentNode ) { + wrap.insertBefore( this[0] ); + } + + wrap.map(function() { + var elem = this; + + while ( elem.firstChild && elem.firstChild.nodeType === 1 ) { + elem = elem.firstChild; + } + + return elem; + }).append( this ); + } + + return this; + }, + + wrapInner: function( html ) { + if ( jQuery.isFunction( html ) ) { + return this.each(function(i) { + jQuery(this).wrapInner( html.call(this, i) ); + }); + } + + return this.each(function() { + var self = jQuery( this ), + contents = self.contents(); + + if ( contents.length ) { + contents.wrapAll( html ); + + } else { + self.append( html ); + } + }); + }, + + wrap: function( html ) { + var isFunction = jQuery.isFunction( html ); + + return this.each(function(i) { + jQuery( this ).wrapAll( isFunction ? html.call(this, i) : html ); + }); + }, + + unwrap: function() { + return this.parent().each(function() { + if ( !jQuery.nodeName( this, "body" ) ) { + jQuery( this ).replaceWith( this.childNodes ); + } + }).end(); + }, + + append: function() { + return this.domManip(arguments, true, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 ) { + this.appendChild( elem ); + } + }); + }, + + prepend: function() { + return this.domManip(arguments, true, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 ) { + this.insertBefore( elem, this.firstChild ); + } + }); + }, + + before: function() { + if ( !isDisconnected( this[0] ) ) { + return this.domManip(arguments, false, function( elem ) { + this.parentNode.insertBefore( elem, this ); + }); + } + + if ( arguments.length ) { + var set = jQuery.clean( arguments ); + return this.pushStack( jQuery.merge( set, this ), "before", this.selector ); + } + }, + + after: function() { + if ( !isDisconnected( this[0] ) ) { + return this.domManip(arguments, false, function( elem ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + }); + } + + if ( arguments.length ) { + var set = jQuery.clean( arguments ); + return this.pushStack( jQuery.merge( this, set ), "after", this.selector ); + } + }, + + // keepData is for internal use only--do not document + remove: function( selector, keepData ) { + var elem, + i = 0; + + for ( ; (elem = this[i]) != null; i++ ) { + if ( !selector || jQuery.filter( selector, [ elem ] ).length ) { + if ( !keepData && elem.nodeType === 1 ) { + jQuery.cleanData( elem.getElementsByTagName("*") ); + jQuery.cleanData( [ elem ] ); + } + + if ( elem.parentNode ) { + elem.parentNode.removeChild( elem ); + } + } + } + + return this; + }, + + empty: function() { + var elem, + i = 0; + + for ( ; (elem = this[i]) != null; i++ ) { + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( elem.getElementsByTagName("*") ); + } + + // Remove any remaining nodes + while ( elem.firstChild ) { + elem.removeChild( elem.firstChild ); + } + } + + return this; + }, + + clone: function( dataAndEvents, 
deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map( function () { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + }); + }, + + html: function( value ) { + return jQuery.access( this, function( value ) { + var elem = this[0] || {}, + i = 0, + l = this.length; + + if ( value === undefined ) { + return elem.nodeType === 1 ? + elem.innerHTML.replace( rinlinejQuery, "" ) : + undefined; + } + + // See if we can take a shortcut and just use innerHTML + if ( typeof value === "string" && !rnoInnerhtml.test( value ) && + ( jQuery.support.htmlSerialize || !rnoshimcache.test( value ) ) && + ( jQuery.support.leadingWhitespace || !rleadingWhitespace.test( value ) ) && + !wrapMap[ ( rtagName.exec( value ) || ["", ""] )[1].toLowerCase() ] ) { + + value = value.replace( rxhtmlTag, "<$1>" ); + + try { + for (; i < l; i++ ) { + // Remove element nodes and prevent memory leaks + elem = this[i] || {}; + if ( elem.nodeType === 1 ) { + jQuery.cleanData( elem.getElementsByTagName( "*" ) ); + elem.innerHTML = value; + } + } + + elem = 0; + + // If using innerHTML throws an exception, use the fallback method + } catch(e) {} + } + + if ( elem ) { + this.empty().append( value ); + } + }, null, value, arguments.length ); + }, + + replaceWith: function( value ) { + if ( !isDisconnected( this[0] ) ) { + // Make sure that the elements are removed from the DOM before they are inserted + // this can help fix replacing a parent with child elements + if ( jQuery.isFunction( value ) ) { + return this.each(function(i) { + var self = jQuery(this), old = self.html(); + self.replaceWith( value.call( this, i, old ) ); + }); + } + + if ( typeof value !== "string" ) { + value = jQuery( value ).detach(); + } + + return this.each(function() { + var next = this.nextSibling, + parent = this.parentNode; + + jQuery( this ).remove(); + + if ( next ) { + jQuery(next).before( value ); + } else { + jQuery(parent).append( value ); + } + }); + } + + return this.length ? + this.pushStack( jQuery(jQuery.isFunction(value) ? value() : value), "replaceWith", value ) : + this; + }, + + detach: function( selector ) { + return this.remove( selector, true ); + }, + + domManip: function( args, table, callback ) { + + // Flatten any nested arrays + args = [].concat.apply( [], args ); + + var results, first, fragment, iNoClone, + i = 0, + value = args[0], + scripts = [], + l = this.length; + + // We can't cloneNode fragments that contain checked, in WebKit + if ( !jQuery.support.checkClone && l > 1 && typeof value === "string" && rchecked.test( value ) ) { + return this.each(function() { + jQuery(this).domManip( args, table, callback ); + }); + } + + if ( jQuery.isFunction(value) ) { + return this.each(function(i) { + var self = jQuery(this); + args[0] = value.call( this, i, table ? self.html() : undefined ); + self.domManip( args, table, callback ); + }); + } + + if ( this[0] ) { + results = jQuery.buildFragment( args, this, scripts ); + fragment = results.fragment; + first = fragment.firstChild; + + if ( fragment.childNodes.length === 1 ) { + fragment = first; + } + + if ( first ) { + table = table && jQuery.nodeName( first, "tr" ); + + // Use the original fragment for the last item instead of the first because it can end up + // being emptied incorrectly in certain situations (#8070). + // Fragments from the fragment cache must always be cloned and never used in place. 
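+ // When the fragment is not from the cache, iNoClone is the last index, so the
+ // last target in the set receives the original fragment while every earlier
+ // target receives a deep clone (illustrative example: appending one existing
+ // DOM node to a jQuery set of three elements moves that node into the third
+ // element and inserts clones of it into the first two). Cached fragments set
+ // iNoClone to true, which never equals a numeric index, so they are always cloned.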
+ for ( iNoClone = results.cacheable || l - 1; i < l; i++ ) { + callback.call( + table && jQuery.nodeName( this[i], "table" ) ? + findOrAppend( this[i], "tbody" ) : + this[i], + i === iNoClone ? + fragment : + jQuery.clone( fragment, true, true ) + ); + } + } + + // Fix #11809: Avoid leaking memory + fragment = first = null; + + if ( scripts.length ) { + jQuery.each( scripts, function( i, elem ) { + if ( elem.src ) { + if ( jQuery.ajax ) { + jQuery.ajax({ + url: elem.src, + type: "GET", + dataType: "script", + async: false, + global: false, + "throws": true + }); + } else { + jQuery.error("no ajax"); + } + } else { + jQuery.globalEval( ( elem.text || elem.textContent || elem.innerHTML || "" ).replace( rcleanScript, "" ) ); + } + + if ( elem.parentNode ) { + elem.parentNode.removeChild( elem ); + } + }); + } + } + + return this; + } +}); + +function findOrAppend( elem, tag ) { + return elem.getElementsByTagName( tag )[0] || elem.appendChild( elem.ownerDocument.createElement( tag ) ); +} + +function cloneCopyEvent( src, dest ) { + + if ( dest.nodeType !== 1 || !jQuery.hasData( src ) ) { + return; + } + + var type, i, l, + oldData = jQuery._data( src ), + curData = jQuery._data( dest, oldData ), + events = oldData.events; + + if ( events ) { + delete curData.handle; + curData.events = {}; + + for ( type in events ) { + for ( i = 0, l = events[ type ].length; i < l; i++ ) { + jQuery.event.add( dest, type, events[ type ][ i ] ); + } + } + } + + // make the cloned public data object a copy from the original + if ( curData.data ) { + curData.data = jQuery.extend( {}, curData.data ); + } +} + +function cloneFixAttributes( src, dest ) { + var nodeName; + + // We do not need to do anything for non-Elements + if ( dest.nodeType !== 1 ) { + return; + } + + // clearAttributes removes the attributes, which we don't want, + // but also removes the attachEvent events, which we *do* want + if ( dest.clearAttributes ) { + dest.clearAttributes(); + } + + // mergeAttributes, in contrast, only merges back on the + // original attributes, not the events + if ( dest.mergeAttributes ) { + dest.mergeAttributes( src ); + } + + nodeName = dest.nodeName.toLowerCase(); + + if ( nodeName === "object" ) { + // IE6-10 improperly clones children of object elements using classid. + // IE10 throws NoModificationAllowedError if parent is null, #12132. + if ( dest.parentNode ) { + dest.outerHTML = src.outerHTML; + } + + // This path appears unavoidable for IE9. When cloning an object + // element in IE9, the outerHTML strategy above is not sufficient. + // If the src has innerHTML and the destination does not, + // copy the src.innerHTML into the dest.innerHTML. #10324 + if ( jQuery.support.html5Clone && (src.innerHTML && !jQuery.trim(dest.innerHTML)) ) { + dest.innerHTML = src.innerHTML; + } + + } else if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + // IE6-8 fails to persist the checked state of a cloned checkbox + // or radio button. 
Worse, IE6-7 fail to give the cloned element + // a checked appearance if the defaultChecked value isn't also set + + dest.defaultChecked = dest.checked = src.checked; + + // IE6-7 get confused and end up setting the value of a cloned + // checkbox/radio button to an empty string instead of "on" + if ( dest.value !== src.value ) { + dest.value = src.value; + } + + // IE6-8 fails to return the selected option to the default selected + // state when cloning options + } else if ( nodeName === "option" ) { + dest.selected = src.defaultSelected; + + // IE6-8 fails to set the defaultValue to the correct value when + // cloning other types of input fields + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + + // IE blanks contents when cloning scripts + } else if ( nodeName === "script" && dest.text !== src.text ) { + dest.text = src.text; + } + + // Event data gets referenced instead of copied if the expando + // gets copied too + dest.removeAttribute( jQuery.expando ); +} + +jQuery.buildFragment = function( args, context, scripts ) { + var fragment, cacheable, cachehit, + first = args[ 0 ]; + + // Set context from what may come in as undefined or a jQuery collection or a node + // Updated to fix #12266 where accessing context[0] could throw an exception in IE9/10 & + // also doubles as fix for #8950 where plain objects caused createDocumentFragment exception + context = context || document; + context = !context.nodeType && context[0] || context; + context = context.ownerDocument || context; + + // Only cache "small" (1/2 KB) HTML strings that are associated with the main document + // Cloning options loses the selected state, so don't cache them + // IE 6 doesn't like it when you put or elements in a fragment + // Also, WebKit does not clone 'checked' attributes on cloneNode, so don't cache + // Lastly, IE6,7,8 will not correctly reuse cached fragments that were created from unknown elems #10501 + if ( args.length === 1 && typeof first === "string" && first.length < 512 && context === document && + first.charAt(0) === "<" && !rnocache.test( first ) && + (jQuery.support.checkClone || !rchecked.test( first )) && + (jQuery.support.html5Clone || !rnoshimcache.test( first )) ) { + + // Mark cacheable and look for a hit + cacheable = true; + fragment = jQuery.fragments[ first ]; + cachehit = fragment !== undefined; + } + + if ( !fragment ) { + fragment = context.createDocumentFragment(); + jQuery.clean( args, context, fragment, scripts ); + + // Update the cache, but only store false + // unless this is a second parsing of the same content + if ( cacheable ) { + jQuery.fragments[ first ] = cachehit && fragment; + } + } + + return { fragment: fragment, cacheable: cacheable }; +}; + +jQuery.fragments = {}; + +jQuery.each({ + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + i = 0, + ret = [], + insert = jQuery( selector ), + l = insert.length, + parent = this.length === 1 && this[0].parentNode; + + if ( (parent == null || parent && parent.nodeType === 11 && parent.childNodes.length === 1) && l === 1 ) { + insert[ original ]( this[0] ); + return this; + } else { + for ( ; i < l; i++ ) { + elems = ( i > 0 ? 
this.clone(true) : this ).get(); + jQuery( insert[i] )[ original ]( elems ); + ret = ret.concat( elems ); + } + + return this.pushStack( ret, name, insert.selector ); + } + }; +}); + +function getAll( elem ) { + if ( typeof elem.getElementsByTagName !== "undefined" ) { + return elem.getElementsByTagName( "*" ); + + } else if ( typeof elem.querySelectorAll !== "undefined" ) { + return elem.querySelectorAll( "*" ); + + } else { + return []; + } +} + +// Used in clean, fixes the defaultChecked property +function fixDefaultChecked( elem ) { + if ( rcheckableType.test( elem.type ) ) { + elem.defaultChecked = elem.checked; + } +} + +jQuery.extend({ + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var srcElements, + destElements, + i, + clone; + + if ( jQuery.support.html5Clone || jQuery.isXMLDoc(elem) || !rnoshimcache.test( "<" + elem.nodeName + ">" ) ) { + clone = elem.cloneNode( true ); + + // IE<=8 does not properly clone detached, unknown element nodes + } else { + fragmentDiv.innerHTML = elem.outerHTML; + fragmentDiv.removeChild( clone = fragmentDiv.firstChild ); + } + + if ( (!jQuery.support.noCloneEvent || !jQuery.support.noCloneChecked) && + (elem.nodeType === 1 || elem.nodeType === 11) && !jQuery.isXMLDoc(elem) ) { + // IE copies events bound via attachEvent when using cloneNode. + // Calling detachEvent on the clone will also remove the events + // from the original. In order to get around this, we use some + // proprietary methods to clear the events. Thanks to MooTools + // guys for this hotness. + + cloneFixAttributes( elem, clone ); + + // Using Sizzle here is crazy slow, so we use getElementsByTagName instead + srcElements = getAll( elem ); + destElements = getAll( clone ); + + // Weird iteration because IE will replace the length property + // with an element if you are cloning the body and one of the + // elements on the page has a name or id of "length" + for ( i = 0; srcElements[i]; ++i ) { + // Ensure that the destination node is not null; Fixes #9587 + if ( destElements[i] ) { + cloneFixAttributes( srcElements[i], destElements[i] ); + } + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + cloneCopyEvent( elem, clone ); + + if ( deepDataAndEvents ) { + srcElements = getAll( elem ); + destElements = getAll( clone ); + + for ( i = 0; srcElements[i]; ++i ) { + cloneCopyEvent( srcElements[i], destElements[i] ); + } + } + } + + srcElements = destElements = null; + + // Return the cloned set + return clone; + }, + + clean: function( elems, context, fragment, scripts ) { + var i, j, elem, tag, wrap, depth, div, hasBody, tbody, len, handleScript, jsTags, + safe = context === document && safeFragment, + ret = []; + + // Ensure that context is a document + if ( !context || typeof context.createDocumentFragment === "undefined" ) { + context = document; + } + + // Use the already-created safe fragment if context permits + for ( i = 0; (elem = elems[i]) != null; i++ ) { + if ( typeof elem === "number" ) { + elem += ""; + } + + if ( !elem ) { + continue; + } + + // Convert html string into DOM nodes + if ( typeof elem === "string" ) { + if ( !rhtml.test( elem ) ) { + elem = context.createTextNode( elem ); + } else { + // Ensure a safe container in which to render the html + safe = safe || createSafeFragment( context ); + div = context.createElement("div"); + safe.appendChild( div ); + + // Fix "XHTML"-style tags in all browsers + elem = elem.replace(rxhtmlTag, "<$1>"); + + // Go to html and back, then peel off extra wrappers + tag = ( 
rtagName.exec( elem ) || ["", ""] )[1].toLowerCase(); + wrap = wrapMap[ tag ] || wrapMap._default; + depth = wrap[0]; + div.innerHTML = wrap[1] + elem + wrap[2]; + + // Move to the right depth + while ( depth-- ) { + div = div.lastChild; + } + + // Remove IE's autoinserted from table fragments + if ( !jQuery.support.tbody ) { + + // String was a , *may* have spurious + hasBody = rtbody.test(elem); + tbody = tag === "table" && !hasBody ? + div.firstChild && div.firstChild.childNodes : + + // String was a bare or + wrap[1] === "
" && !hasBody ? + div.childNodes : + []; + + for ( j = tbody.length - 1; j >= 0 ; --j ) { + if ( jQuery.nodeName( tbody[ j ], "tbody" ) && !tbody[ j ].childNodes.length ) { + tbody[ j ].parentNode.removeChild( tbody[ j ] ); + } + } + } + + // IE completely kills leading whitespace when innerHTML is used + if ( !jQuery.support.leadingWhitespace && rleadingWhitespace.test( elem ) ) { + div.insertBefore( context.createTextNode( rleadingWhitespace.exec(elem)[0] ), div.firstChild ); + } + + elem = div.childNodes; + + // Take out of fragment container (we need a fresh div each time) + div.parentNode.removeChild( div ); + } + } + + if ( elem.nodeType ) { + ret.push( elem ); + } else { + jQuery.merge( ret, elem ); + } + } + + // Fix #11356: Clear elements from safeFragment + if ( div ) { + elem = div = safe = null; + } + + // Reset defaultChecked for any radios and checkboxes + // about to be appended to the DOM in IE 6/7 (#8060) + if ( !jQuery.support.appendChecked ) { + for ( i = 0; (elem = ret[i]) != null; i++ ) { + if ( jQuery.nodeName( elem, "input" ) ) { + fixDefaultChecked( elem ); + } else if ( typeof elem.getElementsByTagName !== "undefined" ) { + jQuery.grep( elem.getElementsByTagName("input"), fixDefaultChecked ); + } + } + } + + // Append elements to a provided document fragment + if ( fragment ) { + // Special handling of each script element + handleScript = function( elem ) { + // Check if we consider it executable + if ( !elem.type || rscriptType.test( elem.type ) ) { + // Detach the script and store it in the scripts array (if provided) or the fragment + // Return truthy to indicate that it has been handled + return scripts ? + scripts.push( elem.parentNode ? elem.parentNode.removeChild( elem ) : elem ) : + fragment.appendChild( elem ); + } + }; + + for ( i = 0; (elem = ret[i]) != null; i++ ) { + // Check if we're done after handling an executable script + if ( !( jQuery.nodeName( elem, "script" ) && handleScript( elem ) ) ) { + // Append to fragment and handle embedded scripts + fragment.appendChild( elem ); + if ( typeof elem.getElementsByTagName !== "undefined" ) { + // handleScript alters the DOM, so use jQuery.merge to ensure snapshot iteration + jsTags = jQuery.grep( jQuery.merge( [], elem.getElementsByTagName("script") ), handleScript ); + + // Splice the scripts into ret after their former ancestor and advance our index beyond them + ret.splice.apply( ret, [i + 1, 0].concat( jsTags ) ); + i += jsTags.length; + } + } + } + } + + return ret; + }, + + cleanData: function( elems, /* internal */ acceptData ) { + var data, id, elem, type, + i = 0, + internalKey = jQuery.expando, + cache = jQuery.cache, + deleteExpando = jQuery.support.deleteExpando, + special = jQuery.event.special; + + for ( ; (elem = elems[i]) != null; i++ ) { + + if ( acceptData || jQuery.acceptData( elem ) ) { + + id = elem[ internalKey ]; + data = id && cache[ id ]; + + if ( data ) { + if ( data.events ) { + for ( type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Remove cache only if it was not already removed by jQuery.event.remove + if ( cache[ id ] ) { + + delete cache[ id ]; + + // IE does not allow us to delete expando properties from nodes, + // nor does it have a removeAttribute function on Document nodes; + // we must handle all of these cases + if ( deleteExpando ) { + delete elem[ internalKey ]; + + } else if ( 
elem.removeAttribute ) { + elem.removeAttribute( internalKey ); + + } else { + elem[ internalKey ] = null; + } + + jQuery.deletedIds.push( id ); + } + } + } + } + } +}); +// Limit scope pollution from any deprecated API +(function() { + +var matched, browser; + +// Use of jQuery.browser is frowned upon. +// More details: http://api.jquery.com/jQuery.browser +// jQuery.uaMatch maintained for back-compat +jQuery.uaMatch = function( ua ) { + ua = ua.toLowerCase(); + + var match = /(chrome)[ \/]([\w.]+)/.exec( ua ) || + /(webkit)[ \/]([\w.]+)/.exec( ua ) || + /(opera)(?:.*version|)[ \/]([\w.]+)/.exec( ua ) || + /(msie) ([\w.]+)/.exec( ua ) || + ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec( ua ) || + []; + + return { + browser: match[ 1 ] || "", + version: match[ 2 ] || "0" + }; +}; + +matched = jQuery.uaMatch( navigator.userAgent ); +browser = {}; + +if ( matched.browser ) { + browser[ matched.browser ] = true; + browser.version = matched.version; +} + +// Chrome is Webkit, but Webkit is also Safari. +if ( browser.chrome ) { + browser.webkit = true; +} else if ( browser.webkit ) { + browser.safari = true; +} + +jQuery.browser = browser; + +jQuery.sub = function() { + function jQuerySub( selector, context ) { + return new jQuerySub.fn.init( selector, context ); + } + jQuery.extend( true, jQuerySub, this ); + jQuerySub.superclass = this; + jQuerySub.fn = jQuerySub.prototype = this(); + jQuerySub.fn.constructor = jQuerySub; + jQuerySub.sub = this.sub; + jQuerySub.fn.init = function init( selector, context ) { + if ( context && context instanceof jQuery && !(context instanceof jQuerySub) ) { + context = jQuerySub( context ); + } + + return jQuery.fn.init.call( this, selector, context, rootjQuerySub ); + }; + jQuerySub.fn.init.prototype = jQuerySub.fn; + var rootjQuerySub = jQuerySub(document); + return jQuerySub; +}; + +})(); +var curCSS, iframe, iframeDoc, + ralpha = /alpha\([^)]*\)/i, + ropacity = /opacity=([^)]*)/, + rposition = /^(top|right|bottom|left)$/, + // swappable if display is none or starts with table except "table", "table-cell", or "table-caption" + // see here for display values: https://developer.mozilla.org/en-US/docs/CSS/display + rdisplayswap = /^(none|table(?!-c[ea]).+)/, + rmargin = /^margin/, + rnumsplit = new RegExp( "^(" + core_pnum + ")(.*)$", "i" ), + rnumnonpx = new RegExp( "^(" + core_pnum + ")(?!px)[a-z%]+$", "i" ), + rrelNum = new RegExp( "^([-+])=(" + core_pnum + ")", "i" ), + elemdisplay = { BODY: "block" }, + + cssShow = { position: "absolute", visibility: "hidden", display: "block" }, + cssNormalTransform = { + letterSpacing: 0, + fontWeight: 400 + }, + + cssExpand = [ "Top", "Right", "Bottom", "Left" ], + cssPrefixes = [ "Webkit", "O", "Moz", "ms" ], + + eventsToggle = jQuery.fn.toggle; + +// return a css property mapped to a potentially vendor prefixed property +function vendorPropName( style, name ) { + + // shortcut for names that are not vendor prefixed + if ( name in style ) { + return name; + } + + // check for vendor prefixed names + var capName = name.charAt(0).toUpperCase() + name.slice(1), + origName = name, + i = cssPrefixes.length; + + while ( i-- ) { + name = cssPrefixes[ i ] + capName; + if ( name in style ) { + return name; + } + } + + return origName; +} + +function isHidden( elem, el ) { + elem = el || elem; + return jQuery.css( elem, "display" ) === "none" || !jQuery.contains( elem.ownerDocument, elem ); +} + +function showHide( elements, show ) { + var elem, display, + values = [], + index = 0, + length = 
elements.length; + + for ( ; index < length; index++ ) { + elem = elements[ index ]; + if ( !elem.style ) { + continue; + } + values[ index ] = jQuery._data( elem, "olddisplay" ); + if ( show ) { + // Reset the inline display of this element to learn if it is + // being hidden by cascaded rules or not + if ( !values[ index ] && elem.style.display === "none" ) { + elem.style.display = ""; + } + + // Set elements which have been overridden with display: none + // in a stylesheet to whatever the default browser style is + // for such an element + if ( elem.style.display === "" && isHidden( elem ) ) { + values[ index ] = jQuery._data( elem, "olddisplay", css_defaultDisplay(elem.nodeName) ); + } + } else { + display = curCSS( elem, "display" ); + + if ( !values[ index ] && display !== "none" ) { + jQuery._data( elem, "olddisplay", display ); + } + } + } + + // Set the display of most of the elements in a second loop + // to avoid the constant reflow + for ( index = 0; index < length; index++ ) { + elem = elements[ index ]; + if ( !elem.style ) { + continue; + } + if ( !show || elem.style.display === "none" || elem.style.display === "" ) { + elem.style.display = show ? values[ index ] || "" : "none"; + } + } + + return elements; +} + +jQuery.fn.extend({ + css: function( name, value ) { + return jQuery.access( this, function( elem, name, value ) { + return value !== undefined ? + jQuery.style( elem, name, value ) : + jQuery.css( elem, name ); + }, name, value, arguments.length > 1 ); + }, + show: function() { + return showHide( this, true ); + }, + hide: function() { + return showHide( this ); + }, + toggle: function( state, fn2 ) { + var bool = typeof state === "boolean"; + + if ( jQuery.isFunction( state ) && jQuery.isFunction( fn2 ) ) { + return eventsToggle.apply( this, arguments ); + } + + return this.each(function() { + if ( bool ? state : isHidden( this ) ) { + jQuery( this ).show(); + } else { + jQuery( this ).hide(); + } + }); + } +}); + +jQuery.extend({ + // Add in style property hooks for overriding the default + // behavior of getting and setting a style property + cssHooks: { + opacity: { + get: function( elem, computed ) { + if ( computed ) { + // We should always get a number back from opacity + var ret = curCSS( elem, "opacity" ); + return ret === "" ? "1" : ret; + + } + } + } + }, + + // Exclude the following css properties to add px + cssNumber: { + "fillOpacity": true, + "fontWeight": true, + "lineHeight": true, + "opacity": true, + "orphans": true, + "widows": true, + "zIndex": true, + "zoom": true + }, + + // Add in properties whose names you wish to fix before + // setting or getting the value + cssProps: { + // normalize float css property + "float": jQuery.support.cssFloat ? 
"cssFloat" : "styleFloat" + }, + + // Get and set the style property on a DOM Node + style: function( elem, name, value, extra ) { + // Don't set styles on text and comment nodes + if ( !elem || elem.nodeType === 3 || elem.nodeType === 8 || !elem.style ) { + return; + } + + // Make sure that we're working with the right name + var ret, type, hooks, + origName = jQuery.camelCase( name ), + style = elem.style; + + name = jQuery.cssProps[ origName ] || ( jQuery.cssProps[ origName ] = vendorPropName( style, origName ) ); + + // gets hook for the prefixed version + // followed by the unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // Check if we're setting a value + if ( value !== undefined ) { + type = typeof value; + + // convert relative number strings (+= or -=) to relative numbers. #7345 + if ( type === "string" && (ret = rrelNum.exec( value )) ) { + value = ( ret[1] + 1 ) * ret[2] + parseFloat( jQuery.css( elem, name ) ); + // Fixes bug #9237 + type = "number"; + } + + // Make sure that NaN and null values aren't set. See: #7116 + if ( value == null || type === "number" && isNaN( value ) ) { + return; + } + + // If a number was passed in, add 'px' to the (except for certain CSS properties) + if ( type === "number" && !jQuery.cssNumber[ origName ] ) { + value += "px"; + } + + // If a hook was provided, use that value, otherwise just set the specified value + if ( !hooks || !("set" in hooks) || (value = hooks.set( elem, value, extra )) !== undefined ) { + // Wrapped to prevent IE from throwing errors when 'invalid' values are provided + // Fixes bug #5509 + try { + style[ name ] = value; + } catch(e) {} + } + + } else { + // If a hook was provided get the non-computed value from there + if ( hooks && "get" in hooks && (ret = hooks.get( elem, false, extra )) !== undefined ) { + return ret; + } + + // Otherwise just get the value from the style object + return style[ name ]; + } + }, + + css: function( elem, name, numeric, extra ) { + var val, num, hooks, + origName = jQuery.camelCase( name ); + + // Make sure that we're working with the right name + name = jQuery.cssProps[ origName ] || ( jQuery.cssProps[ origName ] = vendorPropName( elem.style, origName ) ); + + // gets hook for the prefixed version + // followed by the unprefixed version + hooks = jQuery.cssHooks[ name ] || jQuery.cssHooks[ origName ]; + + // If a hook was provided get the computed value from there + if ( hooks && "get" in hooks ) { + val = hooks.get( elem, true, extra ); + } + + // Otherwise, if a way to get the computed value exists, use that + if ( val === undefined ) { + val = curCSS( elem, name ); + } + + //convert "normal" to computed value + if ( val === "normal" && name in cssNormalTransform ) { + val = cssNormalTransform[ name ]; + } + + // Return, converting to number if forced or a qualifier was provided and val looks numeric + if ( numeric || extra !== undefined ) { + num = parseFloat( val ); + return numeric || jQuery.isNumeric( num ) ? 
num || 0 : val; + } + return val; + }, + + // A method for quickly swapping in/out CSS properties to get correct calculations + swap: function( elem, options, callback ) { + var ret, name, + old = {}; + + // Remember the old values, and insert the new ones + for ( name in options ) { + old[ name ] = elem.style[ name ]; + elem.style[ name ] = options[ name ]; + } + + ret = callback.call( elem ); + + // Revert the old values + for ( name in options ) { + elem.style[ name ] = old[ name ]; + } + + return ret; + } +}); + +// NOTE: To any future maintainer, we've window.getComputedStyle +// because jsdom on node.js will break without it. +if ( window.getComputedStyle ) { + curCSS = function( elem, name ) { + var ret, width, minWidth, maxWidth, + computed = window.getComputedStyle( elem, null ), + style = elem.style; + + if ( computed ) { + + // getPropertyValue is only needed for .css('filter') in IE9, see #12537 + ret = computed.getPropertyValue( name ) || computed[ name ]; + + if ( ret === "" && !jQuery.contains( elem.ownerDocument, elem ) ) { + ret = jQuery.style( elem, name ); + } + + // A tribute to the "awesome hack by Dean Edwards" + // Chrome < 17 and Safari 5.0 uses "computed value" instead of "used value" for margin-right + // Safari 5.1.7 (at least) returns percentage for a larger set of values, but width seems to be reliably pixels + // this is against the CSSOM draft spec: http://dev.w3.org/csswg/cssom/#resolved-values + if ( rnumnonpx.test( ret ) && rmargin.test( name ) ) { + width = style.width; + minWidth = style.minWidth; + maxWidth = style.maxWidth; + + style.minWidth = style.maxWidth = style.width = ret; + ret = computed.width; + + style.width = width; + style.minWidth = minWidth; + style.maxWidth = maxWidth; + } + } + + return ret; + }; +} else if ( document.documentElement.currentStyle ) { + curCSS = function( elem, name ) { + var left, rsLeft, + ret = elem.currentStyle && elem.currentStyle[ name ], + style = elem.style; + + // Avoid setting ret to empty string here + // so we don't default to auto + if ( ret == null && style && style[ name ] ) { + ret = style[ name ]; + } + + // From the awesome hack by Dean Edwards + // http://erik.eae.net/archives/2007/07/27/18.54.15/#comment-102291 + + // If we're not dealing with a regular pixel number + // but a number that has a weird ending, we need to convert it to pixels + // but not position css attributes, as those are proportional to the parent element instead + // and we can't measure the parent instead because it might trigger a "stacking dolls" problem + if ( rnumnonpx.test( ret ) && !rposition.test( name ) ) { + + // Remember the original values + left = style.left; + rsLeft = elem.runtimeStyle && elem.runtimeStyle.left; + + // Put in the new values to get a computed value out + if ( rsLeft ) { + elem.runtimeStyle.left = elem.currentStyle.left; + } + style.left = name === "fontSize" ? "1em" : ret; + ret = style.pixelLeft + "px"; + + // Revert the changed values + style.left = left; + if ( rsLeft ) { + elem.runtimeStyle.left = rsLeft; + } + } + + return ret === "" ? "auto" : ret; + }; +} + +function setPositiveNumber( elem, value, subtract ) { + var matches = rnumsplit.exec( value ); + return matches ? + Math.max( 0, matches[ 1 ] - ( subtract || 0 ) ) + ( matches[ 2 ] || "px" ) : + value; +} + +function augmentWidthOrHeight( elem, name, extra, isBorderBox ) { + var i = extra === ( isBorderBox ? "border" : "content" ) ? 
+ // If we already have the right measurement, avoid augmentation + 4 : + // Otherwise initialize for horizontal or vertical properties + name === "width" ? 1 : 0, + + val = 0; + + for ( ; i < 4; i += 2 ) { + // both box models exclude margin, so add it if we want it + if ( extra === "margin" ) { + // we use jQuery.css instead of curCSS here + // because of the reliableMarginRight CSS hook! + val += jQuery.css( elem, extra + cssExpand[ i ], true ); + } + + // From this point on we use curCSS for maximum performance (relevant in animations) + if ( isBorderBox ) { + // border-box includes padding, so remove it if we want content + if ( extra === "content" ) { + val -= parseFloat( curCSS( elem, "padding" + cssExpand[ i ] ) ) || 0; + } + + // at this point, extra isn't border nor margin, so remove border + if ( extra !== "margin" ) { + val -= parseFloat( curCSS( elem, "border" + cssExpand[ i ] + "Width" ) ) || 0; + } + } else { + // at this point, extra isn't content, so add padding + val += parseFloat( curCSS( elem, "padding" + cssExpand[ i ] ) ) || 0; + + // at this point, extra isn't content nor padding, so add border + if ( extra !== "padding" ) { + val += parseFloat( curCSS( elem, "border" + cssExpand[ i ] + "Width" ) ) || 0; + } + } + } + + return val; +} + +function getWidthOrHeight( elem, name, extra ) { + + // Start with offset property, which is equivalent to the border-box value + var val = name === "width" ? elem.offsetWidth : elem.offsetHeight, + valueIsBorderBox = true, + isBorderBox = jQuery.support.boxSizing && jQuery.css( elem, "boxSizing" ) === "border-box"; + + // some non-html elements return undefined for offsetWidth, so check for null/undefined + // svg - https://bugzilla.mozilla.org/show_bug.cgi?id=649285 + // MathML - https://bugzilla.mozilla.org/show_bug.cgi?id=491668 + if ( val <= 0 || val == null ) { + // Fall back to computed then uncomputed css if necessary + val = curCSS( elem, name ); + if ( val < 0 || val == null ) { + val = elem.style[ name ]; + } + + // Computed unit is not pixels. Stop here and return. + if ( rnumnonpx.test(val) ) { + return val; + } + + // we need the check for style in case a browser which returns unreliable values + // for getComputedStyle silently falls back to the reliable elem.style + valueIsBorderBox = isBorderBox && ( jQuery.support.boxSizingReliable || val === elem.style[ name ] ); + + // Normalize "", auto, and prepare for extra + val = parseFloat( val ) || 0; + } + + // use the active box-sizing model to add/subtract irrelevant styles + return ( val + + augmentWidthOrHeight( + elem, + name, + extra || ( isBorderBox ? "border" : "content" ), + valueIsBorderBox + ) + ) + "px"; +} + + +// Try to determine the default display value of an element +function css_defaultDisplay( nodeName ) { + if ( elemdisplay[ nodeName ] ) { + return elemdisplay[ nodeName ]; + } + + var elem = jQuery( "<" + nodeName + ">" ).appendTo( document.body ), + display = elem.css("display"); + elem.remove(); + + // If the simple way fails, + // get element's real default display by attaching it to a temp iframe + if ( display === "none" || display === "" ) { + // Use the already-created iframe if possible + iframe = document.body.appendChild( + iframe || jQuery.extend( document.createElement("iframe"), { + frameBorder: 0, + width: 0, + height: 0 + }) + ); + + // Create a cacheable copy of the iframe document on first call. 
+ // IE and Opera will allow us to reuse the iframeDoc without re-writing the fake HTML + // document to it; WebKit & Firefox won't allow reusing the iframe document. + if ( !iframeDoc || !iframe.createElement ) { + iframeDoc = ( iframe.contentWindow || iframe.contentDocument ).document; + iframeDoc.write(""); + iframeDoc.close(); + } + + elem = iframeDoc.body.appendChild( iframeDoc.createElement(nodeName) ); + + display = curCSS( elem, "display" ); + document.body.removeChild( iframe ); + } + + // Store the correct default display + elemdisplay[ nodeName ] = display; + + return display; +} + +jQuery.each([ "height", "width" ], function( i, name ) { + jQuery.cssHooks[ name ] = { + get: function( elem, computed, extra ) { + if ( computed ) { + // certain elements can have dimension info if we invisibly show them + // however, it must have a current display style that would benefit from this + if ( elem.offsetWidth === 0 && rdisplayswap.test( curCSS( elem, "display" ) ) ) { + return jQuery.swap( elem, cssShow, function() { + return getWidthOrHeight( elem, name, extra ); + }); + } else { + return getWidthOrHeight( elem, name, extra ); + } + } + }, + + set: function( elem, value, extra ) { + return setPositiveNumber( elem, value, extra ? + augmentWidthOrHeight( + elem, + name, + extra, + jQuery.support.boxSizing && jQuery.css( elem, "boxSizing" ) === "border-box" + ) : 0 + ); + } + }; +}); + +if ( !jQuery.support.opacity ) { + jQuery.cssHooks.opacity = { + get: function( elem, computed ) { + // IE uses filters for opacity + return ropacity.test( (computed && elem.currentStyle ? elem.currentStyle.filter : elem.style.filter) || "" ) ? + ( 0.01 * parseFloat( RegExp.$1 ) ) + "" : + computed ? "1" : ""; + }, + + set: function( elem, value ) { + var style = elem.style, + currentStyle = elem.currentStyle, + opacity = jQuery.isNumeric( value ) ? "alpha(opacity=" + value * 100 + ")" : "", + filter = currentStyle && currentStyle.filter || style.filter || ""; + + // IE has trouble with opacity if it does not have layout + // Force it by setting the zoom level + style.zoom = 1; + + // if setting opacity to 1, and no other filters exist - attempt to remove filter attribute #6652 + if ( value >= 1 && jQuery.trim( filter.replace( ralpha, "" ) ) === "" && + style.removeAttribute ) { + + // Setting style.filter to null, "" & " " still leave "filter:" in the cssText + // if "filter:" is present at all, clearType is disabled, we want to avoid this + // style.removeAttribute is IE Only, but so apparently is this code path... + style.removeAttribute( "filter" ); + + // if there there is no filter style applied in a css rule, we are done + if ( currentStyle && !currentStyle.filter ) { + return; + } + } + + // otherwise, set new filter values + style.filter = ralpha.test( filter ) ? 
+ filter.replace( ralpha, opacity ) : + filter + " " + opacity; + } + }; +} + +// These hooks cannot be added until DOM ready because the support test +// for it is not run until after DOM ready +jQuery(function() { + if ( !jQuery.support.reliableMarginRight ) { + jQuery.cssHooks.marginRight = { + get: function( elem, computed ) { + // WebKit Bug 13343 - getComputedStyle returns wrong value for margin-right + // Work around by temporarily setting element display to inline-block + return jQuery.swap( elem, { "display": "inline-block" }, function() { + if ( computed ) { + return curCSS( elem, "marginRight" ); + } + }); + } + }; + } + + // Webkit bug: https://bugs.webkit.org/show_bug.cgi?id=29084 + // getComputedStyle returns percent when specified for top/left/bottom/right + // rather than make the css module depend on the offset module, we just check for it here + if ( !jQuery.support.pixelPosition && jQuery.fn.position ) { + jQuery.each( [ "top", "left" ], function( i, prop ) { + jQuery.cssHooks[ prop ] = { + get: function( elem, computed ) { + if ( computed ) { + var ret = curCSS( elem, prop ); + // if curCSS returns percentage, fallback to offset + return rnumnonpx.test( ret ) ? jQuery( elem ).position()[ prop ] + "px" : ret; + } + } + }; + }); + } + +}); + +if ( jQuery.expr && jQuery.expr.filters ) { + jQuery.expr.filters.hidden = function( elem ) { + return ( elem.offsetWidth === 0 && elem.offsetHeight === 0 ) || (!jQuery.support.reliableHiddenOffsets && ((elem.style && elem.style.display) || curCSS( elem, "display" )) === "none"); + }; + + jQuery.expr.filters.visible = function( elem ) { + return !jQuery.expr.filters.hidden( elem ); + }; +} + +// These hooks are used by animate to expand properties +jQuery.each({ + margin: "", + padding: "", + border: "Width" +}, function( prefix, suffix ) { + jQuery.cssHooks[ prefix + suffix ] = { + expand: function( value ) { + var i, + + // assumes a single number if not a string + parts = typeof value === "string" ? value.split(" ") : [ value ], + expanded = {}; + + for ( i = 0; i < 4; i++ ) { + expanded[ prefix + cssExpand[ i ] + suffix ] = + parts[ i ] || parts[ i - 2 ] || parts[ 0 ]; + } + + return expanded; + } + }; + + if ( !rmargin.test( prefix ) ) { + jQuery.cssHooks[ prefix + suffix ].set = setPositiveNumber; + } +}); +var r20 = /%20/g, + rbracket = /\[\]$/, + rCRLF = /\r?\n/g, + rinput = /^(?:color|date|datetime|datetime-local|email|hidden|month|number|password|range|search|tel|text|time|url|week)$/i, + rselectTextarea = /^(?:select|textarea)/i; + +jQuery.fn.extend({ + serialize: function() { + return jQuery.param( this.serializeArray() ); + }, + serializeArray: function() { + return this.map(function(){ + return this.elements ? jQuery.makeArray( this.elements ) : this; + }) + .filter(function(){ + return this.name && !this.disabled && + ( this.checked || rselectTextarea.test( this.nodeName ) || + rinput.test( this.type ) ); + }) + .map(function( i, elem ){ + var val = jQuery( this ).val(); + + return val == null ? + null : + jQuery.isArray( val ) ? + jQuery.map( val, function( val, i ){ + return { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + }) : + { name: elem.name, value: val.replace( rCRLF, "\r\n" ) }; + }).get(); + } +}); + +//Serialize an array of form elements or a set of +//key/values into a query string +jQuery.param = function( a, traditional ) { + var prefix, + s = [], + add = function( key, value ) { + // If value is a function, invoke it and return its value + value = jQuery.isFunction( value ) ? 
value() : ( value == null ? "" : value ); + s[ s.length ] = encodeURIComponent( key ) + "=" + encodeURIComponent( value ); + }; + + // Set traditional to true for jQuery <= 1.3.2 behavior. + if ( traditional === undefined ) { + traditional = jQuery.ajaxSettings && jQuery.ajaxSettings.traditional; + } + + // If an array was passed in, assume that it is an array of form elements. + if ( jQuery.isArray( a ) || ( a.jquery && !jQuery.isPlainObject( a ) ) ) { + // Serialize the form elements + jQuery.each( a, function() { + add( this.name, this.value ); + }); + + } else { + // If traditional, encode the "old" way (the way 1.3.2 or older + // did it), otherwise encode params recursively. + for ( prefix in a ) { + buildParams( prefix, a[ prefix ], traditional, add ); + } + } + + // Return the resulting serialization + return s.join( "&" ).replace( r20, "+" ); +}; + +function buildParams( prefix, obj, traditional, add ) { + var name; + + if ( jQuery.isArray( obj ) ) { + // Serialize array item. + jQuery.each( obj, function( i, v ) { + if ( traditional || rbracket.test( prefix ) ) { + // Treat each array item as a scalar. + add( prefix, v ); + + } else { + // If array item is non-scalar (array or object), encode its + // numeric index to resolve deserialization ambiguity issues. + // Note that rack (as of 1.0.0) can't currently deserialize + // nested arrays properly, and attempting to do so may cause + // a server error. Possible fixes are to modify rack's + // deserialization algorithm or to provide an option or flag + // to force array serialization to be shallow. + buildParams( prefix + "[" + ( typeof v === "object" ? i : "" ) + "]", v, traditional, add ); + } + }); + + } else if ( !traditional && jQuery.type( obj ) === "object" ) { + // Serialize object item. + for ( name in obj ) { + buildParams( prefix + "[" + name + "]", obj[ name ], traditional, add ); + } + + } else { + // Serialize scalar item. 
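+ // (Illustrative end-to-end behavior: with the default traditional === false,
+ // jQuery.param({ a: [ 1, 2 ], b: { c: 3 } }) produces
+ // "a%5B%5D=1&a%5B%5D=2&b%5Bc%5D=3", i.e. a[]=1&a[]=2&b[c]=3, whereas
+ // traditional === true flattens it to "a=1&a=2&b=%5Bobject+Object%5D".)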
+ add( prefix, obj ); + } +} +var + // Document location + ajaxLocParts, + ajaxLocation, + + rhash = /#.*$/, + rheaders = /^(.*?):[ \t]*([^\r\n]*)\r?$/mg, // IE leaves an \r character at EOL + // #7653, #8125, #8152: local protocol detection + rlocalProtocol = /^(?:about|app|app\-storage|.+\-extension|file|res|widget):$/, + rnoContent = /^(?:GET|HEAD)$/, + rprotocol = /^\/\//, + rquery = /\?/, + rscript = /)<[^<]*)*<\/script>/gi, + rts = /([?&])_=[^&]*/, + rurl = /^([\w\+\.\-]+:)(?:\/\/([^\/?#:]*)(?::(\d+)|)|)/, + + // Keep a copy of the old load method + _load = jQuery.fn.load, + + /* Prefilters + * 1) They are useful to introduce custom dataTypes (see ajax/jsonp.js for an example) + * 2) These are called: + * - BEFORE asking for a transport + * - AFTER param serialization (s.data is a string if s.processData is true) + * 3) key is the dataType + * 4) the catchall symbol "*" can be used + * 5) execution will start with transport dataType and THEN continue down to "*" if needed + */ + prefilters = {}, + + /* Transports bindings + * 1) key is the dataType + * 2) the catchall symbol "*" can be used + * 3) selection will start with transport dataType and THEN go to "*" if needed + */ + transports = {}, + + // Avoid comment-prolog char sequence (#10098); must appease lint and evade compression + allTypes = ["*/"] + ["*"]; + +// #8138, IE may throw an exception when accessing +// a field from window.location if document.domain has been set +try { + ajaxLocation = location.href; +} catch( e ) { + // Use the href attribute of an A element + // since IE will modify it given document.location + ajaxLocation = document.createElement( "a" ); + ajaxLocation.href = ""; + ajaxLocation = ajaxLocation.href; +} + +// Segment location into parts +ajaxLocParts = rurl.exec( ajaxLocation.toLowerCase() ) || []; + +// Base "constructor" for jQuery.ajaxPrefilter and jQuery.ajaxTransport +function addToPrefiltersOrTransports( structure ) { + + // dataTypeExpression is optional and defaults to "*" + return function( dataTypeExpression, func ) { + + if ( typeof dataTypeExpression !== "string" ) { + func = dataTypeExpression; + dataTypeExpression = "*"; + } + + var dataType, list, placeBefore, + dataTypes = dataTypeExpression.toLowerCase().split( core_rspace ), + i = 0, + length = dataTypes.length; + + if ( jQuery.isFunction( func ) ) { + // For each dataType in the dataTypeExpression + for ( ; i < length; i++ ) { + dataType = dataTypes[ i ]; + // We control if we're asked to add before + // any existing element + placeBefore = /^\+/.test( dataType ); + if ( placeBefore ) { + dataType = dataType.substr( 1 ) || "*"; + } + list = structure[ dataType ] = structure[ dataType ] || []; + // then we add to the structure accordingly + list[ placeBefore ? "unshift" : "push" ]( func ); + } + } + }; +} + +// Base inspection function for prefilters and transports +function inspectPrefiltersOrTransports( structure, options, originalOptions, jqXHR, + dataType /* internal */, inspected /* internal */ ) { + + dataType = dataType || options.dataTypes[ 0 ]; + inspected = inspected || {}; + + inspected[ dataType ] = true; + + var selection, + list = structure[ dataType ], + i = 0, + length = list ? 
list.length : 0, + executeOnly = ( structure === prefilters ); + + for ( ; i < length && ( executeOnly || !selection ); i++ ) { + selection = list[ i ]( options, originalOptions, jqXHR ); + // If we got redirected to another dataType + // we try there if executing only and not done already + if ( typeof selection === "string" ) { + if ( !executeOnly || inspected[ selection ] ) { + selection = undefined; + } else { + options.dataTypes.unshift( selection ); + selection = inspectPrefiltersOrTransports( + structure, options, originalOptions, jqXHR, selection, inspected ); + } + } + } + // If we're only executing or nothing was selected + // we try the catchall dataType if not done already + if ( ( executeOnly || !selection ) && !inspected[ "*" ] ) { + selection = inspectPrefiltersOrTransports( + structure, options, originalOptions, jqXHR, "*", inspected ); + } + // unnecessary when only executing (prefilters) + // but it'll be ignored by the caller in that case + return selection; +} + +// A special extend for ajax options +// that takes "flat" options (not to be deep extended) +// Fixes #9887 +function ajaxExtend( target, src ) { + var key, deep, + flatOptions = jQuery.ajaxSettings.flatOptions || {}; + for ( key in src ) { + if ( src[ key ] !== undefined ) { + ( flatOptions[ key ] ? target : ( deep || ( deep = {} ) ) )[ key ] = src[ key ]; + } + } + if ( deep ) { + jQuery.extend( true, target, deep ); + } +} + +jQuery.fn.load = function( url, params, callback ) { + if ( typeof url !== "string" && _load ) { + return _load.apply( this, arguments ); + } + + // Don't do a request if no elements are being requested + if ( !this.length ) { + return this; + } + + var selector, type, response, + self = this, + off = url.indexOf(" "); + + if ( off >= 0 ) { + selector = url.slice( off, url.length ); + url = url.slice( 0, off ); + } + + // If it's a function + if ( jQuery.isFunction( params ) ) { + + // We assume that it's the callback + callback = params; + params = undefined; + + // Otherwise, build a param string + } else if ( params && typeof params === "object" ) { + type = "POST"; + } + + // Request the remote document + jQuery.ajax({ + url: url, + + // if "type" variable is undefined, then "GET" method will be used + type: type, + dataType: "html", + data: params, + complete: function( jqXHR, status ) { + if ( callback ) { + self.each( callback, response || [ jqXHR.responseText, status, jqXHR ] ); + } + } + }).done(function( responseText ) { + + // Save response for use in complete callback + response = arguments; + + // See if a selector was specified + self.html( selector ? + + // Create a dummy div to hold the results + jQuery("
") + + // inject the contents of the document in, removing the scripts + // to avoid any 'Permission Denied' errors in IE + .append( responseText.replace( rscript, "" ) ) + + // Locate the specified elements + .find( selector ) : + + // If not, just inject the full result + responseText ); + + }); + + return this; +}; + +// Attach a bunch of functions for handling common AJAX events +jQuery.each( "ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split( " " ), function( i, o ){ + jQuery.fn[ o ] = function( f ){ + return this.on( o, f ); + }; +}); + +jQuery.each( [ "get", "post" ], function( i, method ) { + jQuery[ method ] = function( url, data, callback, type ) { + // shift arguments if data argument was omitted + if ( jQuery.isFunction( data ) ) { + type = type || callback; + callback = data; + data = undefined; + } + + return jQuery.ajax({ + type: method, + url: url, + data: data, + success: callback, + dataType: type + }); + }; +}); + +jQuery.extend({ + + getScript: function( url, callback ) { + return jQuery.get( url, undefined, callback, "script" ); + }, + + getJSON: function( url, data, callback ) { + return jQuery.get( url, data, callback, "json" ); + }, + + // Creates a full fledged settings object into target + // with both ajaxSettings and settings fields. + // If target is omitted, writes into ajaxSettings. + ajaxSetup: function( target, settings ) { + if ( settings ) { + // Building a settings object + ajaxExtend( target, jQuery.ajaxSettings ); + } else { + // Extending ajaxSettings + settings = target; + target = jQuery.ajaxSettings; + } + ajaxExtend( target, settings ); + return target; + }, + + ajaxSettings: { + url: ajaxLocation, + isLocal: rlocalProtocol.test( ajaxLocParts[ 1 ] ), + global: true, + type: "GET", + contentType: "application/x-www-form-urlencoded; charset=UTF-8", + processData: true, + async: true, + /* + timeout: 0, + data: null, + dataType: null, + username: null, + password: null, + cache: null, + throws: false, + traditional: false, + headers: {}, + */ + + accepts: { + xml: "application/xml, text/xml", + html: "text/html", + text: "text/plain", + json: "application/json, text/javascript", + "*": allTypes + }, + + contents: { + xml: /xml/, + html: /html/, + json: /json/ + }, + + responseFields: { + xml: "responseXML", + text: "responseText" + }, + + // List of data converters + // 1) key format is "source_type destination_type" (a single space in-between) + // 2) the catchall symbol "*" can be used for source_type + converters: { + + // Convert anything to text + "* text": window.String, + + // Text to html (true = no transformation) + "text html": true, + + // Evaluate text as a json expression + "text json": jQuery.parseJSON, + + // Parse text as xml + "text xml": jQuery.parseXML + }, + + // For options that shouldn't be deep extended: + // you can add your own custom options here if + // and when you create one that shouldn't be + // deep extended (see ajaxExtend) + flatOptions: { + context: true, + url: true + } + }, + + ajaxPrefilter: addToPrefiltersOrTransports( prefilters ), + ajaxTransport: addToPrefiltersOrTransports( transports ), + + // Main method + ajax: function( url, options ) { + + // If url is an object, simulate pre-1.5 signature + if ( typeof url === "object" ) { + options = url; + url = undefined; + } + + // Force options to be an object + options = options || {}; + + var // ifModified key + ifModifiedKey, + // Response headers + responseHeadersString, + responseHeaders, + // transport + transport, + // timeout handle + 
timeoutTimer, + // Cross-domain detection vars + parts, + // To know if global events are to be dispatched + fireGlobals, + // Loop variable + i, + // Create the final options object + s = jQuery.ajaxSetup( {}, options ), + // Callbacks context + callbackContext = s.context || s, + // Context for global events + // It's the callbackContext if one was provided in the options + // and if it's a DOM node or a jQuery collection + globalEventContext = callbackContext !== s && + ( callbackContext.nodeType || callbackContext instanceof jQuery ) ? + jQuery( callbackContext ) : jQuery.event, + // Deferreds + deferred = jQuery.Deferred(), + completeDeferred = jQuery.Callbacks( "once memory" ), + // Status-dependent callbacks + statusCode = s.statusCode || {}, + // Headers (they are sent all at once) + requestHeaders = {}, + requestHeadersNames = {}, + // The jqXHR state + state = 0, + // Default abort message + strAbort = "canceled", + // Fake xhr + jqXHR = { + + readyState: 0, + + // Caches the header + setRequestHeader: function( name, value ) { + if ( !state ) { + var lname = name.toLowerCase(); + name = requestHeadersNames[ lname ] = requestHeadersNames[ lname ] || name; + requestHeaders[ name ] = value; + } + return this; + }, + + // Raw string + getAllResponseHeaders: function() { + return state === 2 ? responseHeadersString : null; + }, + + // Builds headers hashtable if needed + getResponseHeader: function( key ) { + var match; + if ( state === 2 ) { + if ( !responseHeaders ) { + responseHeaders = {}; + while( ( match = rheaders.exec( responseHeadersString ) ) ) { + responseHeaders[ match[1].toLowerCase() ] = match[ 2 ]; + } + } + match = responseHeaders[ key.toLowerCase() ]; + } + return match === undefined ? null : match; + }, + + // Overrides response content-type header + overrideMimeType: function( type ) { + if ( !state ) { + s.mimeType = type; + } + return this; + }, + + // Cancel the request + abort: function( statusText ) { + statusText = statusText || strAbort; + if ( transport ) { + transport.abort( statusText ); + } + done( 0, statusText ); + return this; + } + }; + + // Callback for when everything is done + // It is defined here because jslint complains if it is declared + // at the end of the function (which would be more logical and readable) + function done( status, nativeStatusText, responses, headers ) { + var isSuccess, success, error, response, modified, + statusText = nativeStatusText; + + // Called once + if ( state === 2 ) { + return; + } + + // State is "done" now + state = 2; + + // Clear timeout if it exists + if ( timeoutTimer ) { + clearTimeout( timeoutTimer ); + } + + // Dereference transport for early garbage collection + // (no matter how long the jqXHR object will be used) + transport = undefined; + + // Cache response headers + responseHeadersString = headers || ""; + + // Set readyState + jqXHR.readyState = status > 0 ? 4 : 0; + + // Get response data + if ( responses ) { + response = ajaxHandleResponses( s, jqXHR, responses ); + } + + // If successful, handle type chaining + if ( status >= 200 && status < 300 || status === 304 ) { + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. 
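+ // Here the response's validators are cached rather than sent: jQuery.lastModified
+ // and jQuery.etag are keyed by ifModifiedKey (the request URL captured before any
+ // anti-cache "_=" parameter), so a later request made with ifModified: true can
+ // emit If-Modified-Since / If-None-Match headers and report a 304 as "notmodified".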
+ if ( s.ifModified ) { + + modified = jqXHR.getResponseHeader("Last-Modified"); + if ( modified ) { + jQuery.lastModified[ ifModifiedKey ] = modified; + } + modified = jqXHR.getResponseHeader("Etag"); + if ( modified ) { + jQuery.etag[ ifModifiedKey ] = modified; + } + } + + // If not modified + if ( status === 304 ) { + + statusText = "notmodified"; + isSuccess = true; + + // If we have data + } else { + + isSuccess = ajaxConvert( s, response ); + statusText = isSuccess.state; + success = isSuccess.data; + error = isSuccess.error; + isSuccess = !error; + } + } else { + // We extract error from statusText + // then normalize statusText and status for non-aborts + error = statusText; + if ( !statusText || status ) { + statusText = "error"; + if ( status < 0 ) { + status = 0; + } + } + } + + // Set data for the fake xhr object + jqXHR.status = status; + jqXHR.statusText = ( nativeStatusText || statusText ) + ""; + + // Success/Error + if ( isSuccess ) { + deferred.resolveWith( callbackContext, [ success, statusText, jqXHR ] ); + } else { + deferred.rejectWith( callbackContext, [ jqXHR, statusText, error ] ); + } + + // Status-dependent callbacks + jqXHR.statusCode( statusCode ); + statusCode = undefined; + + if ( fireGlobals ) { + globalEventContext.trigger( "ajax" + ( isSuccess ? "Success" : "Error" ), + [ jqXHR, s, isSuccess ? success : error ] ); + } + + // Complete + completeDeferred.fireWith( callbackContext, [ jqXHR, statusText ] ); + + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxComplete", [ jqXHR, s ] ); + // Handle the global AJAX counter + if ( !( --jQuery.active ) ) { + jQuery.event.trigger( "ajaxStop" ); + } + } + } + + // Attach deferreds + deferred.promise( jqXHR ); + jqXHR.success = jqXHR.done; + jqXHR.error = jqXHR.fail; + jqXHR.complete = completeDeferred.add; + + // Status-dependent callbacks + jqXHR.statusCode = function( map ) { + if ( map ) { + var tmp; + if ( state < 2 ) { + for ( tmp in map ) { + statusCode[ tmp ] = [ statusCode[tmp], map[tmp] ]; + } + } else { + tmp = map[ jqXHR.status ]; + jqXHR.always( tmp ); + } + } + return this; + }; + + // Remove hash character (#7531: and string promotion) + // Add protocol if not provided (#5866: IE7 issue with protocol-less urls) + // We also use the url parameter if available + s.url = ( ( url || s.url ) + "" ).replace( rhash, "" ).replace( rprotocol, ajaxLocParts[ 1 ] + "//" ); + + // Extract dataTypes list + s.dataTypes = jQuery.trim( s.dataType || "*" ).toLowerCase().split( core_rspace ); + + // A cross-domain request is in order when we have a protocol:host:port mismatch + if ( s.crossDomain == null ) { + parts = rurl.exec( s.url.toLowerCase() ); + s.crossDomain = !!( parts && + ( parts[ 1 ] !== ajaxLocParts[ 1 ] || parts[ 2 ] !== ajaxLocParts[ 2 ] || + ( parts[ 3 ] || ( parts[ 1 ] === "http:" ? 80 : 443 ) ) != + ( ajaxLocParts[ 3 ] || ( ajaxLocParts[ 1 ] === "http:" ? 
80 : 443 ) ) ) + ); + } + + // Convert data if not already a string + if ( s.data && s.processData && typeof s.data !== "string" ) { + s.data = jQuery.param( s.data, s.traditional ); + } + + // Apply prefilters + inspectPrefiltersOrTransports( prefilters, s, options, jqXHR ); + + // If request was aborted inside a prefilter, stop there + if ( state === 2 ) { + return jqXHR; + } + + // We can fire global events as of now if asked to + fireGlobals = s.global; + + // Uppercase the type + s.type = s.type.toUpperCase(); + + // Determine if request has content + s.hasContent = !rnoContent.test( s.type ); + + // Watch for a new set of requests + if ( fireGlobals && jQuery.active++ === 0 ) { + jQuery.event.trigger( "ajaxStart" ); + } + + // More options handling for requests with no content + if ( !s.hasContent ) { + + // If data is available, append data to url + if ( s.data ) { + s.url += ( rquery.test( s.url ) ? "&" : "?" ) + s.data; + // #9682: remove data so that it's not used in an eventual retry + delete s.data; + } + + // Get ifModifiedKey before adding the anti-cache parameter + ifModifiedKey = s.url; + + // Add anti-cache in url if needed + if ( s.cache === false ) { + + var ts = jQuery.now(), + // try replacing _= if it is there + ret = s.url.replace( rts, "$1_=" + ts ); + + // if nothing was replaced, add timestamp to the end + s.url = ret + ( ( ret === s.url ) ? ( rquery.test( s.url ) ? "&" : "?" ) + "_=" + ts : "" ); + } + } + + // Set the correct header, if data is being sent + if ( s.data && s.hasContent && s.contentType !== false || options.contentType ) { + jqXHR.setRequestHeader( "Content-Type", s.contentType ); + } + + // Set the If-Modified-Since and/or If-None-Match header, if in ifModified mode. + if ( s.ifModified ) { + ifModifiedKey = ifModifiedKey || s.url; + if ( jQuery.lastModified[ ifModifiedKey ] ) { + jqXHR.setRequestHeader( "If-Modified-Since", jQuery.lastModified[ ifModifiedKey ] ); + } + if ( jQuery.etag[ ifModifiedKey ] ) { + jqXHR.setRequestHeader( "If-None-Match", jQuery.etag[ ifModifiedKey ] ); + } + } + + // Set the Accepts header for the server, depending on the dataType + jqXHR.setRequestHeader( + "Accept", + s.dataTypes[ 0 ] && s.accepts[ s.dataTypes[0] ] ? + s.accepts[ s.dataTypes[0] ] + ( s.dataTypes[ 0 ] !== "*" ? 
", " + allTypes + "; q=0.01" : "" ) : + s.accepts[ "*" ] + ); + + // Check for headers option + for ( i in s.headers ) { + jqXHR.setRequestHeader( i, s.headers[ i ] ); + } + + // Allow custom headers/mimetypes and early abort + if ( s.beforeSend && ( s.beforeSend.call( callbackContext, jqXHR, s ) === false || state === 2 ) ) { + // Abort if not done already and return + return jqXHR.abort(); + + } + + // aborting is no longer a cancellation + strAbort = "abort"; + + // Install callbacks on deferreds + for ( i in { success: 1, error: 1, complete: 1 } ) { + jqXHR[ i ]( s[ i ] ); + } + + // Get transport + transport = inspectPrefiltersOrTransports( transports, s, options, jqXHR ); + + // If no transport, we auto-abort + if ( !transport ) { + done( -1, "No Transport" ); + } else { + jqXHR.readyState = 1; + // Send global event + if ( fireGlobals ) { + globalEventContext.trigger( "ajaxSend", [ jqXHR, s ] ); + } + // Timeout + if ( s.async && s.timeout > 0 ) { + timeoutTimer = setTimeout( function(){ + jqXHR.abort( "timeout" ); + }, s.timeout ); + } + + try { + state = 1; + transport.send( requestHeaders, done ); + } catch (e) { + // Propagate exception as error if not done + if ( state < 2 ) { + done( -1, e ); + // Simply rethrow otherwise + } else { + throw e; + } + } + } + + return jqXHR; + }, + + // Counter for holding the number of active queries + active: 0, + + // Last-Modified header cache for next request + lastModified: {}, + etag: {} + +}); + +/* Handles responses to an ajax request: + * - sets all responseXXX fields accordingly + * - finds the right dataType (mediates between content-type and expected dataType) + * - returns the corresponding response + */ +function ajaxHandleResponses( s, jqXHR, responses ) { + + var ct, type, finalDataType, firstDataType, + contents = s.contents, + dataTypes = s.dataTypes, + responseFields = s.responseFields; + + // Fill responseXXX fields + for ( type in responseFields ) { + if ( type in responses ) { + jqXHR[ responseFields[type] ] = responses[ type ]; + } + } + + // Remove auto dataType and get content-type in the process + while( dataTypes[ 0 ] === "*" ) { + dataTypes.shift(); + if ( ct === undefined ) { + ct = s.mimeType || jqXHR.getResponseHeader( "content-type" ); + } + } + + // Check if we're dealing with a known content-type + if ( ct ) { + for ( type in contents ) { + if ( contents[ type ] && contents[ type ].test( ct ) ) { + dataTypes.unshift( type ); + break; + } + } + } + + // Check to see if we have a response for the expected dataType + if ( dataTypes[ 0 ] in responses ) { + finalDataType = dataTypes[ 0 ]; + } else { + // Try convertible dataTypes + for ( type in responses ) { + if ( !dataTypes[ 0 ] || s.converters[ type + " " + dataTypes[0] ] ) { + finalDataType = type; + break; + } + if ( !firstDataType ) { + firstDataType = type; + } + } + // Or just use first one + finalDataType = finalDataType || firstDataType; + } + + // If we found a dataType + // We add the dataType to the list if needed + // and return the corresponding response + if ( finalDataType ) { + if ( finalDataType !== dataTypes[ 0 ] ) { + dataTypes.unshift( finalDataType ); + } + return responses[ finalDataType ]; + } +} + +// Chain conversions given the request and the original response +function ajaxConvert( s, response ) { + + var conv, conv2, current, tmp, + // Work with a copy of dataTypes in case we need to modify it for conversion + dataTypes = s.dataTypes.slice(), + prev = dataTypes[ 0 ], + converters = {}, + i = 0; + + // Apply the dataFilter if provided + 
if ( s.dataFilter ) { + response = s.dataFilter( response, s.dataType ); + } + + // Create converters map with lowercased keys + if ( dataTypes[ 1 ] ) { + for ( conv in s.converters ) { + converters[ conv.toLowerCase() ] = s.converters[ conv ]; + } + } + + // Convert to each sequential dataType, tolerating list modification + for ( ; (current = dataTypes[++i]); ) { + + // There's only work to do if current dataType is non-auto + if ( current !== "*" ) { + + // Convert response if prev dataType is non-auto and differs from current + if ( prev !== "*" && prev !== current ) { + + // Seek a direct converter + conv = converters[ prev + " " + current ] || converters[ "* " + current ]; + + // If none found, seek a pair + if ( !conv ) { + for ( conv2 in converters ) { + + // If conv2 outputs current + tmp = conv2.split(" "); + if ( tmp[ 1 ] === current ) { + + // If prev can be converted to accepted input + conv = converters[ prev + " " + tmp[ 0 ] ] || + converters[ "* " + tmp[ 0 ] ]; + if ( conv ) { + // Condense equivalence converters + if ( conv === true ) { + conv = converters[ conv2 ]; + + // Otherwise, insert the intermediate dataType + } else if ( converters[ conv2 ] !== true ) { + current = tmp[ 0 ]; + dataTypes.splice( i--, 0, current ); + } + + break; + } + } + } + } + + // Apply converter (if not an equivalence) + if ( conv !== true ) { + + // Unless errors are allowed to bubble, catch and return them + if ( conv && s["throws"] ) { + response = conv( response ); + } else { + try { + response = conv( response ); + } catch ( e ) { + return { state: "parsererror", error: conv ? e : "No conversion from " + prev + " to " + current }; + } + } + } + } + + // Update prev for next iteration + prev = current; + } + } + + return { state: "success", data: response }; +} +var oldCallbacks = [], + rquestion = /\?/, + rjsonp = /(=)\?(?=&|$)|\?\?/, + nonce = jQuery.now(); + +// Default jsonp settings +jQuery.ajaxSetup({ + jsonp: "callback", + jsonpCallback: function() { + var callback = oldCallbacks.pop() || ( jQuery.expando + "_" + ( nonce++ ) ); + this[ callback ] = true; + return callback; + } +}); + +// Detect, normalize options and install callbacks for jsonp requests +jQuery.ajaxPrefilter( "json jsonp", function( s, originalSettings, jqXHR ) { + + var callbackName, overwritten, responseContainer, + data = s.data, + url = s.url, + hasCallback = s.jsonp !== false, + replaceInUrl = hasCallback && rjsonp.test( url ), + replaceInData = hasCallback && !replaceInUrl && typeof data === "string" && + !( s.contentType || "" ).indexOf("application/x-www-form-urlencoded") && + rjsonp.test( data ); + + // Handle iff the expected data type is "jsonp" or we have a parameter to set + if ( s.dataTypes[ 0 ] === "jsonp" || replaceInUrl || replaceInData ) { + + // Get callback name, remembering preexisting value associated with it + callbackName = s.jsonpCallback = jQuery.isFunction( s.jsonpCallback ) ? + s.jsonpCallback() : + s.jsonpCallback; + overwritten = window[ callbackName ]; + + // Insert callback into url or form data + if ( replaceInUrl ) { + s.url = url.replace( rjsonp, "$1" + callbackName ); + } else if ( replaceInData ) { + s.data = data.replace( rjsonp, "$1" + callbackName ); + } else if ( hasCallback ) { + s.url += ( rquestion.test( url ) ? "&" : "?" 
) + s.jsonp + "=" + callbackName; + } + + // Use data converter to retrieve json after script execution + s.converters["script json"] = function() { + if ( !responseContainer ) { + jQuery.error( callbackName + " was not called" ); + } + return responseContainer[ 0 ]; + }; + + // force json dataType + s.dataTypes[ 0 ] = "json"; + + // Install callback + window[ callbackName ] = function() { + responseContainer = arguments; + }; + + // Clean-up function (fires after converters) + jqXHR.always(function() { + // Restore preexisting value + window[ callbackName ] = overwritten; + + // Save back as free + if ( s[ callbackName ] ) { + // make sure that re-using the options doesn't screw things around + s.jsonpCallback = originalSettings.jsonpCallback; + + // save the callback name for future use + oldCallbacks.push( callbackName ); + } + + // Call if it was a function and we have a response + if ( responseContainer && jQuery.isFunction( overwritten ) ) { + overwritten( responseContainer[ 0 ] ); + } + + responseContainer = overwritten = undefined; + }); + + // Delegate to script + return "script"; + } +}); +// Install script dataType +jQuery.ajaxSetup({ + accepts: { + script: "text/javascript, application/javascript, application/ecmascript, application/x-ecmascript" + }, + contents: { + script: /javascript|ecmascript/ + }, + converters: { + "text script": function( text ) { + jQuery.globalEval( text ); + return text; + } + } +}); + +// Handle cache's special case and global +jQuery.ajaxPrefilter( "script", function( s ) { + if ( s.cache === undefined ) { + s.cache = false; + } + if ( s.crossDomain ) { + s.type = "GET"; + s.global = false; + } +}); + +// Bind script tag hack transport +jQuery.ajaxTransport( "script", function(s) { + + // This transport only deals with cross domain requests + if ( s.crossDomain ) { + + var script, + head = document.head || document.getElementsByTagName( "head" )[0] || document.documentElement; + + return { + + send: function( _, callback ) { + + script = document.createElement( "script" ); + + script.async = "async"; + + if ( s.scriptCharset ) { + script.charset = s.scriptCharset; + } + + script.src = s.url; + + // Attach handlers for all browsers + script.onload = script.onreadystatechange = function( _, isAbort ) { + + if ( isAbort || !script.readyState || /loaded|complete/.test( script.readyState ) ) { + + // Handle memory leak in IE + script.onload = script.onreadystatechange = null; + + // Remove the script + if ( head && script.parentNode ) { + head.removeChild( script ); + } + + // Dereference the script + script = undefined; + + // Callback if not abort + if ( !isAbort ) { + callback( 200, "success" ); + } + } + }; + // Use insertBefore instead of appendChild to circumvent an IE6 bug. + // This arises when a base node is used (#2709 and #4378). + head.insertBefore( script, head.firstChild ); + }, + + abort: function() { + if ( script ) { + script.onload( 0, 1 ); + } + } + }; + } +}); +var xhrCallbacks, + // #5280: Internet Explorer will keep connections alive if we don't abort on unload + xhrOnUnloadAbort = window.ActiveXObject ? 
function() { + // Abort all pending requests + for ( var key in xhrCallbacks ) { + xhrCallbacks[ key ]( 0, 1 ); + } + } : false, + xhrId = 0; + +// Functions to create xhrs +function createStandardXHR() { + try { + return new window.XMLHttpRequest(); + } catch( e ) {} +} + +function createActiveXHR() { + try { + return new window.ActiveXObject( "Microsoft.XMLHTTP" ); + } catch( e ) {} +} + +// Create the request object +// (This is still attached to ajaxSettings for backward compatibility) +jQuery.ajaxSettings.xhr = window.ActiveXObject ? + /* Microsoft failed to properly + * implement the XMLHttpRequest in IE7 (can't request local files), + * so we use the ActiveXObject when it is available + * Additionally XMLHttpRequest can be disabled in IE7/IE8 so + * we need a fallback. + */ + function() { + return !this.isLocal && createStandardXHR() || createActiveXHR(); + } : + // For all other browsers, use the standard XMLHttpRequest object + createStandardXHR; + +// Determine support properties +(function( xhr ) { + jQuery.extend( jQuery.support, { + ajax: !!xhr, + cors: !!xhr && ( "withCredentials" in xhr ) + }); +})( jQuery.ajaxSettings.xhr() ); + +// Create transport if the browser can provide an xhr +if ( jQuery.support.ajax ) { + + jQuery.ajaxTransport(function( s ) { + // Cross domain only allowed if supported through XMLHttpRequest + if ( !s.crossDomain || jQuery.support.cors ) { + + var callback; + + return { + send: function( headers, complete ) { + + // Get a new xhr + var handle, i, + xhr = s.xhr(); + + // Open the socket + // Passing null username, generates a login popup on Opera (#2865) + if ( s.username ) { + xhr.open( s.type, s.url, s.async, s.username, s.password ); + } else { + xhr.open( s.type, s.url, s.async ); + } + + // Apply custom fields if provided + if ( s.xhrFields ) { + for ( i in s.xhrFields ) { + xhr[ i ] = s.xhrFields[ i ]; + } + } + + // Override mime type if needed + if ( s.mimeType && xhr.overrideMimeType ) { + xhr.overrideMimeType( s.mimeType ); + } + + // X-Requested-With header + // For cross-domain requests, seeing as conditions for a preflight are + // akin to a jigsaw puzzle, we simply never set it to be sure. + // (it can always be set on a per-request basis or even using ajaxSetup) + // For same-domain requests, won't change header if already provided. 
+ if ( !s.crossDomain && !headers["X-Requested-With"] ) { + headers[ "X-Requested-With" ] = "XMLHttpRequest"; + } + + // Need an extra try/catch for cross domain requests in Firefox 3 + try { + for ( i in headers ) { + xhr.setRequestHeader( i, headers[ i ] ); + } + } catch( _ ) {} + + // Do send the request + // This may raise an exception which is actually + // handled in jQuery.ajax (so no try/catch here) + xhr.send( ( s.hasContent && s.data ) || null ); + + // Listener + callback = function( _, isAbort ) { + + var status, + statusText, + responseHeaders, + responses, + xml; + + // Firefox throws exceptions when accessing properties + // of an xhr when a network error occurred + // http://helpful.knobs-dials.com/index.php/Component_returned_failure_code:_0x80040111_(NS_ERROR_NOT_AVAILABLE) + try { + + // Was never called and is aborted or complete + if ( callback && ( isAbort || xhr.readyState === 4 ) ) { + + // Only called once + callback = undefined; + + // Do not keep as active anymore + if ( handle ) { + xhr.onreadystatechange = jQuery.noop; + if ( xhrOnUnloadAbort ) { + delete xhrCallbacks[ handle ]; + } + } + + // If it's an abort + if ( isAbort ) { + // Abort it manually if needed + if ( xhr.readyState !== 4 ) { + xhr.abort(); + } + } else { + status = xhr.status; + responseHeaders = xhr.getAllResponseHeaders(); + responses = {}; + xml = xhr.responseXML; + + // Construct response list + if ( xml && xml.documentElement /* #4958 */ ) { + responses.xml = xml; + } + + // When requesting binary data, IE6-9 will throw an exception + // on any attempt to access responseText (#11426) + try { + responses.text = xhr.responseText; + } catch( e ) { + } + + // Firefox throws an exception when accessing + // statusText for faulty cross-domain requests + try { + statusText = xhr.statusText; + } catch( e ) { + // We normalize with Webkit giving an empty statusText + statusText = ""; + } + + // Filter status for non standard behaviors + + // If the request is local and we have data: assume a success + // (success with no data won't get notified, that's the best we + // can do given current implementations) + if ( !status && s.isLocal && !s.crossDomain ) { + status = responses.text ? 
200 : 404; + // IE - #1450: sometimes returns 1223 when it should be 204 + } else if ( status === 1223 ) { + status = 204; + } + } + } + } catch( firefoxAccessException ) { + if ( !isAbort ) { + complete( -1, firefoxAccessException ); + } + } + + // Call complete if needed + if ( responses ) { + complete( status, statusText, responses, responseHeaders ); + } + }; + + if ( !s.async ) { + // if we're in sync mode we fire the callback + callback(); + } else if ( xhr.readyState === 4 ) { + // (IE6 & IE7) if it's in cache and has been + // retrieved directly we need to fire the callback + setTimeout( callback, 0 ); + } else { + handle = ++xhrId; + if ( xhrOnUnloadAbort ) { + // Create the active xhrs callbacks list if needed + // and attach the unload handler + if ( !xhrCallbacks ) { + xhrCallbacks = {}; + jQuery( window ).unload( xhrOnUnloadAbort ); + } + // Add to list of active xhrs callbacks + xhrCallbacks[ handle ] = callback; + } + xhr.onreadystatechange = callback; + } + }, + + abort: function() { + if ( callback ) { + callback(0,1); + } + } + }; + } + }); +} +var fxNow, timerId, + rfxtypes = /^(?:toggle|show|hide)$/, + rfxnum = new RegExp( "^(?:([-+])=|)(" + core_pnum + ")([a-z%]*)$", "i" ), + rrun = /queueHooks$/, + animationPrefilters = [ defaultPrefilter ], + tweeners = { + "*": [function( prop, value ) { + var end, unit, + tween = this.createTween( prop, value ), + parts = rfxnum.exec( value ), + target = tween.cur(), + start = +target || 0, + scale = 1, + maxIterations = 20; + + if ( parts ) { + end = +parts[2]; + unit = parts[3] || ( jQuery.cssNumber[ prop ] ? "" : "px" ); + + // We need to compute starting value + if ( unit !== "px" && start ) { + // Iteratively approximate from a nonzero starting point + // Prefer the current property, because this process will be trivial if it uses the same units + // Fallback to end or a simple constant + start = jQuery.css( tween.elem, prop, true ) || end || 1; + + do { + // If previous iteration zeroed out, double until we get *something* + // Use a string for doubling factor so we don't accidentally see scale as unchanged below + scale = scale || ".5"; + + // Adjust and apply + start = start / scale; + jQuery.style( tween.elem, prop, start + unit ); + + // Update scale, tolerating zero or NaN from tween.cur() + // And breaking the loop if scale is unchanged or perfect, or if we've just had enough + } while ( scale !== (scale = tween.cur() / target) && scale !== 1 && --maxIterations ); + } + + tween.unit = unit; + tween.start = start; + // If a +=/-= token was provided, we're doing a relative animation + tween.end = parts[1] ? 
start + ( parts[1] + 1 ) * end : end; + } + return tween; + }] + }; + +// Animations created synchronously will run synchronously +function createFxNow() { + setTimeout(function() { + fxNow = undefined; + }, 0 ); + return ( fxNow = jQuery.now() ); +} + +function createTweens( animation, props ) { + jQuery.each( props, function( prop, value ) { + var collection = ( tweeners[ prop ] || [] ).concat( tweeners[ "*" ] ), + index = 0, + length = collection.length; + for ( ; index < length; index++ ) { + if ( collection[ index ].call( animation, prop, value ) ) { + + // we're done with this property + return; + } + } + }); +} + +function Animation( elem, properties, options ) { + var result, + index = 0, + tweenerIndex = 0, + length = animationPrefilters.length, + deferred = jQuery.Deferred().always( function() { + // don't match elem in the :animated selector + delete tick.elem; + }), + tick = function() { + var currentTime = fxNow || createFxNow(), + remaining = Math.max( 0, animation.startTime + animation.duration - currentTime ), + // archaic crash bug won't allow us to use 1 - ( 0.5 || 0 ) (#12497) + temp = remaining / animation.duration || 0, + percent = 1 - temp, + index = 0, + length = animation.tweens.length; + + for ( ; index < length ; index++ ) { + animation.tweens[ index ].run( percent ); + } + + deferred.notifyWith( elem, [ animation, percent, remaining ]); + + if ( percent < 1 && length ) { + return remaining; + } else { + deferred.resolveWith( elem, [ animation ] ); + return false; + } + }, + animation = deferred.promise({ + elem: elem, + props: jQuery.extend( {}, properties ), + opts: jQuery.extend( true, { specialEasing: {} }, options ), + originalProperties: properties, + originalOptions: options, + startTime: fxNow || createFxNow(), + duration: options.duration, + tweens: [], + createTween: function( prop, end, easing ) { + var tween = jQuery.Tween( elem, animation.opts, prop, end, + animation.opts.specialEasing[ prop ] || animation.opts.easing ); + animation.tweens.push( tween ); + return tween; + }, + stop: function( gotoEnd ) { + var index = 0, + // if we are going to the end, we want to run all the tweens + // otherwise we skip this part + length = gotoEnd ? 
animation.tweens.length : 0; + + for ( ; index < length ; index++ ) { + animation.tweens[ index ].run( 1 ); + } + + // resolve when we played the last frame + // otherwise, reject + if ( gotoEnd ) { + deferred.resolveWith( elem, [ animation, gotoEnd ] ); + } else { + deferred.rejectWith( elem, [ animation, gotoEnd ] ); + } + return this; + } + }), + props = animation.props; + + propFilter( props, animation.opts.specialEasing ); + + for ( ; index < length ; index++ ) { + result = animationPrefilters[ index ].call( animation, elem, props, animation.opts ); + if ( result ) { + return result; + } + } + + createTweens( animation, props ); + + if ( jQuery.isFunction( animation.opts.start ) ) { + animation.opts.start.call( elem, animation ); + } + + jQuery.fx.timer( + jQuery.extend( tick, { + anim: animation, + queue: animation.opts.queue, + elem: elem + }) + ); + + // attach callbacks from options + return animation.progress( animation.opts.progress ) + .done( animation.opts.done, animation.opts.complete ) + .fail( animation.opts.fail ) + .always( animation.opts.always ); +} + +function propFilter( props, specialEasing ) { + var index, name, easing, value, hooks; + + // camelCase, specialEasing and expand cssHook pass + for ( index in props ) { + name = jQuery.camelCase( index ); + easing = specialEasing[ name ]; + value = props[ index ]; + if ( jQuery.isArray( value ) ) { + easing = value[ 1 ]; + value = props[ index ] = value[ 0 ]; + } + + if ( index !== name ) { + props[ name ] = value; + delete props[ index ]; + } + + hooks = jQuery.cssHooks[ name ]; + if ( hooks && "expand" in hooks ) { + value = hooks.expand( value ); + delete props[ name ]; + + // not quite $.extend, this wont overwrite keys already present. + // also - reusing 'index' from above because we have the correct "name" + for ( index in value ) { + if ( !( index in props ) ) { + props[ index ] = value[ index ]; + specialEasing[ index ] = easing; + } + } + } else { + specialEasing[ name ] = easing; + } + } +} + +jQuery.Animation = jQuery.extend( Animation, { + + tweener: function( props, callback ) { + if ( jQuery.isFunction( props ) ) { + callback = props; + props = [ "*" ]; + } else { + props = props.split(" "); + } + + var prop, + index = 0, + length = props.length; + + for ( ; index < length ; index++ ) { + prop = props[ index ]; + tweeners[ prop ] = tweeners[ prop ] || []; + tweeners[ prop ].unshift( callback ); + } + }, + + prefilter: function( callback, prepend ) { + if ( prepend ) { + animationPrefilters.unshift( callback ); + } else { + animationPrefilters.push( callback ); + } + } +}); + +function defaultPrefilter( elem, props, opts ) { + var index, prop, value, length, dataShow, toggle, tween, hooks, oldfire, + anim = this, + style = elem.style, + orig = {}, + handled = [], + hidden = elem.nodeType && isHidden( elem ); + + // handle queue: false promises + if ( !opts.queue ) { + hooks = jQuery._queueHooks( elem, "fx" ); + if ( hooks.unqueued == null ) { + hooks.unqueued = 0; + oldfire = hooks.empty.fire; + hooks.empty.fire = function() { + if ( !hooks.unqueued ) { + oldfire(); + } + }; + } + hooks.unqueued++; + + anim.always(function() { + // doing this makes sure that the complete handler will be called + // before this completes + anim.always(function() { + hooks.unqueued--; + if ( !jQuery.queue( elem, "fx" ).length ) { + hooks.empty.fire(); + } + }); + }); + } + + // height/width overflow pass + if ( elem.nodeType === 1 && ( "height" in props || "width" in props ) ) { + // Make sure that nothing sneaks out + // 
Record all 3 overflow attributes because IE does not + // change the overflow attribute when overflowX and + // overflowY are set to the same value + opts.overflow = [ style.overflow, style.overflowX, style.overflowY ]; + + // Set display property to inline-block for height/width + // animations on inline elements that are having width/height animated + if ( jQuery.css( elem, "display" ) === "inline" && + jQuery.css( elem, "float" ) === "none" ) { + + // inline-level elements accept inline-block; + // block-level elements need to be inline with layout + if ( !jQuery.support.inlineBlockNeedsLayout || css_defaultDisplay( elem.nodeName ) === "inline" ) { + style.display = "inline-block"; + + } else { + style.zoom = 1; + } + } + } + + if ( opts.overflow ) { + style.overflow = "hidden"; + if ( !jQuery.support.shrinkWrapBlocks ) { + anim.done(function() { + style.overflow = opts.overflow[ 0 ]; + style.overflowX = opts.overflow[ 1 ]; + style.overflowY = opts.overflow[ 2 ]; + }); + } + } + + + // show/hide pass + for ( index in props ) { + value = props[ index ]; + if ( rfxtypes.exec( value ) ) { + delete props[ index ]; + toggle = toggle || value === "toggle"; + if ( value === ( hidden ? "hide" : "show" ) ) { + continue; + } + handled.push( index ); + } + } + + length = handled.length; + if ( length ) { + dataShow = jQuery._data( elem, "fxshow" ) || jQuery._data( elem, "fxshow", {} ); + if ( "hidden" in dataShow ) { + hidden = dataShow.hidden; + } + + // store state if its toggle - enables .stop().toggle() to "reverse" + if ( toggle ) { + dataShow.hidden = !hidden; + } + if ( hidden ) { + jQuery( elem ).show(); + } else { + anim.done(function() { + jQuery( elem ).hide(); + }); + } + anim.done(function() { + var prop; + jQuery.removeData( elem, "fxshow", true ); + for ( prop in orig ) { + jQuery.style( elem, prop, orig[ prop ] ); + } + }); + for ( index = 0 ; index < length ; index++ ) { + prop = handled[ index ]; + tween = anim.createTween( prop, hidden ? dataShow[ prop ] : 0 ); + orig[ prop ] = dataShow[ prop ] || jQuery.style( elem, prop ); + + if ( !( prop in dataShow ) ) { + dataShow[ prop ] = tween.start; + if ( hidden ) { + tween.end = tween.start; + tween.start = prop === "width" || prop === "height" ? 1 : 0; + } + } + } + } +} + +function Tween( elem, options, prop, end, easing ) { + return new Tween.prototype.init( elem, options, prop, end, easing ); +} +jQuery.Tween = Tween; + +Tween.prototype = { + constructor: Tween, + init: function( elem, options, prop, end, easing, unit ) { + this.elem = elem; + this.prop = prop; + this.easing = easing || "swing"; + this.options = options; + this.start = this.now = this.cur(); + this.end = end; + this.unit = unit || ( jQuery.cssNumber[ prop ] ? "" : "px" ); + }, + cur: function() { + var hooks = Tween.propHooks[ this.prop ]; + + return hooks && hooks.get ? 
+ hooks.get( this ) : + Tween.propHooks._default.get( this ); + }, + run: function( percent ) { + var eased, + hooks = Tween.propHooks[ this.prop ]; + + if ( this.options.duration ) { + this.pos = eased = jQuery.easing[ this.easing ]( + percent, this.options.duration * percent, 0, 1, this.options.duration + ); + } else { + this.pos = eased = percent; + } + this.now = ( this.end - this.start ) * eased + this.start; + + if ( this.options.step ) { + this.options.step.call( this.elem, this.now, this ); + } + + if ( hooks && hooks.set ) { + hooks.set( this ); + } else { + Tween.propHooks._default.set( this ); + } + return this; + } +}; + +Tween.prototype.init.prototype = Tween.prototype; + +Tween.propHooks = { + _default: { + get: function( tween ) { + var result; + + if ( tween.elem[ tween.prop ] != null && + (!tween.elem.style || tween.elem.style[ tween.prop ] == null) ) { + return tween.elem[ tween.prop ]; + } + + // passing any value as a 4th parameter to .css will automatically + // attempt a parseFloat and fallback to a string if the parse fails + // so, simple values such as "10px" are parsed to Float. + // complex values such as "rotate(1rad)" are returned as is. + result = jQuery.css( tween.elem, tween.prop, false, "" ); + // Empty strings, null, undefined and "auto" are converted to 0. + return !result || result === "auto" ? 0 : result; + }, + set: function( tween ) { + // use step hook for back compat - use cssHook if its there - use .style if its + // available and use plain properties where available + if ( jQuery.fx.step[ tween.prop ] ) { + jQuery.fx.step[ tween.prop ]( tween ); + } else if ( tween.elem.style && ( tween.elem.style[ jQuery.cssProps[ tween.prop ] ] != null || jQuery.cssHooks[ tween.prop ] ) ) { + jQuery.style( tween.elem, tween.prop, tween.now + tween.unit ); + } else { + tween.elem[ tween.prop ] = tween.now; + } + } + } +}; + +// Remove in 2.0 - this supports IE8's panic based approach +// to setting things on disconnected nodes + +Tween.propHooks.scrollTop = Tween.propHooks.scrollLeft = { + set: function( tween ) { + if ( tween.elem.nodeType && tween.elem.parentNode ) { + tween.elem[ tween.prop ] = tween.now; + } + } +}; + +jQuery.each([ "toggle", "show", "hide" ], function( i, name ) { + var cssFn = jQuery.fn[ name ]; + jQuery.fn[ name ] = function( speed, easing, callback ) { + return speed == null || typeof speed === "boolean" || + // special check for .toggle( handler, handler, ... ) + ( !i && jQuery.isFunction( speed ) && jQuery.isFunction( easing ) ) ? + cssFn.apply( this, arguments ) : + this.animate( genFx( name, true ), speed, easing, callback ); + }; +}); + +jQuery.fn.extend({ + fadeTo: function( speed, to, easing, callback ) { + + // show any hidden elements after setting opacity to 0 + return this.filter( isHidden ).css( "opacity", 0 ).show() + + // animate to the value specified + .end().animate({ opacity: to }, speed, easing, callback ); + }, + animate: function( prop, speed, easing, callback ) { + var empty = jQuery.isEmptyObject( prop ), + optall = jQuery.speed( speed, easing, callback ), + doAnimation = function() { + // Operate on a copy of prop so per-property easing won't be lost + var anim = Animation( this, jQuery.extend( {}, prop ), optall ); + + // Empty animations resolve immediately + if ( empty ) { + anim.stop( true ); + } + }; + + return empty || optall.queue === false ? 
+ this.each( doAnimation ) : + this.queue( optall.queue, doAnimation ); + }, + stop: function( type, clearQueue, gotoEnd ) { + var stopQueue = function( hooks ) { + var stop = hooks.stop; + delete hooks.stop; + stop( gotoEnd ); + }; + + if ( typeof type !== "string" ) { + gotoEnd = clearQueue; + clearQueue = type; + type = undefined; + } + if ( clearQueue && type !== false ) { + this.queue( type || "fx", [] ); + } + + return this.each(function() { + var dequeue = true, + index = type != null && type + "queueHooks", + timers = jQuery.timers, + data = jQuery._data( this ); + + if ( index ) { + if ( data[ index ] && data[ index ].stop ) { + stopQueue( data[ index ] ); + } + } else { + for ( index in data ) { + if ( data[ index ] && data[ index ].stop && rrun.test( index ) ) { + stopQueue( data[ index ] ); + } + } + } + + for ( index = timers.length; index--; ) { + if ( timers[ index ].elem === this && (type == null || timers[ index ].queue === type) ) { + timers[ index ].anim.stop( gotoEnd ); + dequeue = false; + timers.splice( index, 1 ); + } + } + + // start the next in the queue if the last step wasn't forced + // timers currently will call their complete callbacks, which will dequeue + // but only if they were gotoEnd + if ( dequeue || !gotoEnd ) { + jQuery.dequeue( this, type ); + } + }); + } +}); + +// Generate parameters to create a standard animation +function genFx( type, includeWidth ) { + var which, + attrs = { height: type }, + i = 0; + + // if we include width, step value is 1 to do all cssExpand values, + // if we don't include width, step value is 2 to skip over Left and Right + includeWidth = includeWidth? 1 : 0; + for( ; i < 4 ; i += 2 - includeWidth ) { + which = cssExpand[ i ]; + attrs[ "margin" + which ] = attrs[ "padding" + which ] = type; + } + + if ( includeWidth ) { + attrs.opacity = attrs.width = type; + } + + return attrs; +} + +// Generate shortcuts for custom animations +jQuery.each({ + slideDown: genFx("show"), + slideUp: genFx("hide"), + slideToggle: genFx("toggle"), + fadeIn: { opacity: "show" }, + fadeOut: { opacity: "hide" }, + fadeToggle: { opacity: "toggle" } +}, function( name, props ) { + jQuery.fn[ name ] = function( speed, easing, callback ) { + return this.animate( props, speed, easing, callback ); + }; +}); + +jQuery.speed = function( speed, easing, fn ) { + var opt = speed && typeof speed === "object" ? jQuery.extend( {}, speed ) : { + complete: fn || !fn && easing || + jQuery.isFunction( speed ) && speed, + duration: speed, + easing: fn && easing || easing && !jQuery.isFunction( easing ) && easing + }; + + opt.duration = jQuery.fx.off ? 0 : typeof opt.duration === "number" ? opt.duration : + opt.duration in jQuery.fx.speeds ? 
jQuery.fx.speeds[ opt.duration ] : jQuery.fx.speeds._default; + + // normalize opt.queue - true/undefined/null -> "fx" + if ( opt.queue == null || opt.queue === true ) { + opt.queue = "fx"; + } + + // Queueing + opt.old = opt.complete; + + opt.complete = function() { + if ( jQuery.isFunction( opt.old ) ) { + opt.old.call( this ); + } + + if ( opt.queue ) { + jQuery.dequeue( this, opt.queue ); + } + }; + + return opt; +}; + +jQuery.easing = { + linear: function( p ) { + return p; + }, + swing: function( p ) { + return 0.5 - Math.cos( p*Math.PI ) / 2; + } +}; + +jQuery.timers = []; +jQuery.fx = Tween.prototype.init; +jQuery.fx.tick = function() { + var timer, + timers = jQuery.timers, + i = 0; + + fxNow = jQuery.now(); + + for ( ; i < timers.length; i++ ) { + timer = timers[ i ]; + // Checks the timer has not already been removed + if ( !timer() && timers[ i ] === timer ) { + timers.splice( i--, 1 ); + } + } + + if ( !timers.length ) { + jQuery.fx.stop(); + } + fxNow = undefined; +}; + +jQuery.fx.timer = function( timer ) { + if ( timer() && jQuery.timers.push( timer ) && !timerId ) { + timerId = setInterval( jQuery.fx.tick, jQuery.fx.interval ); + } +}; + +jQuery.fx.interval = 13; + +jQuery.fx.stop = function() { + clearInterval( timerId ); + timerId = null; +}; + +jQuery.fx.speeds = { + slow: 600, + fast: 200, + // Default speed + _default: 400 +}; + +// Back Compat <1.8 extension point +jQuery.fx.step = {}; + +if ( jQuery.expr && jQuery.expr.filters ) { + jQuery.expr.filters.animated = function( elem ) { + return jQuery.grep(jQuery.timers, function( fn ) { + return elem === fn.elem; + }).length; + }; +} +var rroot = /^(?:body|html)$/i; + +jQuery.fn.offset = function( options ) { + if ( arguments.length ) { + return options === undefined ? + this : + this.each(function( i ) { + jQuery.offset.setOffset( this, options, i ); + }); + } + + var docElem, body, win, clientTop, clientLeft, scrollTop, scrollLeft, + box = { top: 0, left: 0 }, + elem = this[ 0 ], + doc = elem && elem.ownerDocument; + + if ( !doc ) { + return; + } + + if ( (body = doc.body) === elem ) { + return jQuery.offset.bodyOffset( elem ); + } + + docElem = doc.documentElement; + + // Make sure it's not a disconnected DOM node + if ( !jQuery.contains( docElem, elem ) ) { + return box; + } + + // If we don't have gBCR, just use 0,0 rather than error + // BlackBerry 5, iOS 3 (original iPhone) + if ( typeof elem.getBoundingClientRect !== "undefined" ) { + box = elem.getBoundingClientRect(); + } + win = getWindow( doc ); + clientTop = docElem.clientTop || body.clientTop || 0; + clientLeft = docElem.clientLeft || body.clientLeft || 0; + scrollTop = win.pageYOffset || docElem.scrollTop; + scrollLeft = win.pageXOffset || docElem.scrollLeft; + return { + top: box.top + scrollTop - clientTop, + left: box.left + scrollLeft - clientLeft + }; +}; + +jQuery.offset = { + + bodyOffset: function( body ) { + var top = body.offsetTop, + left = body.offsetLeft; + + if ( jQuery.support.doesNotIncludeMarginInBodyOffset ) { + top += parseFloat( jQuery.css(body, "marginTop") ) || 0; + left += parseFloat( jQuery.css(body, "marginLeft") ) || 0; + } + + return { top: top, left: left }; + }, + + setOffset: function( elem, options, i ) { + var position = jQuery.css( elem, "position" ); + + // set position first, in-case top/left are set even on static elem + if ( position === "static" ) { + elem.style.position = "relative"; + } + + var curElem = jQuery( elem ), + curOffset = curElem.offset(), + curCSSTop = jQuery.css( elem, "top" ), + curCSSLeft = 
jQuery.css( elem, "left" ), + calculatePosition = ( position === "absolute" || position === "fixed" ) && jQuery.inArray("auto", [curCSSTop, curCSSLeft]) > -1, + props = {}, curPosition = {}, curTop, curLeft; + + // need to be able to calculate position if either top or left is auto and position is either absolute or fixed + if ( calculatePosition ) { + curPosition = curElem.position(); + curTop = curPosition.top; + curLeft = curPosition.left; + } else { + curTop = parseFloat( curCSSTop ) || 0; + curLeft = parseFloat( curCSSLeft ) || 0; + } + + if ( jQuery.isFunction( options ) ) { + options = options.call( elem, i, curOffset ); + } + + if ( options.top != null ) { + props.top = ( options.top - curOffset.top ) + curTop; + } + if ( options.left != null ) { + props.left = ( options.left - curOffset.left ) + curLeft; + } + + if ( "using" in options ) { + options.using.call( elem, props ); + } else { + curElem.css( props ); + } + } +}; + + +jQuery.fn.extend({ + + position: function() { + if ( !this[0] ) { + return; + } + + var elem = this[0], + + // Get *real* offsetParent + offsetParent = this.offsetParent(), + + // Get correct offsets + offset = this.offset(), + parentOffset = rroot.test(offsetParent[0].nodeName) ? { top: 0, left: 0 } : offsetParent.offset(); + + // Subtract element margins + // note: when an element has margin: auto the offsetLeft and marginLeft + // are the same in Safari causing offset.left to incorrectly be 0 + offset.top -= parseFloat( jQuery.css(elem, "marginTop") ) || 0; + offset.left -= parseFloat( jQuery.css(elem, "marginLeft") ) || 0; + + // Add offsetParent borders + parentOffset.top += parseFloat( jQuery.css(offsetParent[0], "borderTopWidth") ) || 0; + parentOffset.left += parseFloat( jQuery.css(offsetParent[0], "borderLeftWidth") ) || 0; + + // Subtract the two offsets + return { + top: offset.top - parentOffset.top, + left: offset.left - parentOffset.left + }; + }, + + offsetParent: function() { + return this.map(function() { + var offsetParent = this.offsetParent || document.body; + while ( offsetParent && (!rroot.test(offsetParent.nodeName) && jQuery.css(offsetParent, "position") === "static") ) { + offsetParent = offsetParent.offsetParent; + } + return offsetParent || document.body; + }); + } +}); + + +// Create scrollLeft and scrollTop methods +jQuery.each( {scrollLeft: "pageXOffset", scrollTop: "pageYOffset"}, function( method, prop ) { + var top = /Y/.test( prop ); + + jQuery.fn[ method ] = function( val ) { + return jQuery.access( this, function( elem, method, val ) { + var win = getWindow( elem ); + + if ( val === undefined ) { + return win ? (prop in win) ? win[ prop ] : + win.document.documentElement[ method ] : + elem[ method ]; + } + + if ( win ) { + win.scrollTo( + !top ? val : jQuery( win ).scrollLeft(), + top ? val : jQuery( win ).scrollTop() + ); + + } else { + elem[ method ] = val; + } + }, method, val, arguments.length, null ); + }; +}); + +function getWindow( elem ) { + return jQuery.isWindow( elem ) ? + elem : + elem.nodeType === 9 ? 
+ elem.defaultView || elem.parentWindow : + false; +} +// Create innerHeight, innerWidth, height, width, outerHeight and outerWidth methods +jQuery.each( { Height: "height", Width: "width" }, function( name, type ) { + jQuery.each( { padding: "inner" + name, content: type, "": "outer" + name }, function( defaultExtra, funcName ) { + // margin is only for outerHeight, outerWidth + jQuery.fn[ funcName ] = function( margin, value ) { + var chainable = arguments.length && ( defaultExtra || typeof margin !== "boolean" ), + extra = defaultExtra || ( margin === true || value === true ? "margin" : "border" ); + + return jQuery.access( this, function( elem, type, value ) { + var doc; + + if ( jQuery.isWindow( elem ) ) { + // As of 5/8/2012 this will yield incorrect results for Mobile Safari, but there + // isn't a whole lot we can do. See pull request at this URL for discussion: + // https://github.com/jquery/jquery/pull/764 + return elem.document.documentElement[ "client" + name ]; + } + + // Get document width or height + if ( elem.nodeType === 9 ) { + doc = elem.documentElement; + + // Either scroll[Width/Height] or offset[Width/Height] or client[Width/Height], whichever is greatest + // unfortunately, this causes bug #3838 in IE6/8 only, but there is currently no good, small way to fix it. + return Math.max( + elem.body[ "scroll" + name ], doc[ "scroll" + name ], + elem.body[ "offset" + name ], doc[ "offset" + name ], + doc[ "client" + name ] + ); + } + + return value === undefined ? + // Get width or height on the element, requesting but not forcing parseFloat + jQuery.css( elem, type, value, extra ) : + + // Set width or height on the element + jQuery.style( elem, type, value, extra ); + }, type, chainable ? margin : undefined, chainable, null ); + }; + }); +}); +// Expose jQuery to the global object +window.jQuery = window.$ = jQuery; + +// Expose jQuery as an AMD module, but only for AMD loaders that +// understand the issues with loading multiple versions of jQuery +// in a page that all might call define(). The loader will indicate +// they have special allowances for multiple jQuery versions by +// specifying define.amd.jQuery = true. Register as a named module, +// since jQuery can be concatenated with other files that may use define, +// but not use a proper concatenation script that understands anonymous +// AMD modules. A named AMD is safest and most robust way to register. +// Lowercase jquery is used because AMD module names are derived from +// file names, and jQuery is normally delivered in a lowercase file name. +// Do this after creating the global so that if an AMD module wants to call +// noConflict to hide this version of jQuery, it will work. 
+if ( typeof define === "function" && define.amd && define.amd.jQuery ) { + define( "jquery", [], function () { return jQuery; } ); +} + +})( window ); diff --git a/qa/workunits/erasure-code/plot.js b/qa/workunits/erasure-code/plot.js new file mode 100644 index 00000000..bd2bba5b --- /dev/null +++ b/qa/workunits/erasure-code/plot.js @@ -0,0 +1,82 @@ +$(function() { + encode = []; + if (typeof encode_vandermonde_isa != 'undefined') { + encode.push({ + data: encode_vandermonde_isa, + label: "ISA, Vandermonde", + points: { show: true }, + lines: { show: true }, + }); + } + if (typeof encode_vandermonde_jerasure != 'undefined') { + encode.push({ + data: encode_vandermonde_jerasure, + label: "Jerasure Generic, Vandermonde", + points: { show: true }, + lines: { show: true }, + }); + } + if (typeof encode_cauchy_isa != 'undefined') { + encode.push({ + data: encode_cauchy_isa, + label: "ISA, Cauchy", + points: { show: true }, + lines: { show: true }, + }); + } + if (typeof encode_cauchy_jerasure != 'undefined') { + encode.push({ + data: encode_cauchy_jerasure, + label: "Jerasure, Cauchy", + points: { show: true }, + lines: { show: true }, + }); + } + $.plot("#encode", encode, { + xaxis: { + mode: "categories", + tickLength: 0 + }, + }); + + decode = []; + if (typeof decode_vandermonde_isa != 'undefined') { + decode.push({ + data: decode_vandermonde_isa, + label: "ISA, Vandermonde", + points: { show: true }, + lines: { show: true }, + }); + } + if (typeof decode_vandermonde_jerasure != 'undefined') { + decode.push({ + data: decode_vandermonde_jerasure, + label: "Jerasure Generic, Vandermonde", + points: { show: true }, + lines: { show: true }, + }); + } + if (typeof decode_cauchy_isa != 'undefined') { + decode.push({ + data: decode_cauchy_isa, + label: "ISA, Cauchy", + points: { show: true }, + lines: { show: true }, + }); + } + if (typeof decode_cauchy_jerasure != 'undefined') { + decode.push({ + data: decode_cauchy_jerasure, + label: "Jerasure, Cauchy", + points: { show: true }, + lines: { show: true }, + }); + } + $.plot("#decode", decode, { + xaxis: { + mode: "categories", + tickLength: 0 + }, + }); + +}); diff --git a/qa/workunits/false.sh b/qa/workunits/false.sh new file mode 100644 index 00000000..8a961b32 --- /dev/null +++ b/qa/workunits/false.sh @@ -0,0 +1,3 @@ +#!/bin/sh -ex + +false \ No newline at end of file diff --git a/qa/workunits/fs/.gitignore b/qa/workunits/fs/.gitignore new file mode 100644 index 00000000..f7f7a061 --- /dev/null +++ b/qa/workunits/fs/.gitignore @@ -0,0 +1 @@ +test_o_trunc diff --git a/qa/workunits/fs/Makefile b/qa/workunits/fs/Makefile new file mode 100644 index 00000000..c9934254 --- /dev/null +++ b/qa/workunits/fs/Makefile @@ -0,0 +1,11 @@ +CFLAGS = -Wall -Wextra -D_GNU_SOURCE + +TARGETS = test_o_trunc + +.c: + $(CC) $(CFLAGS) $@.c -o $@ + +all: $(TARGETS) + +clean: + rm $(TARGETS) diff --git a/qa/workunits/fs/misc/acl.sh b/qa/workunits/fs/misc/acl.sh new file mode 100755 index 00000000..198b0567 --- /dev/null +++ b/qa/workunits/fs/misc/acl.sh @@ -0,0 +1,50 @@ +#!/bin/sh -x + +set -e +mkdir -p testdir +cd testdir + +set +e +setfacl -d -m u:nobody:rw . +if test $? 
!= 0; then + echo "Filesystem does not support ACL" + exit 0 +fi + +expect_failure() { + if "$@"; then return 1; else return 0; fi +} + +set -e +c=0 +while [ $c -lt 100 ] +do + c=`expr $c + 1` + # inherited ACL from parent directory's default ACL + mkdir d1 + c1=`getfacl d1 | grep -c "nobody:rw"` + echo 3 | sudo tee /proc/sys/vm/drop_caches > /dev/null + c2=`getfacl d1 | grep -c "nobody:rw"` + rmdir d1 + if [ $c1 -ne 2 ] || [ $c2 -ne 2 ] + then + echo "ERROR: incorrect ACLs" + exit 1 + fi +done + +mkdir d1 + +# The ACL xattr only contains ACL header. ACL should be removed +# in this case. +setfattr -n system.posix_acl_access -v 0x02000000 d1 +setfattr -n system.posix_acl_default -v 0x02000000 . + +expect_failure getfattr -n system.posix_acl_access d1 +expect_failure getfattr -n system.posix_acl_default . + + +rmdir d1 +cd .. +rmdir testdir +echo OK diff --git a/qa/workunits/fs/misc/chmod.sh b/qa/workunits/fs/misc/chmod.sh new file mode 100755 index 00000000..de66776f --- /dev/null +++ b/qa/workunits/fs/misc/chmod.sh @@ -0,0 +1,60 @@ +#!/bin/sh -x + +set -e + +check_perms() { + + file=$1 + r=$(ls -la ${file}) + if test $? != 0; then + echo "ERROR: File listing/stat failed" + exit 1 + fi + + perms=$2 + if test "${perms}" != $(echo ${r} | awk '{print $1}') && \ + test "${perms}." != $(echo ${r} | awk '{print $1}') && \ + test "${perms}+" != $(echo ${r} | awk '{print $1}'); then + echo "ERROR: Permissions should be ${perms}" + exit 1 + fi +} + +file=test_chmod.$$ + +echo "foo" > ${file} +if test $? != 0; then + echo "ERROR: Failed to create file ${file}" + exit 1 +fi + +chmod 400 ${file} +if test $? != 0; then + echo "ERROR: Failed to change mode of ${file}" + exit 1 +fi + +check_perms ${file} "-r--------" + +set +e +echo "bar" >> ${file} +if test $? = 0; then + echo "ERROR: Write to read-only file should Fail" + exit 1 +fi + +set -e +chmod 600 ${file} +echo "bar" >> ${file} +if test $? != 0; then + echo "ERROR: Write to writeable file failed" + exit 1 +fi + +check_perms ${file} "-rw-------" + +echo "foo" >> ${file} +if test $? 
!= 0; then + echo "ERROR: Failed to write to file" + exit 1 +fi diff --git a/qa/workunits/fs/misc/direct_io.py b/qa/workunits/fs/misc/direct_io.py new file mode 100755 index 00000000..b5c42265 --- /dev/null +++ b/qa/workunits/fs/misc/direct_io.py @@ -0,0 +1,50 @@ +#!/usr/bin/python + +import json +import mmap +import os +import subprocess + + +def get_data_pool(): + cmd = ['ceph', 'fs', 'ls', '--format=json-pretty'] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) + out = proc.communicate()[0] + return json.loads(out)[0]['data_pools'][0] + + +def main(): + fd = os.open("testfile", os.O_RDWR | os.O_CREAT | os.O_TRUNC | os.O_DIRECT, 0o644) + + ino = os.fstat(fd).st_ino + obj_name = "{ino:x}.00000000".format(ino=ino) + pool_name = get_data_pool() + + buf = mmap.mmap(-1, 1) + buf.write('1') + os.write(fd, buf) + + proc = subprocess.Popen(['rados', '-p', pool_name, 'get', obj_name, 'tmpfile']) + proc.wait() + + with open('tmpfile', 'r') as tmpf: + out = tmpf.read() + if out != '1': + raise RuntimeError("data were not written to object store directly") + + with open('tmpfile', 'w') as tmpf: + tmpf.write('2') + + proc = subprocess.Popen(['rados', '-p', pool_name, 'put', obj_name, 'tmpfile']) + proc.wait() + + os.lseek(fd, 0, os.SEEK_SET) + out = os.read(fd, 1) + if out != '2': + raise RuntimeError("data were not directly read from object store") + + os.close(fd) + print('ok') + + +main() diff --git a/qa/workunits/fs/misc/dirfrag.sh b/qa/workunits/fs/misc/dirfrag.sh new file mode 100755 index 00000000..eea0ec3b --- /dev/null +++ b/qa/workunits/fs/misc/dirfrag.sh @@ -0,0 +1,52 @@ +#!/usr/bin/env bash + +set -e + +DEPTH=5 +COUNT=10000 + +kill_jobs() { + jobs -p | xargs kill +} +trap kill_jobs INT + +create_files() { + for i in `seq 1 $COUNT` + do + touch file$i + done +} + +delete_files() { + for i in `ls -f` + do + if [[ ${i}a = file*a ]] + then + rm -f $i + fi + done +} + +rm -rf testdir +mkdir testdir +cd testdir + +echo "creating folder hierarchy" +for i in `seq 1 $DEPTH`; do + mkdir dir$i + cd dir$i + create_files & +done +wait + +echo "created hierarchy, now cleaning up" + +for i in `seq 1 $DEPTH`; do + delete_files & + cd .. +done +wait + +echo "cleaned up hierarchy" +cd .. 
+rm -rf testdir diff --git a/qa/workunits/fs/misc/filelock_deadlock.py b/qa/workunits/fs/misc/filelock_deadlock.py new file mode 100755 index 00000000..3ebc9777 --- /dev/null +++ b/qa/workunits/fs/misc/filelock_deadlock.py @@ -0,0 +1,72 @@ +#!/usr/bin/python + +import errno +import fcntl +import os +import signal +import struct +import time + + +def handler(signum, frame): + pass + + +def lock_two(f1, f2): + lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 10, 0, 0) + fcntl.fcntl(f1, fcntl.F_SETLKW, lockdata) + time.sleep(10) + + # don't wait forever + signal.signal(signal.SIGALRM, handler) + signal.alarm(10) + exitcode = 0 + try: + fcntl.fcntl(f2, fcntl.F_SETLKW, lockdata) + except IOError as e: + if e.errno == errno.EDEADLK: + exitcode = 1 + elif e.errno == errno.EINTR: + exitcode = 2 + else: + exitcode = 3 + os._exit(exitcode) + + +def main(): + pid1 = os.fork() + if pid1 == 0: + f1 = open("testfile1", 'w') + f2 = open("testfile2", 'w') + lock_two(f1, f2) + + pid2 = os.fork() + if pid2 == 0: + f1 = open("testfile2", 'w') + f2 = open("testfile3", 'w') + lock_two(f1, f2) + + pid3 = os.fork() + if pid3 == 0: + f1 = open("testfile3", 'w') + f2 = open("testfile1", 'w') + lock_two(f1, f2) + + deadlk_count = 0 + i = 0 + while i < 3: + pid, status = os.wait() + exitcode = status >> 8 + if exitcode == 1: + deadlk_count += 1 + elif exitcode != 0: + raise RuntimeError("unexpect exit code of child") + i += 1 + + if deadlk_count != 1: + raise RuntimeError("unexpect count of EDEADLK") + + print('ok') + + +main() diff --git a/qa/workunits/fs/misc/filelock_interrupt.py b/qa/workunits/fs/misc/filelock_interrupt.py new file mode 100755 index 00000000..7b5b3e7d --- /dev/null +++ b/qa/workunits/fs/misc/filelock_interrupt.py @@ -0,0 +1,87 @@ +#!/usr/bin/python + +import errno +import fcntl +import signal +import struct + +""" +introduced by Linux 3.15 +""" +fcntl.F_OFD_GETLK = 36 +fcntl.F_OFD_SETLK = 37 +fcntl.F_OFD_SETLKW = 38 + + +def handler(signum, frame): + pass + + +def main(): + f1 = open("testfile", 'w') + f2 = open("testfile", 'w') + + fcntl.flock(f1, fcntl.LOCK_SH | fcntl.LOCK_NB) + + """ + is flock interruptible? + """ + signal.signal(signal.SIGALRM, handler) + signal.alarm(5) + try: + fcntl.flock(f2, fcntl.LOCK_EX) + except IOError as e: + if e.errno != errno.EINTR: + raise + else: + raise RuntimeError("expect flock to block") + + fcntl.flock(f1, fcntl.LOCK_UN) + + lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 10, 0, 0) + try: + fcntl.fcntl(f1, fcntl.F_OFD_SETLK, lockdata) + except IOError as e: + if e.errno != errno.EINVAL: + raise + else: + print('kernel does not support fcntl.F_OFD_SETLK') + return + + lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 10, 10, 0, 0) + fcntl.fcntl(f2, fcntl.F_OFD_SETLK, lockdata) + + """ + is posix lock interruptible? 
+ """ + signal.signal(signal.SIGALRM, handler) + signal.alarm(5) + try: + lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 0, 0, 0, 0) + fcntl.fcntl(f2, fcntl.F_OFD_SETLKW, lockdata) + except IOError as e: + if e.errno != errno.EINTR: + raise + else: + raise RuntimeError("expect posix lock to block") + + """ + file handler 2 should still hold lock on 10~10 + """ + try: + lockdata = struct.pack('hhllhh', fcntl.F_WRLCK, 0, 10, 10, 0, 0) + fcntl.fcntl(f1, fcntl.F_OFD_SETLK, lockdata) + except IOError as e: + if e.errno == errno.EAGAIN: + pass + else: + raise RuntimeError("expect file handler 2 to hold lock on 10~10") + + lockdata = struct.pack('hhllhh', fcntl.F_UNLCK, 0, 0, 0, 0, 0) + fcntl.fcntl(f1, fcntl.F_OFD_SETLK, lockdata) + fcntl.fcntl(f2, fcntl.F_OFD_SETLK, lockdata) + + print('ok') + + +main() diff --git a/qa/workunits/fs/misc/i_complete_vs_rename.sh b/qa/workunits/fs/misc/i_complete_vs_rename.sh new file mode 100755 index 00000000..a9b98271 --- /dev/null +++ b/qa/workunits/fs/misc/i_complete_vs_rename.sh @@ -0,0 +1,31 @@ +#!/bin/sh + +set -e + +mkdir x +cd x +touch a +touch b +touch c +touch d +ls +chmod 777 . +stat e || true +touch f +touch g + +# over existing file +echo attempting rename over existing file... +touch ../xx +mv ../xx f +ls | grep f || false +echo rename over existing file is okay + +# over negative dentry +echo attempting rename over negative dentry... +touch ../xx +mv ../xx e +ls | grep e || false +echo rename over negative dentry is ok + +echo OK diff --git a/qa/workunits/fs/misc/layout_vxattrs.sh b/qa/workunits/fs/misc/layout_vxattrs.sh new file mode 100755 index 00000000..81133627 --- /dev/null +++ b/qa/workunits/fs/misc/layout_vxattrs.sh @@ -0,0 +1,115 @@ +#!/usr/bin/env bash + +set -ex + +# detect data pool +datapool= +dir=. +while true ; do + echo $dir + datapool=$(getfattr -n ceph.dir.layout.pool $dir --only-values) && break + dir=$dir/.. 
+done + +# file +rm -f file file2 +touch file file2 + +getfattr -n ceph.file.layout file +getfattr -n ceph.file.layout file | grep -q object_size= +getfattr -n ceph.file.layout file | grep -q stripe_count= +getfattr -n ceph.file.layout file | grep -q stripe_unit= +getfattr -n ceph.file.layout file | grep -q pool= +getfattr -n ceph.file.layout.pool file +getfattr -n ceph.file.layout.pool_namespace file +getfattr -n ceph.file.layout.stripe_unit file +getfattr -n ceph.file.layout.stripe_count file +getfattr -n ceph.file.layout.object_size file + +getfattr -n ceph.file.layout.bogus file 2>&1 | grep -q 'No such attribute' +getfattr -n ceph.dir.layout file 2>&1 | grep -q 'No such attribute' + +setfattr -n ceph.file.layout.stripe_unit -v 1048576 file2 +setfattr -n ceph.file.layout.stripe_count -v 8 file2 +setfattr -n ceph.file.layout.object_size -v 10485760 file2 + +setfattr -n ceph.file.layout.pool -v $datapool file2 +getfattr -n ceph.file.layout.pool file2 | grep -q $datapool +setfattr -n ceph.file.layout.pool_namespace -v foons file2 +getfattr -n ceph.file.layout.pool_namespace file2 | grep -q foons +setfattr -x ceph.file.layout.pool_namespace file2 +getfattr -n ceph.file.layout.pool_namespace file2 | grep -q -v foons + +getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 1048576 +getfattr -n ceph.file.layout.stripe_count file2 | grep -q 8 +getfattr -n ceph.file.layout.object_size file2 | grep -q 10485760 + +setfattr -n ceph.file.layout -v "stripe_unit=4194304 stripe_count=16 object_size=41943040 pool=$datapool pool_namespace=foons" file2 +getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 4194304 +getfattr -n ceph.file.layout.stripe_count file2 | grep -q 16 +getfattr -n ceph.file.layout.object_size file2 | grep -q 41943040 +getfattr -n ceph.file.layout.pool file2 | grep -q $datapool +getfattr -n ceph.file.layout.pool_namespace file2 | grep -q foons + +setfattr -n ceph.file.layout -v "stripe_unit=1048576" file2 +getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 1048576 +getfattr -n ceph.file.layout.stripe_count file2 | grep -q 16 +getfattr -n ceph.file.layout.object_size file2 | grep -q 41943040 +getfattr -n ceph.file.layout.pool file2 | grep -q $datapool +getfattr -n ceph.file.layout.pool_namespace file2 | grep -q foons + +setfattr -n ceph.file.layout -v "stripe_unit=2097152 stripe_count=4 object_size=2097152 pool=$datapool pool_namespace=barns" file2 +getfattr -n ceph.file.layout.stripe_unit file2 | grep -q 2097152 +getfattr -n ceph.file.layout.stripe_count file2 | grep -q 4 +getfattr -n ceph.file.layout.object_size file2 | grep -q 2097152 +getfattr -n ceph.file.layout.pool file2 | grep -q $datapool +getfattr -n ceph.file.layout.pool_namespace file2 | grep -q barns + +# dir +rm -f dir/file || true +rmdir dir || true +mkdir -p dir + +getfattr -d -m - dir | grep -q ceph.dir.layout && exit 1 || true +getfattr -d -m - dir | grep -q ceph.file.layout && exit 1 || true +getfattr -n ceph.dir.layout dir && exit 1 || true + +setfattr -n ceph.dir.layout.stripe_unit -v 1048576 dir +setfattr -n ceph.dir.layout.stripe_count -v 8 dir +setfattr -n ceph.dir.layout.object_size -v 10485760 dir +setfattr -n ceph.dir.layout.pool -v $datapool dir +setfattr -n ceph.dir.layout.pool_namespace -v dirns dir + +getfattr -n ceph.dir.layout dir +getfattr -n ceph.dir.layout dir | grep -q object_size=10485760 +getfattr -n ceph.dir.layout dir | grep -q stripe_count=8 +getfattr -n ceph.dir.layout dir | grep -q stripe_unit=1048576 +getfattr -n ceph.dir.layout dir | grep -q pool=$datapool +getfattr -n 
ceph.dir.layout dir | grep -q pool_namespace=dirns +getfattr -n ceph.dir.layout.pool dir | grep -q $datapool +getfattr -n ceph.dir.layout.stripe_unit dir | grep -q 1048576 +getfattr -n ceph.dir.layout.stripe_count dir | grep -q 8 +getfattr -n ceph.dir.layout.object_size dir | grep -q 10485760 +getfattr -n ceph.dir.layout.pool_namespace dir | grep -q dirns + + +setfattr -n ceph.file.layout -v "stripe_count=16" file2 +getfattr -n ceph.file.layout.stripe_count file2 | grep -q 16 +setfattr -n ceph.file.layout -v "object_size=10485760 stripe_count=8 stripe_unit=1048576 pool=$datapool pool_namespace=dirns" file2 +getfattr -n ceph.file.layout.stripe_count file2 | grep -q 8 + +touch dir/file +getfattr -n ceph.file.layout.pool dir/file | grep -q $datapool +getfattr -n ceph.file.layout.stripe_unit dir/file | grep -q 1048576 +getfattr -n ceph.file.layout.stripe_count dir/file | grep -q 8 +getfattr -n ceph.file.layout.object_size dir/file | grep -q 10485760 +getfattr -n ceph.file.layout.pool_namespace dir/file | grep -q dirns + +setfattr -x ceph.dir.layout.pool_namespace dir +getfattr -n ceph.dir.layout dir | grep -q -v pool_namespace=dirns + +setfattr -x ceph.dir.layout dir +getfattr -n ceph.dir.layout dir 2>&1 | grep -q 'No such attribute' + +echo OK + diff --git a/qa/workunits/fs/misc/mkpool_layout_vxattrs.sh b/qa/workunits/fs/misc/mkpool_layout_vxattrs.sh new file mode 100755 index 00000000..6b2fecbc --- /dev/null +++ b/qa/workunits/fs/misc/mkpool_layout_vxattrs.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +set -e + +touch foo.$$ +ceph osd pool create foo.$$ 8 +ceph fs add_data_pool cephfs foo.$$ +setfattr -n ceph.file.layout.pool -v foo.$$ foo.$$ + +# cleanup +rm foo.$$ +ceph fs rm_data_pool cephfs foo.$$ +ceph osd pool rm foo.$$ foo.$$ --yes-i-really-really-mean-it + +echo OK diff --git a/qa/workunits/fs/misc/multiple_rsync.sh b/qa/workunits/fs/misc/multiple_rsync.sh new file mode 100755 index 00000000..4397c1e7 --- /dev/null +++ b/qa/workunits/fs/misc/multiple_rsync.sh @@ -0,0 +1,25 @@ +#!/bin/sh -ex + + +# Populate with some arbitrary files from the local system. Take +# a copy to protect against false fails from system updates during test. +export PAYLOAD=/tmp/multiple_rsync_payload.$$ +sudo cp -r /usr/lib/ $PAYLOAD + +set -e + +sudo rsync -av $PAYLOAD payload.1 +sudo rsync -av $PAYLOAD payload.2 + +# this shouldn't transfer any additional files +echo we should get 4 here if no additional files are transferred +sudo rsync -auv $PAYLOAD payload.1 | tee /tmp/$$ +hexdump -C /tmp/$$ +wc -l /tmp/$$ | grep 4 +sudo rsync -auv $PAYLOAD payload.2 | tee /tmp/$$ +hexdump -C /tmp/$$ +wc -l /tmp/$$ | grep 4 +echo OK + +rm /tmp/$$ +sudo rm -rf $PAYLOAD diff --git a/qa/workunits/fs/misc/rstats.sh b/qa/workunits/fs/misc/rstats.sh new file mode 100755 index 00000000..4c32edb2 --- /dev/null +++ b/qa/workunits/fs/misc/rstats.sh @@ -0,0 +1,80 @@ +#!/usr/bin/env bash + +set -x + +timeout=30 +old_value="" +new_value="" + +wait_until_changed() { + name=$1 + wait=0 + while [ $wait -lt $timeout ]; do + new_value=`getfattr --only-value -n ceph.dir.$name .` + [ $new_value == $old_value ] || return 0 + sleep 1 + wait=$(($wait + 1)) + done + return 1 +} + +check_rctime() { + old_sec=$(echo $old_value | cut -d. -f1) + old_nsec=$(echo $old_value | cut -d. -f2) + new_sec=$(echo $new_value | cut -d. -f1) + new_nsec=$(echo $new_value | cut -d. 
-f2) + [ "$old_sec" -lt "$new_sec" ] && return 0 + [ "$old_sec" -gt "$new_sec" ] && return 1 + [ "$old_nsec" -lt "$new_nsec" ] && return 0 + return 1 +} + +# sync(3) does not make ceph-fuse flush dirty caps, because fuse kernel module +# does not notify ceph-fuse about it. Use fsync(3) instead. +fsync_path() { + cmd="import os; fd=os.open(\"$1\", os.O_RDONLY); os.fsync(fd); os.close(fd)" + python -c "$cmd" +} + +set -e + +mkdir -p rstats_testdir/d1/d2 +cd rstats_testdir + +# rfiles +old_value=`getfattr --only-value -n ceph.dir.rfiles .` +[ $old_value == 0 ] || false +touch d1/d2/f1 +wait_until_changed rfiles +[ $new_value == $(($old_value + 1)) ] || false + +# rsubdirs +old_value=`getfattr --only-value -n ceph.dir.rsubdirs .` +[ $old_value == 3 ] || false +mkdir d1/d2/d3 +wait_until_changed rsubdirs +[ $new_value == $(($old_value + 1)) ] || false + +# rbytes +old_value=`getfattr --only-value -n ceph.dir.rbytes .` +[ $old_value == 0 ] || false +echo hello > d1/d2/f2 +fsync_path d1/d2/f2 +wait_until_changed rbytes +[ $new_value == $(($old_value + 6)) ] || false + +#rctime +old_value=`getfattr --only-value -n ceph.dir.rctime .` +touch d1/d2/d3 # touch existing file +fsync_path d1/d2/d3 +wait_until_changed rctime +check_rctime + +old_value=`getfattr --only-value -n ceph.dir.rctime .` +touch d1/d2/f3 # create new file +wait_until_changed rctime +check_rctime + +cd .. +rm -rf rstats_testdir +echo OK diff --git a/qa/workunits/fs/misc/subvolume.sh b/qa/workunits/fs/misc/subvolume.sh new file mode 100755 index 00000000..75716a6c --- /dev/null +++ b/qa/workunits/fs/misc/subvolume.sh @@ -0,0 +1,63 @@ +#!/bin/sh -x + +expect_failure() { + if "$@"; then return 1; else return 0; fi +} + +set -e + +mkdir group +mkdir group/subvol1 + +setfattr -n ceph.dir.subvolume -v 1 group/subvol1 + +# rename subvolume +mv group/subvol1 group/subvol2 + +# move file out of the subvolume +touch group/subvol2/file1 +expect_failure python3 -c "import os; os.rename('group/subvol2/file1', 'group/file1')" +# move file into the subvolume +touch group/file2 +expect_failure python3 -c "import os; os.rename('group/file2', 'group/subvol2/file2')" + +# create hardlink within subvolume +ln group/subvol2/file1 group/subvol2/file1_ + +# create hardlink out of subvolume +expect_failure ln group/subvol2/file1 group/file1_ +expect_failure ln group/file2 group/subvol1/file2_ + +# create snapshot at subvolume root +mkdir group/subvol2/.snap/s1 + +# create snapshot at descendent dir of subvolume +mkdir group/subvol2/dir +expect_failure mkdir group/subvol2/dir/.snap/s2 + +mkdir group/subvol3 +setfattr -n ceph.dir.subvolume -v 1 group/subvol3 + +# move file across subvolumes +expect_failure python3 -c "import os; os.rename('group/subvol2/file1', 'group/subvol3/file1')" + +# create hardlink across subvolumes +expect_failure ln group/subvol2/file1 group/subvol3/file1 + +# create subvolume inside existing subvolume +expect_failure setfattr -n ceph.dir.subvolume -v 1 group/subvol2/dir + +# clear subvolume flag +setfattr -n ceph.dir.subvolume -v 0 group/subvol2 +mkdir group/subvol2/dir/.snap/s2 + +# parent subvolume override child subvolume +setfattr -n ceph.dir.subvolume -v 1 group/subvol2/dir +setfattr -n ceph.dir.subvolume -v 1 group/subvol2 +expect_failure mkdir group/subvol2/dir/.snap/s3 + +rmdir group/subvol2/.snap/s1 +rmdir group/subvol2/dir/.snap/s2 +rm -rf group + +echo OK diff --git a/qa/workunits/fs/misc/trivial_sync.sh b/qa/workunits/fs/misc/trivial_sync.sh new file mode 100755 index 00000000..7c8c4e2b --- /dev/null +++ 
b/qa/workunits/fs/misc/trivial_sync.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash + +set -e + +mkdir foo +echo foo > bar +sync diff --git a/qa/workunits/fs/misc/xattrs.sh b/qa/workunits/fs/misc/xattrs.sh new file mode 100755 index 00000000..fcd94d22 --- /dev/null +++ b/qa/workunits/fs/misc/xattrs.sh @@ -0,0 +1,14 @@ +#!/bin/sh -x + +set -e + +touch file + +setfattr -n user.foo -v foo file +setfattr -n user.bar -v bar file +setfattr -n user.empty file +getfattr -d file | grep foo +getfattr -d file | grep bar +getfattr -d file | grep empty + +echo OK. diff --git a/qa/workunits/fs/multiclient_sync_read_eof.py b/qa/workunits/fs/multiclient_sync_read_eof.py new file mode 100755 index 00000000..1d5bb650 --- /dev/null +++ b/qa/workunits/fs/multiclient_sync_read_eof.py @@ -0,0 +1,42 @@ +#!/usr/bin/python + +import argparse +import os + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument('mnt1') + parser.add_argument('mnt2') + parser.add_argument('fn') + args = parser.parse_args() + + open(os.path.join(args.mnt1, args.fn), 'w') + f1 = open(os.path.join(args.mnt1, args.fn), 'r+') + f2 = open(os.path.join(args.mnt2, args.fn), 'r+') + + f1.write('foo') + f1.flush() + a = f2.read(3) + print('got "%s"' % a) + assert a == 'foo' + f2.write('bar') + f2.flush() + a = f1.read(3) + print('got "%s"' % a) + assert a == 'bar' + + ## test short reads + f1.write('short') + f1.flush() + a = f2.read(100) + print('got "%s"' % a) + assert a == 'short' + f2.write('longer') + f2.flush() + a = f1.read(1000) + print('got "%s"' % a) + assert a == 'longer' + + print('ok') + +main() diff --git a/qa/workunits/fs/norstats/kernel_untar_tar.sh b/qa/workunits/fs/norstats/kernel_untar_tar.sh new file mode 100755 index 00000000..6a175dcd --- /dev/null +++ b/qa/workunits/fs/norstats/kernel_untar_tar.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +# check if there is file changed while being archived + +set -e + +KERNEL=linux-4.0.5 + +wget -q http://download.ceph.com/qa/$KERNEL.tar.xz + +mkdir untar_tar +cd untar_tar + +tar Jxvf ../$KERNEL.tar.xz $KERNEL/Documentation/ +tar cf doc.tar $KERNEL + +tar xf doc.tar +sync +tar c $KERNEL >/dev/null + +rm -rf $KERNEL + +tar xf doc.tar +sync +tar c $KERNEL >/dev/null + +echo Ok diff --git a/qa/workunits/fs/quota/quota.sh b/qa/workunits/fs/quota/quota.sh new file mode 100755 index 00000000..1315be6d --- /dev/null +++ b/qa/workunits/fs/quota/quota.sh @@ -0,0 +1,128 @@ +#!/usr/bin/env bash + +set -ex + +function expect_false() +{ + set -x + if "$@"; then return 1; else return 0; fi +} + +function write_file() +{ + set +x + for ((i=1;i<=$2;i++)) + do + dd if=/dev/zero of=$1 bs=1M count=1 conv=notrunc oflag=append 2>/dev/null >/dev/null + if [ $? != 0 ]; then + echo Try to write $(($i * 1048576)) + set -x + return 1 + fi + sleep 0.05 + done + set -x + return 0 +} + +mkdir quota-test +cd quota-test + +# bytes +setfattr . -n ceph.quota.max_bytes -v 100000000 # 100m +expect_false write_file big 1000 # 1g +expect_false write_file second 10 +setfattr . -n ceph.quota.max_bytes -v 0 +dd if=/dev/zero of=third bs=1M count=10 +dd if=/dev/zero of=big2 bs=1M count=100 + + +rm -rf * + +# files +setfattr . -n ceph.quota.max_files -v 5 +mkdir ok +touch ok/1 +touch ok/2 +touch 3 +expect_false touch shouldbefail # 5 files will include the "." +expect_false touch ok/shouldbefail # 5 files will include the "." +setfattr . 
-n ceph.quota.max_files -v 0 +touch shouldbecreated +touch shouldbecreated2 + + +rm -rf * + +# mix +mkdir bytes bytes/files + +setfattr bytes -n ceph.quota.max_bytes -v 10000000 #10m +setfattr bytes/files -n ceph.quota.max_files -v 5 +dd if=/dev/zero of=bytes/files/1 bs=1M count=4 +dd if=/dev/zero of=bytes/files/2 bs=1M count=4 +expect_false write_file bytes/files/3 1000 +expect_false write_file bytes/files/4 1000 +expect_false write_file bytes/files/5 1000 +stat --printf="%n %s\n" bytes/files/1 #4M +stat --printf="%n %s\n" bytes/files/2 #4M +stat --printf="%n %s\n" bytes/files/3 #bigger than 2M +stat --printf="%n %s\n" bytes/files/4 #should be zero +expect_false stat bytes/files/5 #shouldn't be exist + + + + +rm -rf * + +#mv +mkdir files limit +truncate files/file -s 10G +setfattr limit -n ceph.quota.max_bytes -v 1000000 #1m +expect_false mv files limit/ + + + +rm -rf * + +#limit by ancestor + +mkdir -p ancestor/p1/p2/parent/p3 +setfattr ancestor -n ceph.quota.max_bytes -v 1000000 +setfattr ancestor/p1/p2/parent -n ceph.quota.max_bytes -v 1000000000 #1g +expect_false write_file ancestor/p1/p2/parent/p3/file1 900 #900m +stat --printf="%n %s\n" ancestor/p1/p2/parent/p3/file1 + + +#get/set attribute + +setfattr -n ceph.quota.max_bytes -v 0 . +setfattr -n ceph.quota.max_bytes -v 1 . +setfattr -n ceph.quota.max_bytes -v 9223372036854775807 . +expect_false setfattr -n ceph.quota.max_bytes -v 9223372036854775808 . +expect_false setfattr -n ceph.quota.max_bytes -v -1 . +expect_false setfattr -n ceph.quota.max_bytes -v -9223372036854775808 . +expect_false setfattr -n ceph.quota.max_bytes -v -9223372036854775809 . + +setfattr -n ceph.quota.max_files -v 0 . +setfattr -n ceph.quota.max_files -v 1 . +setfattr -n ceph.quota.max_files -v 9223372036854775807 . +expect_false setfattr -n ceph.quota.max_files -v 9223372036854775808 . +expect_false setfattr -n ceph.quota.max_files -v -1 . +expect_false setfattr -n ceph.quota.max_files -v -9223372036854775808 . +expect_false setfattr -n ceph.quota.max_files -v -9223372036854775809 . + +setfattr -n ceph.quota -v "max_bytes=0 max_files=0" . +setfattr -n ceph.quota -v "max_bytes=1 max_files=0" . +setfattr -n ceph.quota -v "max_bytes=0 max_files=1" . +setfattr -n ceph.quota -v "max_bytes=1 max_files=1" . +expect_false setfattr -n ceph.quota -v "max_bytes=-1 max_files=0" . +expect_false setfattr -n ceph.quota -v "max_bytes=0 max_files=-1" . +expect_false setfattr -n ceph.quota -v "max_bytes=-1 max_files=-1" . + +#addme + +cd .. 
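+# quota checks are done; remove the test directory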
+rm -rf quota-test + +echo OK diff --git a/qa/workunits/fs/snap-hierarchy.sh b/qa/workunits/fs/snap-hierarchy.sh new file mode 100755 index 00000000..67f0e014 --- /dev/null +++ b/qa/workunits/fs/snap-hierarchy.sh @@ -0,0 +1,24 @@ +#!/bin/sh + +set -ex + +if [ -d "$1" ]; then + mkdir -p -- "$1" && cd "$1" +fi + +[ "$VERIFY" != verify ] && mkdir 1 +[ "$VERIFY" != verify ] && mkdir 1/.snap/first +stat 1/.snap/first +[ "$VERIFY" != verify ] && mkdir 1/2 +stat 1/.snap/first/2 && exit 1 +[ "$VERIFY" != verify ] && mkdir 1/2/.snap/second +stat 1/2/.snap/second +[ "$VERIFY" != verify ] && touch 1/foo +stat 1/.snap/first/foo && exit 1 +[ "$VERIFY" != verify ] && mkdir 1/.snap/third +stat 1/.snap/third/foo || exit 1 +[ "$VERIFY" != verify ] && mkdir 1/2/3 +[ "$VERIFY" != verify ] && mkdir 1/2/.snap/fourth +stat 1/2/.snap/fourth/3 + +exit 0 diff --git a/qa/workunits/fs/snaps/snap-rm-diff.sh b/qa/workunits/fs/snaps/snap-rm-diff.sh new file mode 100755 index 00000000..63f64287 --- /dev/null +++ b/qa/workunits/fs/snaps/snap-rm-diff.sh @@ -0,0 +1,11 @@ +#!/bin/sh -ex + +ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it +wget -q http://download.ceph.com/qa/linux-2.6.33.tar.bz2 +mkdir foo +cp linux* foo +mkdir foo/.snap/barsnap +rm foo/linux* +diff -q foo/.snap/barsnap/linux* linux* && echo "passed: files are identical" +rmdir foo/.snap/barsnap +echo OK diff --git a/qa/workunits/fs/snaps/snaptest-0.sh b/qa/workunits/fs/snaps/snaptest-0.sh new file mode 100755 index 00000000..791caf9e --- /dev/null +++ b/qa/workunits/fs/snaps/snaptest-0.sh @@ -0,0 +1,27 @@ +#!/bin/sh -x + +expect_failure() { + if "$@"; then return 1; else return 0; fi +} +set -e + +ceph fs set cephfs allow_new_snaps false +expect_failure mkdir .snap/foo +ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it + +echo asdf > foo +mkdir .snap/foo +grep asdf .snap/foo/foo +rmdir .snap/foo + +echo asdf > bar +mkdir .snap/bar +rm bar +grep asdf .snap/bar/bar +rmdir .snap/bar +rm foo + +ceph fs set cephfs allow_new_snaps false +expect_failure mkdir .snap/baz + +echo OK diff --git a/qa/workunits/fs/snaps/snaptest-1.sh b/qa/workunits/fs/snaps/snaptest-1.sh new file mode 100755 index 00000000..476531fc --- /dev/null +++ b/qa/workunits/fs/snaps/snaptest-1.sh @@ -0,0 +1,31 @@ +#!/usr/bin/env bash + +set -ex + +ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it + +echo 1 > file1 +echo 2 > file2 +echo 3 > file3 +[ -e file4 ] && rm file4 +mkdir .snap/snap1 +echo 4 > file4 +now=`ls` +then=`ls .snap/snap1` +rmdir .snap/snap1 +if [ "$now" = "$then" ]; then + echo live and snap contents are identical? + false +fi + +# do it again +echo 1 > file1 +echo 2 > file2 +echo 3 > file3 +mkdir .snap/snap1 +echo 4 > file4 +rmdir .snap/snap1 + +rm file? + +echo OK diff --git a/qa/workunits/fs/snaps/snaptest-2.sh b/qa/workunits/fs/snaps/snaptest-2.sh new file mode 100755 index 00000000..6ded7b66 --- /dev/null +++ b/qa/workunits/fs/snaps/snaptest-2.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it + +echo "Create dir 100 to 199 ..." +for i in $(seq 100 199); do + echo " create dir $i" + mkdir "$i" + for y in $(seq 10 20); do + echo "This is a test file before any snapshot was taken." >"$i/$y" + done +done + +echo "Take first snapshot .snap/test1" +mkdir .snap/test1 + +echo "Create dir 200 to 299 ..." +for i in $(seq 200 299); do + echo " create dir $i" + mkdir $i + for y in $(seq 20 29); do + echo "This is a test file. 
Created after .snap/test1" >"$i/$y" + done +done + +echo "Create a snapshot in every first level dir ..." +for dir in $(ls); do + echo " create $dir/.snap/snap-subdir-test" + mkdir "$dir/.snap/snap-subdir-test" + for y in $(seq 30 39); do + echo " create $dir/$y file after the snapshot" + echo "This is a test file. Created after $dir/.snap/snap-subdir-test" >"$dir/$y" + done +done + +echo "Take second snapshot .snap/test2" +mkdir .snap/test2 + +echo "Copy content of .snap/test1 to copyofsnap1 ..." +mkdir copyofsnap1 +cp -Rv .snap/test1 copyofsnap1/ + + +echo "Take third snapshot .snap/test3" +mkdir .snap/test3 + +echo "Delete the snapshots..." + +find ./ -type d -print | \ + xargs -I% -n1 find %/.snap -mindepth 1 -maxdepth 1 \ + \( ! -name "_*" \) -print 2>/dev/null + +find ./ -type d -print | \ + xargs -I% -n1 find %/.snap -mindepth 1 -maxdepth 1 \ + \( ! -name "_*" \) -print 2>/dev/null | \ + xargs -n1 rmdir + +echo "Delete all the files and directories ..." +rm -Rfv ./* + +echo OK diff --git a/qa/workunits/fs/snaps/snaptest-authwb.sh b/qa/workunits/fs/snaps/snaptest-authwb.sh new file mode 100755 index 00000000..2c53e2a6 --- /dev/null +++ b/qa/workunits/fs/snaps/snaptest-authwb.sh @@ -0,0 +1,14 @@ +#!/bin/sh -x + +set -e + +ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it + +touch foo +chmod +x foo +mkdir .snap/s +find .snap/s/foo -executable | grep foo +rmdir .snap/s +rm foo + +echo OK \ No newline at end of file diff --git a/qa/workunits/fs/snaps/snaptest-capwb.sh b/qa/workunits/fs/snaps/snaptest-capwb.sh new file mode 100755 index 00000000..f36d38ab --- /dev/null +++ b/qa/workunits/fs/snaps/snaptest-capwb.sh @@ -0,0 +1,35 @@ +#!/bin/sh -x + +set -e + +mkdir foo + +ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it + +# make sure mds handles it when the client does not send flushsnap +echo x > foo/x +sync +mkdir foo/.snap/ss +ln foo/x foo/xx +cat foo/.snap/ss/x +rmdir foo/.snap/ss + +# +echo a > foo/a +echo b > foo/b +mkdir foo/.snap/s +r=`cat foo/.snap/s/a` +[ -z "$r" ] && echo "a appears empty in snapshot" && false + +ln foo/b foo/b2 +cat foo/.snap/s/b + +echo "this used to hang:" +echo more >> foo/b2 +echo "oh, it didn't hang! good job." +cat foo/b +rmdir foo/.snap/s + +rm -r foo + +echo OK \ No newline at end of file diff --git a/qa/workunits/fs/snaps/snaptest-dir-rename.sh b/qa/workunits/fs/snaps/snaptest-dir-rename.sh new file mode 100755 index 00000000..85b929a2 --- /dev/null +++ b/qa/workunits/fs/snaps/snaptest-dir-rename.sh @@ -0,0 +1,19 @@ +#!/bin/sh -x + +set -e + +ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it + +# +# make sure we keep an existing dn's seq +# + +mkdir a +mkdir .snap/bar +mkdir a/.snap/foo +rmdir a/.snap/foo +rmdir a +stat .snap/bar/a +rmdir .snap/bar + +echo OK \ No newline at end of file diff --git a/qa/workunits/fs/snaps/snaptest-double-null.sh b/qa/workunits/fs/snaps/snaptest-double-null.sh new file mode 100755 index 00000000..49a1b271 --- /dev/null +++ b/qa/workunits/fs/snaps/snaptest-double-null.sh @@ -0,0 +1,25 @@ +#!/bin/sh -x + +set -e + +ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it + +# multiple intervening snapshots with no modifications, and thus no +# snapflush client_caps messages. make sure the mds can handle this. 
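+# repeat the whole sequence to exercise this path more than once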
+ +for f in `seq 1 20` ; do + +mkdir a +cat > a/foo & +mkdir a/.snap/one +mkdir a/.snap/two +chmod 777 a/foo +sync # this might crash the mds +ps +rmdir a/.snap/* +rm a/foo +rmdir a + +done + +echo OK diff --git a/qa/workunits/fs/snaps/snaptest-estale.sh b/qa/workunits/fs/snaps/snaptest-estale.sh new file mode 100755 index 00000000..e005b9a8 --- /dev/null +++ b/qa/workunits/fs/snaps/snaptest-estale.sh @@ -0,0 +1,15 @@ +#!/bin/sh -x + +ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it + +mkdir .snap/foo + +echo "We want ENOENT, not ESTALE, here." +for f in `seq 1 100` +do + stat .snap/foo/$f 2>&1 | grep 'No such file' +done + +rmdir .snap/foo + +echo "OK" diff --git a/qa/workunits/fs/snaps/snaptest-git-ceph.sh b/qa/workunits/fs/snaps/snaptest-git-ceph.sh new file mode 100755 index 00000000..50b854a5 --- /dev/null +++ b/qa/workunits/fs/snaps/snaptest-git-ceph.sh @@ -0,0 +1,35 @@ +#!/bin/sh -x + +set -e + +ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it + +git clone git://git.ceph.com/ceph.git +cd ceph + +versions=`seq 1 21` + +for v in $versions +do + ver="v0.$v" + echo $ver + git reset --hard $ver + mkdir .snap/$ver +done + +for v in $versions +do + ver="v0.$v" + echo checking $ver + cd .snap/$ver + git diff --exit-code + cd ../.. +done + +for v in $versions +do + ver="v0.$v" + rmdir .snap/$ver +done + +echo OK diff --git a/qa/workunits/fs/snaps/snaptest-hardlink.sh b/qa/workunits/fs/snaps/snaptest-hardlink.sh new file mode 100755 index 00000000..9848a019 --- /dev/null +++ b/qa/workunits/fs/snaps/snaptest-hardlink.sh @@ -0,0 +1,27 @@ +#!/bin/sh -x + +set -e + +ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it + +mkdir 1 2 +echo asdf >1/file1 +echo asdf >1/file2 + +ln 1/file1 2/file1 +ln 1/file2 2/file2 + +mkdir 2/.snap/s1 + +echo qwer >1/file1 +grep asdf 2/.snap/s1/file1 + +rm -f 1/file2 +grep asdf 2/.snap/s1/file2 +rm -f 2/file2 +grep asdf 2/.snap/s1/file2 + +rmdir 2/.snap/s1 +rm -rf 1 2 + +echo OK diff --git a/qa/workunits/fs/snaps/snaptest-intodir.sh b/qa/workunits/fs/snaps/snaptest-intodir.sh new file mode 100755 index 00000000..94af4422 --- /dev/null +++ b/qa/workunits/fs/snaps/snaptest-intodir.sh @@ -0,0 +1,24 @@ +#!/bin/sh -ex + +ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it + +# this tests fix for #1399 +mkdir foo +mkdir foo/.snap/one +touch bar +mv bar foo +sync +# should not crash :) + +mkdir baz +mkdir baz/.snap/two +mv baz foo +sync +# should not crash :) + +# clean up. 
+rmdir foo/baz/.snap/two +rmdir foo/.snap/one +rm -r foo + +echo OK \ No newline at end of file diff --git a/qa/workunits/fs/snaps/snaptest-multiple-capsnaps.sh b/qa/workunits/fs/snaps/snaptest-multiple-capsnaps.sh new file mode 100755 index 00000000..56ceaa8a --- /dev/null +++ b/qa/workunits/fs/snaps/snaptest-multiple-capsnaps.sh @@ -0,0 +1,44 @@ +#!/bin/sh -x + +set -e + +ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it + +echo asdf > a +mkdir .snap/1 +chmod 777 a +mkdir .snap/2 +echo qwer > a +mkdir .snap/3 +chmod 666 a +mkdir .snap/4 +echo zxcv > a +mkdir .snap/5 + +ls -al .snap/?/a + +grep asdf .snap/1/a +stat .snap/1/a | grep 'Size: 5' + +grep asdf .snap/2/a +stat .snap/2/a | grep 'Size: 5' +stat .snap/2/a | grep -- '-rwxrwxrwx' + +grep qwer .snap/3/a +stat .snap/3/a | grep 'Size: 5' +stat .snap/3/a | grep -- '-rwxrwxrwx' + +grep qwer .snap/4/a +stat .snap/4/a | grep 'Size: 5' +stat .snap/4/a | grep -- '-rw-rw-rw-' + +grep zxcv .snap/5/a +stat .snap/5/a | grep 'Size: 5' +stat .snap/5/a | grep -- '-rw-rw-rw-' + +rmdir .snap/[12345] + +echo "OK" + + + diff --git a/qa/workunits/fs/snaps/snaptest-parents.sh b/qa/workunits/fs/snaps/snaptest-parents.sh new file mode 100755 index 00000000..a66a977f --- /dev/null +++ b/qa/workunits/fs/snaps/snaptest-parents.sh @@ -0,0 +1,41 @@ +#!/bin/sh + +set -e + +ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it + +echo "making directory tree and files" +mkdir -p 1/a/b/c/ +echo "i'm file1" > 1/a/file1 +echo "i'm file2" > 1/a/b/file2 +echo "i'm file3" > 1/a/b/c/file3 +echo "snapshotting" +mkdir 1/.snap/foosnap1 +mkdir 2 +echo "moving tree" +mv 1/a 2 +echo "checking snapshot contains tree..." +dir1=`find 1/.snap/foosnap1 | wc -w` +dir2=`find 2/ | wc -w` +#diff $dir1 $dir2 && echo "Success!" +test $dir1==$dir2 && echo "Success!" +echo "adding folder and file to tree..." +mkdir 2/a/b/c/d +echo "i'm file 4!" > 2/a/b/c/d/file4 +echo "snapshotting tree 2" +mkdir 2/.snap/barsnap2 +echo "comparing snapshots" +dir1=`find 1/.snap/foosnap1/ -maxdepth 2 | wc -w` +dir2=`find 2/.snap/barsnap2/ -maxdepth 2 | wc -w` +#diff $dir1 $dir2 && echo "Success!" +test $dir1==$dir2 && echo "Success!" +echo "moving subtree to first folder" +mv 2/a/b/c 1 +echo "comparing snapshots and new tree" +dir1=`find 1/ | wc -w` +dir2=`find 2/.snap/barsnap2/a/b/c | wc -w` +#diff $dir1 $dir2 && echo "Success!" +test $dir1==$dir2 && echo "Success!" 
+rmdir 1/.snap/* +rmdir 2/.snap/* +echo "OK" diff --git a/qa/workunits/fs/snaps/snaptest-realm-split.sh b/qa/workunits/fs/snaps/snaptest-realm-split.sh new file mode 100755 index 00000000..3f01fd54 --- /dev/null +++ b/qa/workunits/fs/snaps/snaptest-realm-split.sh @@ -0,0 +1,33 @@ +#!/bin/sh -x + +set -e + +ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it + +mkdir -p 1/a +exec 3<> 1/a/file1 + +echo -n a >&3 + +mkdir 1/.snap/s1 + +echo -n b >&3 + +mkdir 2 +# create new snaprealm at dir a, file1's cap should be attached to the new snaprealm +mv 1/a 2 + +mkdir 2/.snap/s2 + +echo -n c >&3 + +exec 3>&- + +grep '^a$' 1/.snap/s1/a/file1 +grep '^ab$' 2/.snap/s2/a/file1 +grep '^abc$' 2/a/file1 + +rmdir 1/.snap/s1 +rmdir 2/.snap/s2 +rm -rf 1 2 +echo OK diff --git a/qa/workunits/fs/snaps/snaptest-snap-rename.sh b/qa/workunits/fs/snaps/snaptest-snap-rename.sh new file mode 100755 index 00000000..414ba0e3 --- /dev/null +++ b/qa/workunits/fs/snaps/snaptest-snap-rename.sh @@ -0,0 +1,35 @@ +#!/bin/sh -x + +expect_failure() { + if "$@"; then return 1; else return 0; fi +} +set -e + +ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it + +mkdir -p d1/d2 +mkdir -p d1/d3 +mkdir d1/.snap/foo +mkdir d1/d2/.snap/foo +mkdir d1/d3/.snap/foo +mkdir d1/d3/.snap/bar +mv d1/d2/.snap/foo d1/d2/.snap/bar +# snapshot name can't start with _ +expect_failure mv d1/d2/.snap/bar d1/d2/.snap/_bar +# can't rename parent snapshot +expect_failure mv d1/d2/.snap/_foo_* d1/d2/.snap/foo +expect_failure mv d1/d2/.snap/_foo_* d1/d2/.snap/_foo_1 +# can't rename snapshot to different directroy +expect_failure mv d1/d2/.snap/bar d1/.snap/ +# can't overwrite existing snapshot +expect_failure python -c "import os; os.rename('d1/d3/.snap/foo', 'd1/d3/.snap/bar')" +# can't move snaphost out of snapdir +expect_failure python -c "import os; os.rename('d1/.snap/foo', 'd1/foo')" + +rmdir d1/.snap/foo +rmdir d1/d2/.snap/bar +rmdir d1/d3/.snap/foo +rmdir d1/d3/.snap/bar +rm -rf d1 + +echo OK diff --git a/qa/workunits/fs/snaps/snaptest-snap-rm-cmp.sh b/qa/workunits/fs/snaps/snaptest-snap-rm-cmp.sh new file mode 100755 index 00000000..c5bd65e9 --- /dev/null +++ b/qa/workunits/fs/snaps/snaptest-snap-rm-cmp.sh @@ -0,0 +1,26 @@ +#!/bin/sh -x + +set -e + +ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it + +file=linux-2.6.33.tar.bz2 +wget -q http://download.ceph.com/qa/$file + +real=`md5sum $file | awk '{print $1}'` + +for f in `seq 1 20` +do + echo $f + cp $file a + mkdir .snap/s + rm a + cp .snap/s/a /tmp/a + cur=`md5sum /tmp/a | awk '{print $1}'` + if [ "$cur" != "$real" ]; then + echo "FAIL: bad match, /tmp/a $cur != real $real" + false + fi + rmdir .snap/s +done +rm $file diff --git a/qa/workunits/fs/snaps/snaptest-upchildrealms.sh b/qa/workunits/fs/snaps/snaptest-upchildrealms.sh new file mode 100755 index 00000000..a4cc9ab3 --- /dev/null +++ b/qa/workunits/fs/snaps/snaptest-upchildrealms.sh @@ -0,0 +1,30 @@ +#!/bin/sh -x + +set -e + +ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it + +# +# verify that a snap update on a parent realm will induce +# snap cap writeback for inodes child realms +# + +mkdir a +mkdir a/b +mkdir a/.snap/a1 +mkdir a/b/.snap/b1 +echo asdf > a/b/foo +mkdir a/.snap/a2 +# client _should_ have just queued a capsnap for writeback +ln a/b/foo a/b/bar # make the server cow the inode + +echo "this should not hang..." +cat a/b/.snap/_a2_*/foo +echo "good, it did not hang." 
+ +rmdir a/b/.snap/b1 +rmdir a/.snap/a1 +rmdir a/.snap/a2 +rm -r a + +echo "OK" \ No newline at end of file diff --git a/qa/workunits/fs/snaps/snaptest-xattrwb.sh b/qa/workunits/fs/snaps/snaptest-xattrwb.sh new file mode 100755 index 00000000..09398878 --- /dev/null +++ b/qa/workunits/fs/snaps/snaptest-xattrwb.sh @@ -0,0 +1,31 @@ +#!/bin/sh -x + +set -e + +ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it + +echo "testing simple xattr wb" +touch x +setfattr -n user.foo x +mkdir .snap/s1 +getfattr -n user.foo .snap/s1/x | grep user.foo +rm x +rmdir .snap/s1 + +echo "testing wb with pre-wb server cow" +mkdir a +mkdir a/b +mkdir a/b/c +# b now has As but not Ax +setfattr -n user.foo a/b +mkdir a/.snap/s +mkdir a/b/cc +# b now has been cowed on the server, but we still have dirty xattr caps +getfattr -n user.foo a/b # there they are... +getfattr -n user.foo a/.snap/s/b | grep user.foo # should be there, too! + +# ok, clean up +rmdir a/.snap/s +rm -r a + +echo OK \ No newline at end of file diff --git a/qa/workunits/fs/snaps/untar_snap_rm.sh b/qa/workunits/fs/snaps/untar_snap_rm.sh new file mode 100755 index 00000000..928e8911 --- /dev/null +++ b/qa/workunits/fs/snaps/untar_snap_rm.sh @@ -0,0 +1,20 @@ +#!/bin/sh + +set -e + +ceph fs set cephfs allow_new_snaps true --yes-i-really-mean-it + +do_tarball() { + wget http://download.ceph.com/qa/$1 + tar xvf$2 $1 + mkdir .snap/k + sync + rm -rv $3 + cp -av .snap/k . + rmdir .snap/k + rm -rv k + rm $1 +} + +do_tarball coreutils_8.5.orig.tar.gz z coreutils-8.5 +do_tarball linux-2.6.33.tar.bz2 j linux-2.6.33 diff --git a/qa/workunits/fs/test_o_trunc.c b/qa/workunits/fs/test_o_trunc.c new file mode 100644 index 00000000..1ce19e4b --- /dev/null +++ b/qa/workunits/fs/test_o_trunc.c @@ -0,0 +1,45 @@ +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#include <fcntl.h> +#include <sys/types.h> +#include <sys/stat.h> + +int main(int argc, char *argv[]) +{ + char obuf[32], ibuf[1024]; + int n, max = 0; + + if (argc > 2) + max = atoi(argv[2]); + if (!max) + max = 600; + + memset(obuf, 0xff, sizeof(obuf)); + + for (n = 1; n <= max; ++n) { + int fd, ret; + fd = open(argv[1], O_RDWR | O_CREAT | O_TRUNC, 0644); + printf("%d/%d: open fd = %d\n", n, max, fd); + + ret = write(fd, obuf, sizeof(obuf)); + printf("write ret = %d\n", ret); + + sleep(1); + + ret = write(fd, obuf, sizeof(obuf)); + printf("write ret = %d\n", ret); + + ret = pread(fd, ibuf, sizeof(ibuf), 0); + printf("pread ret = %d\n", ret); + + if (memcmp(obuf, ibuf, sizeof(obuf))) { + printf("mismatch\n"); + close(fd); + break; + } + close(fd); + } + return 0; +} diff --git a/qa/workunits/fs/test_o_trunc.sh b/qa/workunits/fs/test_o_trunc.sh new file mode 100755 index 00000000..90a72600 --- /dev/null +++ b/qa/workunits/fs/test_o_trunc.sh @@ -0,0 +1,7 @@ +#!/bin/sh -ex + +mydir=`dirname $0` +$mydir/test_o_trunc trunc.foo 600 + +echo OK + diff --git a/qa/workunits/fs/test_python.sh b/qa/workunits/fs/test_python.sh new file mode 100755 index 00000000..656d89f0 --- /dev/null +++ b/qa/workunits/fs/test_python.sh @@ -0,0 +1,6 @@ +#!/bin/sh -ex + +# Running as root because the filesystem root directory will be +# owned by uid 0, and that's where we're writing.
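+# run the cephfs pybind tests in place from the source tree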
+sudo nosetests -v $(dirname $0)/../../../src/test/pybind/test_cephfs.py +exit 0 diff --git a/qa/workunits/fs/upgrade/volume_client b/qa/workunits/fs/upgrade/volume_client new file mode 100755 index 00000000..37ee954c --- /dev/null +++ b/qa/workunits/fs/upgrade/volume_client @@ -0,0 +1,110 @@ +#!/bin/bash + +set -ex + +PYTHON="python2" + +function run_payload { + local payload="$1" + sudo "$PYTHON" <&2 + sudo touch -- "$keyring" + sudo ceph-authtool "$keyring" --import-keyring "$T" + rm -f -- "$T" +} + +function conf_keys { + local client="$1" + ls /etc/ceph >&2 + ceph auth get-or-create "client.manila" mds 'allow *' osd 'allow rw' mon 'allow *' | import_key "$client" /etc/ceph/ceph.keyring +} + +function create_data_isolated { + local PAYLOAD=' +vp = VolumePath(None, "vol_isolated") +vc.create_volume(vp, (1<<33), data_isolated=True) +auth_result = vc.authorize(vp, "vol_data_isolated", tenant_id="test") +print("[client.vol_data_isolated]\n\tkey = ", auth_result["auth_key"]) +' + + run_payload "$PAYLOAD" | import_key "vol_data_isolated" +} + +function create_default { + local PAYLOAD=' +vp = VolumePath(None, "vol_default") +vc.create_volume(vp, (1<<33)) +auth_result = vc.authorize(vp, "vol_default", tenant_id="test") +print("[client.vol_default]\n\tkey = ", auth_result["auth_key"]) +' + run_payload "$PAYLOAD" | import_key "vol_default" +} + +function create { + create_data_isolated + create_default +} + +function populate { + pwd + df -h . + ls -l + cp -a /usr/bin . +} + +function verify_data_isolated { + ceph fs subvolume getpath cephfs vol_isolated + stat bin + ls bin | tail +} + +function verify_default { + ceph fs subvolume getpath cephfs vol_default + stat bin + ls bin | tail +} + +function verify { + diff <(ceph fs subvolume ls cephfs | jq -cS 'sort_by(.name)' | tee /dev/stderr) <(printf '[{"name":"vol_isolated"},{"name":"vol_default"}]' | jq -cS 'sort_by(.name)') + verify_data_isolated + verify_default +} + +function main { + if [ "$1" = create ]; then + conf_keys + create + elif [ "$1" = populate ]; then + populate + elif [ "$1" = verify ]; then + # verify (sub)volumes still exist and are configured correctly + verify + else + exit 1 + fi +} + +main "$ACTION" diff --git a/qa/workunits/hadoop/repl.sh b/qa/workunits/hadoop/repl.sh new file mode 100755 index 00000000..84f6150a --- /dev/null +++ b/qa/workunits/hadoop/repl.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +set -e +set -x + +# bail if $TESTDIR is not set as this test will fail in that scenario +[ -z $TESTDIR ] && { echo "\$TESTDIR needs to be set, but is not. Exiting."; exit 1; } + +# if HADOOP_PREFIX is not set, use default +[ -z $HADOOP_PREFIX ] && { HADOOP_PREFIX=$TESTDIR/hadoop; } + +# create pools with different replication factors +for repl in 2 3 7 8 9; do + name=hadoop.$repl + ceph osd pool create $name 8 8 + ceph osd pool set $name size $repl + + id=`ceph osd dump | sed -n "s/^pool \([0-9]*\) '$name'.*/\1/p"` + ceph fs add_data_pool cephfs $id +done + +# create a file in each of the pools +for repl in 2 3 7 8 9; do + name=hadoop.$repl + $HADOOP_PREFIX/bin/hadoop fs -rm -f /$name.dat + dd if=/dev/zero bs=1048576 count=1 | \ + $HADOOP_PREFIX/bin/hadoop fs -Dceph.data.pools="$name" \ + -put - /$name.dat +done + +# check that hadoop reports replication matching +# that of the pool the file was written into +for repl in 2 3 7 8 9; do + name=hadoop.$repl + repl2=$($HADOOP_PREFIX/bin/hadoop fs -ls /$name.dat | awk '{print $2}') + if [ $repl -ne $repl2 ]; then + echo "replication factors didn't match!" 
+ exit 1 + fi +done + +exit 0 diff --git a/qa/workunits/hadoop/terasort.sh b/qa/workunits/hadoop/terasort.sh new file mode 100755 index 00000000..3d6988a2 --- /dev/null +++ b/qa/workunits/hadoop/terasort.sh @@ -0,0 +1,76 @@ +#!/usr/bin/env bash + +set -e +set -x + +INPUT=/terasort-input +OUTPUT=/terasort-output +REPORT=/tersort-report + +num_records=100000 +[ ! -z $NUM_RECORDS ] && num_records=$NUM_RECORDS + +# bail if $TESTDIR is not set as this test will fail in that scenario +[ -z $TESTDIR ] && { echo "\$TESTDIR needs to be set, but is not. Exiting."; exit 1; } + +# if HADOOP_PREFIX is not set, use default +[ -z $HADOOP_PREFIX ] && { HADOOP_PREFIX=$TESTDIR/hadoop; } + +# Nuke hadoop directories +$HADOOP_PREFIX/bin/hadoop fs -rm -r $INPUT $OUTPUT $REPORT || true + +# Generate terasort data +# +#-Ddfs.blocksize=512M \ +#-Dio.file.buffer.size=131072 \ +#-Dmapreduce.map.java.opts=-Xmx1536m \ +#-Dmapreduce.map.memory.mb=2048 \ +#-Dmapreduce.task.io.sort.mb=256 \ +#-Dyarn.app.mapreduce.am.resource.mb=1024 \ +#-Dmapred.map.tasks=64 \ +$HADOOP_PREFIX/bin/hadoop jar \ + $HADOOP_PREFIX/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar \ + teragen \ + -Dmapred.map.tasks=9 \ + $num_records \ + $INPUT + +# Run the sort job +# +#-Ddfs.blocksize=512M \ +#-Dio.file.buffer.size=131072 \ +#-Dmapreduce.map.java.opts=-Xmx1536m \ +#-Dmapreduce.map.memory.mb=2048 \ +#-Dmapreduce.map.output.compress=true \ +#-Dmapreduce.map.output.compress.codec=org.apache.hadoop.io.compress.Lz4Codec \ +#-Dmapreduce.reduce.java.opts=-Xmx1536m \ +#-Dmapreduce.reduce.memory.mb=2048 \ +#-Dmapreduce.task.io.sort.factor=100 \ +#-Dmapreduce.task.io.sort.mb=768 \ +#-Dyarn.app.mapreduce.am.resource.mb=1024 \ +#-Dmapred.reduce.tasks=100 \ +#-Dmapreduce.terasort.output.replication=1 \ +$HADOOP_PREFIX/bin/hadoop jar \ + $HADOOP_PREFIX/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar \ + terasort \ + -Dmapred.reduce.tasks=10 \ + $INPUT $OUTPUT + +# Validate the sorted data +# +#-Ddfs.blocksize=512M \ +#-Dio.file.buffer.size=131072 \ +#-Dmapreduce.map.java.opts=-Xmx1536m \ +#-Dmapreduce.map.memory.mb=2048 \ +#-Dmapreduce.reduce.java.opts=-Xmx1536m \ +#-Dmapreduce.reduce.memory.mb=2048 \ +#-Dmapreduce.task.io.sort.mb=256 \ +#-Dyarn.app.mapreduce.am.resource.mb=1024 \ +#-Dmapred.reduce.tasks=1 \ +$HADOOP_PREFIX/bin/hadoop jar \ + $HADOOP_PREFIX/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar \ + teravalidate \ + -Dmapred.reduce.tasks=1 \ + $OUTPUT $REPORT + +exit 0 diff --git a/qa/workunits/hadoop/wordcount.sh b/qa/workunits/hadoop/wordcount.sh new file mode 100755 index 00000000..616b08af --- /dev/null +++ b/qa/workunits/hadoop/wordcount.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash + +set -e +set -x + +WC_INPUT=/wc_input +WC_OUTPUT=/wc_output +DATA_INPUT=$(mktemp -d) + +echo "starting hadoop-wordcount test" + +# bail if $TESTDIR is not set as this test will fail in that scenario +[ -z $TESTDIR ] && { echo "\$TESTDIR needs to be set, but is not. 
Exiting."; exit 1; } + +# if HADOOP_PREFIX is not set, use default +[ -z $HADOOP_PREFIX ] && { HADOOP_PREFIX=$TESTDIR/hadoop; } + +# Nuke hadoop directories +$HADOOP_PREFIX/bin/hadoop fs -rm -r $WC_INPUT $WC_OUTPUT || true + +# Fetch and import testing data set +curl http://download.ceph.com/qa/hadoop_input_files.tar | tar xf - -C $DATA_INPUT +$HADOOP_PREFIX/bin/hadoop fs -copyFromLocal $DATA_INPUT $WC_INPUT +rm -rf $DATA_INPUT + +# Run the job +$HADOOP_PREFIX/bin/hadoop jar \ + $HADOOP_PREFIX/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar \ + wordcount $WC_INPUT $WC_OUTPUT + +# Cleanup +$HADOOP_PREFIX/bin/hadoop fs -rm -r $WC_INPUT $WC_OUTPUT || true + +echo "completed hadoop-wordcount test" +exit 0 diff --git a/qa/workunits/kernel_untar_build.sh b/qa/workunits/kernel_untar_build.sh new file mode 100755 index 00000000..fbab4aae --- /dev/null +++ b/qa/workunits/kernel_untar_build.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +set -e + +wget -O linux.tar.gz http://download.ceph.com/qa/linux-4.17.tar.gz + +mkdir t +cd t +tar xzf ../linux.tar.gz +cd linux* +make defconfig +make -j`grep -c processor /proc/cpuinfo` +cd .. +if ! rm -rv linux* ; then + echo "uh oh rm -r failed, it left behind:" + find . + exit 1 +fi +cd .. +rm -rv t linux* diff --git a/qa/workunits/libcephfs/test.sh b/qa/workunits/libcephfs/test.sh new file mode 100755 index 00000000..9d3656be --- /dev/null +++ b/qa/workunits/libcephfs/test.sh @@ -0,0 +1,8 @@ +#!/bin/sh -e + +ceph_test_libcephfs +ceph_test_libcephfs_access +ceph_test_libcephfs_reclaim +ceph_test_libcephfs_lazyio + +exit 0 diff --git a/qa/workunits/mgr/test_localpool.sh b/qa/workunits/mgr/test_localpool.sh new file mode 100755 index 00000000..40a749e8 --- /dev/null +++ b/qa/workunits/mgr/test_localpool.sh @@ -0,0 +1,21 @@ +#!/bin/sh -ex + +ceph config set mgr mgr/localpool/subtree host +ceph config set mgr mgr/localpool/failure_domain osd +ceph mgr module enable localpool + +while ! ceph osd pool ls | grep '^by-host-' +do + sleep 5 +done + +ceph mgr module disable localpool +for p in `ceph osd pool ls | grep '^by-host-'` +do + ceph osd pool rm $p $p --yes-i-really-really-mean-it +done + +ceph config rm mgr mgr/localpool/subtree +ceph config rm mgr mgr/localpool/failure_domain + +echo OK diff --git a/qa/workunits/mon/auth_caps.sh b/qa/workunits/mon/auth_caps.sh new file mode 100755 index 00000000..1f59ae1f --- /dev/null +++ b/qa/workunits/mon/auth_caps.sh @@ -0,0 +1,130 @@ +#!/usr/bin/env bash + +set -e +set -x +declare -A keymap + +combinations="r w x rw rx wx rwx" + +for i in ${combinations}; do + k="foo_$i" + k=`ceph auth get-or-create-key client.$i mon "allow $i"` || exit 1 + keymap["$i"]=$k +done + +# add special caps +keymap["all"]=`ceph auth get-or-create-key client.all mon 'allow *'` || exit 1 + +tmp=`mktemp` +ceph auth export > $tmp + +trap "rm $tmp" INT ERR EXIT QUIT 0 + +expect() { + + set +e + + local expected_ret=$1 + local ret + + shift + cmd=$@ + + eval $cmd + ret=$? 
+ + set -e + + if [[ $ret -ne $expected_ret ]]; then + echo "ERROR: running \'$cmd\': expected $expected_ret got $ret" + return 1 + fi + + return 0 +} + +read_ops() { + local caps=$1 + local has_read=1 has_exec=1 + local ret + local args + + ( echo $caps | grep 'r' ) || has_read=0 + ( echo $caps | grep 'x' ) || has_exec=0 + + if [[ "$caps" == "all" ]]; then + has_read=1 + has_exec=1 + fi + + ret=13 + if [[ $has_read -gt 0 && $has_exec -gt 0 ]]; then + ret=0 + fi + + args="--id $caps --key ${keymap[$caps]}" + + expect $ret ceph auth get client.admin $args + expect $ret ceph auth get-key client.admin $args + expect $ret ceph auth export $args + expect $ret ceph auth export client.admin $args + expect $ret ceph auth ls $args + expect $ret ceph auth print-key client.admin $args + expect $ret ceph auth print_key client.admin $args +} + +write_ops() { + + local caps=$1 + local has_read=1 has_write=1 has_exec=1 + local ret + local args + + ( echo $caps | grep 'r' ) || has_read=0 + ( echo $caps | grep 'w' ) || has_write=0 + ( echo $caps | grep 'x' ) || has_exec=0 + + if [[ "$caps" == "all" ]]; then + has_read=1 + has_write=1 + has_exec=1 + fi + + ret=13 + if [[ $has_read -gt 0 && $has_write -gt 0 && $has_exec -gt 0 ]]; then + ret=0 + fi + + args="--id $caps --key ${keymap[$caps]}" + + expect $ret ceph auth add client.foo $args + expect $ret "ceph auth caps client.foo mon 'allow *' $args" + expect $ret ceph auth get-or-create client.admin $args + expect $ret ceph auth get-or-create-key client.admin $args + expect $ret ceph auth get-or-create-key client.baz $args + expect $ret ceph auth del client.foo $args + expect $ret ceph auth del client.baz $args + expect $ret ceph auth import -i $tmp $args +} + +echo "running combinations: ${!keymap[@]}" + +subcmd=$1 + +for i in ${!keymap[@]}; do + echo "caps: $i" + if [[ -z "$subcmd" || "$subcmd" == "read" || "$subcmd" == "all" ]]; then + read_ops $i + fi + + if [[ -z "$subcmd" || "$subcmd" == "write" || "$subcmd" == "all" ]]; then + write_ops $i + fi +done + +# cleanup +for i in ${combinations} all; do + ceph auth del client.$i || exit 1 +done + +echo "OK" diff --git a/qa/workunits/mon/caps.py b/qa/workunits/mon/caps.py new file mode 100644 index 00000000..2634f776 --- /dev/null +++ b/qa/workunits/mon/caps.py @@ -0,0 +1,362 @@ +#!/usr/bin/python + +from __future__ import print_function + +import subprocess +import shlex +import errno +import sys +import os +import io +import re + +import six + +from ceph_argparse import * # noqa + +keyring_base = '/tmp/cephtest-caps.keyring' + +class UnexpectedReturn(Exception): + def __init__(self, cmd, ret, expected, msg): + if isinstance(cmd, list): + self.cmd = ' '.join(cmd) + else: + assert isinstance(cmd, str) or isinstance(cmd, six.text_type), \ + 'cmd needs to be either a list or a str' + self.cmd = cmd + self.cmd = str(self.cmd) + self.ret = int(ret) + self.expected = int(expected) + self.msg = str(msg) + + def __str__(self): + return repr('{c}: expected return {e}, got {r} ({o})'.format( + c=self.cmd, e=self.expected, r=self.ret, o=self.msg)) + +def call(cmd): + if isinstance(cmd, list): + args = cmd + elif isinstance(cmd, str) or isinstance(cmd, six.text_type): + args = shlex.split(cmd) + else: + assert False, 'cmd is not a string/unicode nor a list!' 
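+    # run the command and wait for it; callers get both the exit status and the Popen object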
+ + print('call: {0}'.format(args)) + proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + ret = proc.wait() + + return (ret, proc) + +def expect(cmd, expected_ret): + + try: + (r, p) = call(cmd) + except ValueError as e: + print('unable to run {c}: {err}'.format(c=repr(cmd), err=e.message), + file=sys.stderr) + return errno.EINVAL + + assert r == p.returncode, \ + 'wth? r was supposed to match returncode!' + + if r != expected_ret: + raise UnexpectedReturn(repr(cmd), r, expected_ret, str(p.stderr.read())) + + return p + +def expect_to_file(cmd, expected_ret, out_file, mode='a'): + + # Let the exception be propagated to the caller + p = expect(cmd, expected_ret) + assert p.returncode == expected_ret, \ + 'expected result doesn\'t match and no exception was thrown!' + + with io.open(out_file, mode) as file: + file.write(six.text_type(p.stdout.read())) + + return p + +class Command: + def __init__(self, cid, j): + self.cid = cid[3:] + self.perms = j['perm'] + self.module = j['module'] + + self.sig = '' + self.args = [] + for s in j['sig']: + if not isinstance(s, dict): + assert isinstance(s, str) or isinstance(s,six.text_type), \ + 'malformatted signature cid {0}: {1}\n{2}'.format(cid,s,j) + if len(self.sig) > 0: + self.sig += ' ' + self.sig += s + else: + self.args.append(s) + + def __str__(self): + return repr('command {0}: {1} (requires \'{2}\')'.format(self.cid,\ + self.sig, self.perms)) + + +def destroy_keyring(path): + if not os.path.exists(path): + raise Exception('oops! cannot remove inexistent keyring {0}'.format(path)) + + # grab all client entities from the keyring + entities = [m.group(1) for m in [re.match(r'\[client\.(.*)\]', l) + for l in [str(line.strip()) + for line in io.open(path,'r')]] if m is not None] + + # clean up and make sure each entity is gone + for e in entities: + expect('ceph auth del client.{0}'.format(e), 0) + expect('ceph auth get client.{0}'.format(e), errno.ENOENT) + + # remove keyring + os.unlink(path) + + return True + +def test_basic_auth(): + # make sure we can successfully add/del entities, change their caps + # and import/export keyrings. 
+ + expect('ceph auth add client.basicauth', 0) + expect('ceph auth caps client.basicauth mon \'allow *\'', 0) + # entity exists and caps do not match + expect('ceph auth add client.basicauth', errno.EINVAL) + # this command attempts to change an existing state and will fail + expect('ceph auth add client.basicauth mon \'allow w\'', errno.EINVAL) + expect('ceph auth get-or-create client.basicauth', 0) + expect('ceph auth get-key client.basicauth', 0) + expect('ceph auth get-or-create client.basicauth2', 0) + # cleanup + expect('ceph auth del client.basicauth', 0) + expect('ceph auth del client.basicauth2', 0) + + return True + +def gen_module_keyring(module): + module_caps = [ + ('all', '{t} \'allow service {s} rwx\'', 0), + ('none', '', errno.EACCES), + ('wrong', '{t} \'allow service foobar rwx\'', errno.EACCES), + ('right', '{t} \'allow service {s} {p}\'', 0), + ('no-execute', '{t} \'allow service {s} x\'', errno.EACCES) + ] + + keyring = '{0}.service-{1}'.format(keyring_base,module) + for perms in 'r rw x'.split(): + for (n,p,r) in module_caps: + c = p.format(t='mon', s=module, p=perms) + expect_to_file( + 'ceph auth get-or-create client.{cn}-{cp} {caps}'.format( + cn=n,cp=perms,caps=c), 0, keyring) + + return keyring + + +def test_all(): + + + perms = { + 'good': { + 'broad':[ + ('rwx', 'allow *'), + ('r', 'allow r'), + ('rw', 'allow rw'), + ('x', 'allow x'), + ], + 'service':[ + ('rwx', 'allow service {s} rwx'), + ('r', 'allow service {s} r'), + ('rw', 'allow service {s} rw'), + ('x', 'allow service {s} x'), + ], + 'command':[ + ('rwx', 'allow command "{c}"'), + ], + 'command-with':[ + ('rwx', 'allow command "{c}" with {kv}') + ], + 'command-with-prefix':[ + ('rwx', 'allow command "{c}" with {key} prefix {val}') + ] + }, + 'bad': { + 'broad':[ + ('none', ''), + ], + 'service':[ + ('none1', 'allow service foo rwx'), + ('none2', 'allow service foo r'), + ('none3', 'allow service foo rw'), + ('none4', 'allow service foo x'), + ], + 'command':[ + ('none', 'allow command foo'), + ], + 'command-with':[ + ('none', 'allow command "{c}" with foo=bar'), + ], + 'command-with-prefix':[ + ('none', 'allow command "{c}" with foo prefix bar'), + ], + } + } + + cmds = { + '':[ + { + 'cmd':('status', '', 'r') + }, + { + 'pre':'heap start_profiler', + 'cmd':('heap', 'heapcmd=stats', 'rw'), + 'post':'heap stop_profiler' + } + ], + 'auth':[ + { + 'pre':'', + 'cmd':('auth ls', '', 'r'), + 'post':'' + }, + { + 'pre':'auth get-or-create client.foo mon \'allow *\'', + 'cmd':('auth caps', 'entity="client.foo"', 'rw'), + 'post':'auth del client.foo' + } + ], + 'pg':[ + { + 'cmd':('pg getmap', '', 'r'), + }, + ], + 'mds':[ + { + 'cmd':('mds getmap', '', 'r'), + }, + ], + 'mon':[ + { + 'cmd':('mon getmap', '', 'r') + }, + { + 'cmd':('mon remove', 'name=a', 'rw') + } + ], + 'osd':[ + { + 'cmd':('osd getmap', '', 'r'), + }, + { + 'cmd':('osd pause', '', 'rw'), + 'post':'osd unpause' + }, + { + 'cmd':('osd crush dump', '', 'r') + }, + ], + 'config-key':[ + { + 'pre':'config-key set foo bar', + 'cmd':('config-key get', 'key=foo', 'r') + }, + { + 'pre':'config-key set foo bar', + 'cmd':('config-key del', 'key=foo', 'rw') + } + ] + } + + for (module,cmd_lst) in cmds.items(): + k = keyring_base + '.' 
+ module + for cmd in cmd_lst: + + (cmd_cmd, cmd_args, cmd_perm) = cmd['cmd'] + cmd_args_key = '' + cmd_args_val = '' + if len(cmd_args) > 0: + (cmd_args_key, cmd_args_val) = cmd_args.split('=') + + print('generating keyring for {m}/{c}'.format(m=module,c=cmd_cmd)) + # gen keyring + for (good_or_bad,kind_map) in perms.items(): + for (kind,lst) in kind_map.items(): + for (perm, cap) in lst: + cap_formatted = cap.format( + s=module, + c=cmd_cmd, + kv=cmd_args, + key=cmd_args_key, + val=cmd_args_val) + + if len(cap_formatted) == 0: + run_cap = '' + else: + run_cap = 'mon \'{fc}\''.format(fc=cap_formatted) + + cname = 'client.{gb}-{kind}-{p}'.format( + gb=good_or_bad,kind=kind,p=perm) + expect_to_file( + 'ceph auth get-or-create {n} {c}'.format( + n=cname,c=run_cap), 0, k) + # keyring generated + print('testing {m}/{c}'.format(m=module,c=cmd_cmd)) + + # test + for good_bad in perms.keys(): + for (kind,lst) in perms[good_bad].items(): + for (perm,_) in lst: + cname = 'client.{gb}-{k}-{p}'.format(gb=good_bad,k=kind,p=perm) + + if good_bad == 'good': + expect_ret = 0 + else: + expect_ret = errno.EACCES + + if ( cmd_perm not in perm ): + expect_ret = errno.EACCES + if 'with' in kind and len(cmd_args) == 0: + expect_ret = errno.EACCES + if 'service' in kind and len(module) == 0: + expect_ret = errno.EACCES + + if 'pre' in cmd and len(cmd['pre']) > 0: + expect('ceph {0}'.format(cmd['pre']), 0) + expect('ceph -n {cn} -k {k} {c} {arg_val}'.format( + cn=cname,k=k,c=cmd_cmd,arg_val=cmd_args_val), expect_ret) + if 'post' in cmd and len(cmd['post']) > 0: + expect('ceph {0}'.format(cmd['post']), 0) + # finish testing + destroy_keyring(k) + + + return True + + +def test_misc(): + + k = keyring_base + '.misc' + expect_to_file( + 'ceph auth get-or-create client.caps mon \'allow command "auth caps"' \ + ' with entity="client.caps"\'', 0, k) + expect('ceph -n client.caps -k {kf} mon_status'.format(kf=k), errno.EACCES) + expect('ceph -n client.caps -k {kf} auth caps client.caps mon \'allow *\''.format(kf=k), 0) + expect('ceph -n client.caps -k {kf} mon_status'.format(kf=k), 0) + destroy_keyring(k) + +def main(): + + test_basic_auth() + test_all() + test_misc() + + print('OK') + + return 0 + +if __name__ == '__main__': + main() diff --git a/qa/workunits/mon/caps.sh b/qa/workunits/mon/caps.sh new file mode 100755 index 00000000..c5db5650 --- /dev/null +++ b/qa/workunits/mon/caps.sh @@ -0,0 +1,90 @@ +#!/usr/bin/env bash + +set -x + +tmp=/tmp/cephtest-mon-caps-madness + +exit_on_error=1 + +[[ ! -z $TEST_EXIT_ON_ERROR ]] && exit_on_error=$TEST_EXIT_ON_ERROR + +if [ `uname` = FreeBSD ]; then + ETIMEDOUT=60 +else + ETIMEDOUT=110 +fi + +expect() +{ + cmd=$1 + expected_ret=$2 + + echo $cmd + eval $cmd >&/dev/null + ret=$? 
+ + if [[ $ret -ne $expected_ret ]]; then + echo "Error: Expected return $expected_ret, got $ret" + [[ $exit_on_error -eq 1 ]] && exit 1 + return 1 + fi + + return 0 +} + +expect "ceph auth get-or-create client.bazar > $tmp.bazar.keyring" 0 +expect "ceph -k $tmp.bazar.keyring --user bazar mon_status" 13 +ceph auth del client.bazar + +c="'allow command \"auth ls\", allow command mon_status'" +expect "ceph auth get-or-create client.foo mon $c > $tmp.foo.keyring" 0 +expect "ceph -k $tmp.foo.keyring --user foo mon_status" 0 +expect "ceph -k $tmp.foo.keyring --user foo auth ls" 0 +expect "ceph -k $tmp.foo.keyring --user foo auth export" 13 +expect "ceph -k $tmp.foo.keyring --user foo auth del client.bazar" 13 +expect "ceph -k $tmp.foo.keyring --user foo osd dump" 13 + +# monitor drops the subscribe message from client if it does not have enough caps +# for read from mon. in that case, the client will be waiting for mgrmap in vain, +# if it is instructed to send a command to mgr. "pg dump" is served by mgr. so, +# we need to set a timeout for testing this scenario. +# +# leave plenty of time here because the mons might be thrashing. +export CEPH_ARGS='--rados-mon-op-timeout=300' +expect "ceph -k $tmp.foo.keyring --user foo pg dump" $ETIMEDOUT +export CEPH_ARGS='' + +expect "ceph -k $tmp.foo.keyring --user foo quorum_status" 13 +ceph auth del client.foo + +c="'allow command service with prefix=list, allow command mon_status'" +expect "ceph auth get-or-create client.bar mon $c > $tmp.bar.keyring" 0 +expect "ceph -k $tmp.bar.keyring --user bar mon_status" 0 +expect "ceph -k $tmp.bar.keyring --user bar auth ls" 13 +expect "ceph -k $tmp.bar.keyring --user bar auth export" 13 +expect "ceph -k $tmp.bar.keyring --user bar auth del client.foo" 13 +expect "ceph -k $tmp.bar.keyring --user bar osd dump" 13 + +# again, we'll need to timeout. 
+export CEPH_ARGS='--rados-mon-op-timeout=300' +expect "ceph -k $tmp.bar.keyring --user bar pg dump" $ETIMEDOUT +export CEPH_ARGS='' + +expect "ceph -k $tmp.bar.keyring --user bar quorum_status" 13 +ceph auth del client.bar + +rm $tmp.bazar.keyring $tmp.foo.keyring $tmp.bar.keyring + +# invalid caps health warning +cat < $t1 +[osd.0] +keyring = foo +debug_xio = 66 +EOF +ceph config assimilate-conf -i $t1 | tee $t2 + +grep keyring $t2 +expect_false grep debug_xio $t2 +rm -f $t1 $t2 + +echo OK diff --git a/qa/workunits/mon/crush_ops.sh b/qa/workunits/mon/crush_ops.sh new file mode 100755 index 00000000..4ad8f354 --- /dev/null +++ b/qa/workunits/mon/crush_ops.sh @@ -0,0 +1,239 @@ +#!/usr/bin/env bash + +set -ex + +function expect_false() +{ + set -x + if "$@"; then return 1; else return 0; fi +} + +ceph osd crush dump + +# rules +ceph osd crush rule dump +ceph osd crush rule ls +ceph osd crush rule list + +ceph osd crush rule create-simple foo default host +ceph osd crush rule create-simple foo default host +ceph osd crush rule create-simple bar default host + +# make sure we're at luminous+ before using crush device classes +ceph osd require-osd-release nautilus +ceph osd crush rm-device-class all +ceph osd crush set-device-class ssd osd.0 +ceph osd crush set-device-class hdd osd.1 +ceph osd crush rule create-replicated foo-ssd default host ssd +ceph osd crush rule create-replicated foo-hdd default host hdd +ceph osd crush rule ls-by-class ssd | grep 'foo-ssd' +ceph osd crush rule ls-by-class ssd | expect_false grep 'foo-hdd' +ceph osd crush rule ls-by-class hdd | grep 'foo-hdd' +ceph osd crush rule ls-by-class hdd | expect_false grep 'foo-ssd' + +ceph osd erasure-code-profile set ec-foo-ssd crush-device-class=ssd m=2 k=2 +ceph osd pool create ec-foo 2 erasure ec-foo-ssd +ceph osd pool rm ec-foo ec-foo --yes-i-really-really-mean-it + +ceph osd crush rule ls | grep foo + +ceph osd crush rule rename foo foo-asdf +ceph osd crush rule rename foo foo-asdf # idempotent +ceph osd crush rule rename bar bar-asdf +ceph osd crush rule ls | grep 'foo-asdf' +ceph osd crush rule ls | grep 'bar-asdf' +ceph osd crush rule rm foo 2>&1 | grep 'does not exist' +ceph osd crush rule rm bar 2>&1 | grep 'does not exist' +ceph osd crush rule rename foo-asdf foo +ceph osd crush rule rename foo-asdf foo # idempotent +ceph osd crush rule rename bar-asdf bar +ceph osd crush rule ls | expect_false grep 'foo-asdf' +ceph osd crush rule ls | expect_false grep 'bar-asdf' +ceph osd crush rule rm foo +ceph osd crush rule rm foo # idempotent +ceph osd crush rule rm bar + +# can't delete in-use rules, tho: +ceph osd pool create pinning_pool 1 +expect_false ceph osd crush rule rm replicated_rule +ceph osd pool rm pinning_pool pinning_pool --yes-i-really-really-mean-it + +# build a simple map +expect_false ceph osd crush add-bucket foo osd +ceph osd crush add-bucket foo root +o1=`ceph osd create` +o2=`ceph osd create` +ceph osd crush add $o1 1 host=host1 root=foo +ceph osd crush add $o1 1 host=host1 root=foo # idemptoent +ceph osd crush add $o2 1 host=host2 root=foo +ceph osd crush add $o2 1 host=host2 root=foo # idempotent +ceph osd crush add-bucket bar root +ceph osd crush add-bucket bar root # idempotent +ceph osd crush link host1 root=bar +ceph osd crush link host1 root=bar # idempotent +ceph osd crush link host2 root=bar +ceph osd crush link host2 root=bar # idempotent + +ceph osd tree | grep -c osd.$o1 | grep -q 2 +ceph osd tree | grep -c host1 | grep -q 2 +ceph osd tree | grep -c osd.$o2 | grep -q 2 +ceph osd tree | grep 
-c host2 | grep -q 2 +expect_false ceph osd crush rm host1 foo # not empty +ceph osd crush unlink host1 foo +ceph osd crush unlink host1 foo +ceph osd tree | grep -c host1 | grep -q 1 + +expect_false ceph osd crush rm foo # not empty +expect_false ceph osd crush rm bar # not empty +ceph osd crush unlink host1 bar +ceph osd tree | grep -c host1 | grep -q 1 # now an orphan +ceph osd crush rm osd.$o1 host1 +ceph osd crush rm host1 +ceph osd tree | grep -c host1 | grep -q 0 +expect_false ceph osd tree-from host1 +ceph osd tree-from host2 +expect_false ceph osd tree-from osd.$o2 + +expect_false ceph osd crush rm bar # not empty +ceph osd crush unlink host2 + +ceph osd crush add-bucket host-for-test host root=root-for-test rack=rack-for-test +ceph osd tree | grep host-for-test +ceph osd tree | grep rack-for-test +ceph osd tree | grep root-for-test +ceph osd crush rm host-for-test +ceph osd crush rm rack-for-test +ceph osd crush rm root-for-test + +# reference foo and bar with a rule +ceph osd crush rule create-simple foo-rule foo host firstn +expect_false ceph osd crush rm foo +ceph osd crush rule rm foo-rule + +ceph osd crush rm bar +ceph osd crush rm foo +ceph osd crush rm osd.$o2 host2 +ceph osd crush rm host2 + +ceph osd crush add-bucket foo host +ceph osd crush move foo root=default rack=localrack + +ceph osd crush create-or-move osd.$o1 1.0 root=default +ceph osd crush move osd.$o1 host=foo +ceph osd find osd.$o1 | grep host | grep foo + +ceph osd crush rm osd.$o1 +ceph osd crush rm osd.$o2 + +ceph osd crush rm foo + +# test reweight +o3=`ceph osd create` +ceph osd crush add $o3 123 root=default +ceph osd tree | grep osd.$o3 | grep 123 +ceph osd crush reweight osd.$o3 113 +expect_false ceph osd crush reweight osd.$o3 123456 +ceph osd tree | grep osd.$o3 | grep 113 +ceph osd crush rm osd.$o3 +ceph osd rm osd.$o3 + +# test reweight-subtree +o4=`ceph osd create` +o5=`ceph osd create` +ceph osd crush add $o4 123 root=default host=foobaz +ceph osd crush add $o5 123 root=default host=foobaz +ceph osd tree | grep osd.$o4 | grep 123 +ceph osd tree | grep osd.$o5 | grep 123 +ceph osd crush reweight-subtree foobaz 155 +expect_false ceph osd crush reweight-subtree foobaz 123456 +ceph osd tree | grep osd.$o4 | grep 155 +ceph osd tree | grep osd.$o5 | grep 155 +ceph osd crush rm osd.$o4 +ceph osd crush rm osd.$o5 +ceph osd rm osd.$o4 +ceph osd rm osd.$o5 + +# weight sets +# make sure we require luminous before testing weight-sets +ceph osd set-require-min-compat-client luminous +ceph osd crush weight-set dump +ceph osd crush weight-set ls +expect_false ceph osd crush weight-set reweight fooset osd.0 .9 +ceph osd pool create fooset 8 +ceph osd pool create barset 8 +ceph osd pool set barset size 3 +expect_false ceph osd crush weight-set reweight fooset osd.0 .9 +ceph osd crush weight-set create fooset flat +ceph osd crush weight-set create barset positional +ceph osd crush weight-set ls | grep fooset +ceph osd crush weight-set ls | grep barset +ceph osd crush weight-set dump +ceph osd crush weight-set reweight fooset osd.0 .9 +expect_false ceph osd crush weight-set reweight fooset osd.0 .9 .9 +expect_false ceph osd crush weight-set reweight barset osd.0 .9 +ceph osd crush weight-set reweight barset osd.0 .9 .9 .9 +ceph osd crush weight-set ls | grep -c fooset | grep -q 1 +ceph osd crush weight-set rm fooset +ceph osd crush weight-set ls | grep -c fooset | grep -q 0 +ceph osd crush weight-set ls | grep barset +ceph osd crush weight-set rm barset +ceph osd crush weight-set ls | grep -c barset | grep -q 0 
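+# (rough summary of the weight-set flavours exercised above: a "flat" weight set +# stores one alternate weight per item, while a "positional" one stores a weight +# per replica position, which is why barset -- positional, on a size-3 pool -- +# needs three values per reweight and fooset -- flat -- takes exactly one. The +# "compat" weight set below is the single unnamed set that older, pre-luminous +# clients can still use.)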
+ceph osd crush weight-set create-compat +ceph osd crush weight-set ls | grep '(compat)' +ceph osd crush weight-set rm-compat + +# weight set vs device classes +ceph osd pool create cool 2 +ceph osd pool create cold 2 +ceph osd pool set cold size 2 +ceph osd crush weight-set create-compat +ceph osd crush weight-set create cool flat +ceph osd crush weight-set create cold positional +ceph osd crush rm-device-class osd.0 +ceph osd crush weight-set reweight-compat osd.0 10.5 +ceph osd crush weight-set reweight cool osd.0 11.5 +ceph osd crush weight-set reweight cold osd.0 12.5 12.4 +ceph osd crush set-device-class fish osd.0 +ceph osd crush tree --show-shadow | grep osd\\.0 | grep fish | grep 10\\. +ceph osd crush tree --show-shadow | grep osd\\.0 | grep fish | grep 11\\. +ceph osd crush tree --show-shadow | grep osd\\.0 | grep fish | grep 12\\. +ceph osd crush rm-device-class osd.0 +ceph osd crush set-device-class globster osd.0 +ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 10\\. +ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 11\\. +ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 12\\. +ceph osd crush weight-set reweight-compat osd.0 7.5 +ceph osd crush weight-set reweight cool osd.0 8.5 +ceph osd crush weight-set reweight cold osd.0 6.5 6.6 +ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 7\\. +ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 8\\. +ceph osd crush tree --show-shadow | grep osd\\.0 | grep globster | grep 6\\. +ceph osd crush rm-device-class osd.0 +ceph osd pool rm cool cool --yes-i-really-really-mean-it +ceph osd pool rm cold cold --yes-i-really-really-mean-it +ceph osd crush weight-set rm-compat + +# weight set vs device classes vs move +ceph osd crush weight-set create-compat +ceph osd crush add-bucket fooo host +ceph osd crush move fooo root=default +ceph osd crush add-bucket barr rack +ceph osd crush move barr root=default +ceph osd crush move fooo rack=barr +ceph osd crush rm fooo +ceph osd crush rm barr +ceph osd crush weight-set rm-compat + +# this sequence would crash at one point +ceph osd crush weight-set create-compat +ceph osd crush add-bucket r1 rack root=default +for f in `seq 1 32`; do + ceph osd crush add-bucket h$f host rack=r1 +done +for f in `seq 1 32`; do + ceph osd crush rm h$f +done +ceph osd crush rm r1 +ceph osd crush weight-set rm-compat + +echo OK diff --git a/qa/workunits/mon/osd.sh b/qa/workunits/mon/osd.sh new file mode 100755 index 00000000..535d6c13 --- /dev/null +++ b/qa/workunits/mon/osd.sh @@ -0,0 +1,24 @@ +#!/bin/sh -x + +set -e + +ua=`uuidgen` +ub=`uuidgen` + +# should get same id with same uuid +na=`ceph osd create $ua` +test $na -eq `ceph osd create $ua` + +nb=`ceph osd create $ub` +test $nb -eq `ceph osd create $ub` +test $nb -ne $na + +ceph osd rm $na +ceph osd rm $na +ceph osd rm $nb +ceph osd rm 1000 + +na2=`ceph osd create $ua` + +echo OK + diff --git a/qa/workunits/mon/pg_autoscaler.sh b/qa/workunits/mon/pg_autoscaler.sh new file mode 100755 index 00000000..706f87d0 --- /dev/null +++ b/qa/workunits/mon/pg_autoscaler.sh @@ -0,0 +1,79 @@ +#!/bin/bash -ex + +NUM_OSDS=$(ceph osd ls | wc -l) +if [ $NUM_OSDS -lt 6 ]; then + echo "test requires at least 6 OSDs" + exit 1 +fi + +NUM_POOLS=$(ceph osd pool ls | wc -l) +if [ $NUM_POOLS -gt 0 ]; then + echo "test requires no preexisting pools" + exit 1 +fi + +function wait_for() { + local sec=$1 + local cmd=$2 + + while true ; do + if bash -c "$cmd" ; then + break + fi + 
sec=$(( $sec - 1 )) + if [ $sec -eq 0 ]; then + echo failed + return 1 + fi + sleep 1 + done + return 0 +} + +# enable +ceph config set mgr mgr/pg_autoscaler/sleep_interval 5 +ceph mgr module enable pg_autoscaler + +# pg_num_min +ceph osd pool create a 16 --pg-num-min 4 +ceph osd pool create b 16 --pg-num-min 2 +ceph osd pool set a pg_autoscale_mode on +ceph osd pool set b pg_autoscale_mode on + +wait_for 120 "ceph osd pool get a pg_num | grep 4" +wait_for 120 "ceph osd pool get b pg_num | grep 2" + +# target ratio +ceph osd pool set a target_size_ratio 5 +ceph osd pool set b target_size_ratio 1 +sleep 10 +APGS=$(ceph osd dump -f json-pretty | jq '.pools[0].pg_num_target') +BPGS=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num_target') +test $APGS -gt 100 +test $BPGS -gt 10 + +# small ratio change does not change pg_num +ceph osd pool set a target_size_ratio 7 +ceph osd pool set b target_size_ratio 2 +sleep 10 +APGS2=$(ceph osd dump -f json-pretty | jq '.pools[0].pg_num_target') +BPGS2=$(ceph osd dump -f json-pretty | jq '.pools[1].pg_num_target') +test $APGS -eq $APGS2 +test $BPGS -eq $BPGS2 + +# target_size +ceph osd pool set a target_size_bytes 1000000000000000 +ceph osd pool set b target_size_bytes 1000000000000000 +ceph osd pool set a target_size_ratio 0 +ceph osd pool set b target_size_ratio 0 +wait_for 60 "ceph health detail | grep POOL_TARGET_SIZE_BYTES_OVERCOMMITTED" + +ceph osd pool set a target_size_bytes 1000 +ceph osd pool set b target_size_bytes 1000 +ceph osd pool set a target_size_ratio 1 +wait_for 60 "ceph health detail | grep POOL_HAS_TARGET_SIZE_BYTES_AND_RATIO" + +ceph osd pool rm a a --yes-i-really-really-mean-it +ceph osd pool rm b b --yes-i-really-really-mean-it + +echo OK diff --git a/qa/workunits/mon/ping.py b/qa/workunits/mon/ping.py new file mode 100755 index 00000000..f39da885 --- /dev/null +++ b/qa/workunits/mon/ping.py @@ -0,0 +1,108 @@ +#!/usr/bin/python + +import json +import shlex +import subprocess + +import six + + +class UnexpectedReturn(Exception): + def __init__(self, cmd, ret, expected, msg): + if isinstance(cmd, list): + self.cmd = ' '.join(cmd) + else: + assert isinstance(cmd, six.string_types) or isinstance(cmd, six.text_type), \ + 'cmd needs to be either a list or a str' + self.cmd = cmd + self.cmd = str(self.cmd) + self.ret = int(ret) + self.expected = int(expected) + self.msg = str(msg) + + def __str__(self): + return repr('{c}: expected return {e}, got {r} ({o})'.format( + c=self.cmd, e=self.expected, r=self.ret, o=self.msg)) + + +def call(cmd): + if isinstance(cmd, list): + args = cmd + elif isinstance(cmd, six.string_types) or isinstance(cmd, six.text_type): + args = shlex.split(cmd) + else: + assert False, 'cmd is not a string/unicode nor a list!' 
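+ # note: shlex.split above keeps quoted arguments together, so callers may pass either a pre-tokenized list or a single command string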
+ + print('call: {0}'.format(args)) + proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + procout, procerr = proc.communicate(None) + + return proc.returncode, procout, procerr + + +def expect(cmd, expected_ret): + try: + (r, out, err) = call(cmd) + except ValueError as e: + assert False, \ + 'unable to run {c}: {err}'.format(c=repr(cmd), err=str(e)) + + if r != expected_ret: + raise UnexpectedReturn(repr(cmd), r, expected_ret, err) + + return out.decode() if isinstance(out, bytes) else out + + +def get_quorum_status(timeout=300): + cmd = 'ceph quorum_status' + if timeout > 0: + cmd += ' --connect-timeout {0}'.format(timeout) + + out = expect(cmd, 0) + j = json.loads(out) + return j + + +def main(): + quorum_status = get_quorum_status() + mon_names = [mon['name'] for mon in quorum_status['monmap']['mons']] + + print('ping all monitors') + for m in mon_names: + print('ping mon.{0}'.format(m)) + out = expect('ceph ping mon.{0}'.format(m), 0) + reply = json.loads(out) + + assert reply['mon_status']['name'] == m, \ + 'reply obtained from mon.{0}, expected mon.{1}'.format( + reply['mon_status']['name'], m) + + print('test out-of-quorum reply') + for m in mon_names: + print('testing mon.{0}'.format(m)) + expect('ceph daemon mon.{0} quorum exit'.format(m), 0) + + quorum_status = get_quorum_status() + assert m not in quorum_status['quorum_names'], \ + 'mon.{0} was not supposed to be in quorum ({1})'.format( + m, quorum_status['quorum_names']) + + out = expect('ceph ping mon.{0}'.format(m), 0) + reply = json.loads(out) + mon_status = reply['mon_status'] + + assert mon_status['name'] == m, \ + 'reply obtained from mon.{0}, expected mon.{1}'.format( + mon_status['name'], m) + + assert mon_status['state'] == 'electing', \ + 'mon.{0} is in state {1}, expected electing'.format( + m, mon_status['state']) + + expect('ceph daemon mon.{0} quorum enter'.format(m), 0) + + print('OK') + + +if __name__ == '__main__': + main() diff --git a/qa/workunits/mon/pool_ops.sh b/qa/workunits/mon/pool_ops.sh new file mode 100755 index 00000000..b0207769 --- /dev/null +++ b/qa/workunits/mon/pool_ops.sh @@ -0,0 +1,97 @@ +#!/usr/bin/env bash + +set -ex + +function expect_false() +{ + set -x + if "$@"; then return 1; else return 0; fi +} + +function get_config_value_or_die() +{ + local pool_name config_opt raw val + + pool_name=$1 + config_opt=$2 + + raw="`$SUDO ceph osd pool get $pool_name $config_opt 2>/dev/null`" + if [[ $? -ne 0 ]]; then + echo "error obtaining config opt '$config_opt' from '$pool_name': $raw" + exit 1 + fi + + raw=`echo $raw | sed -e 's/[{} "]//g'` + val=`echo $raw | cut -f2 -d:` + + echo "$val" + return 0 +} + +function expect_config_value() +{ + local pool_name config_opt expected_val val + pool_name=$1 + config_opt=$2 + expected_val=$3 + + val=$(get_config_value_or_die $pool_name $config_opt) + + if [[ "$val" != "$expected_val" ]]; then + echo "expected '$expected_val', got '$val'" + exit 1 + fi +} + +# note: we need to pass the other args or ceph_argparse.py will take +# 'invalid' that is not replicated|erasure and assume it is the next +# argument, which is a string. 
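+# (in other words, without the trailing positional args the parser would accept +# 'invalid' as that optional string argument and the create could succeed; with +# foo-profile/foo-ruleset supplied, 'invalid' has to be parsed as the pool type +# and the command should fail, which is what expect_false checks below.)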
+expect_false ceph osd pool create foo 123 123 invalid foo-profile foo-ruleset + +ceph osd pool create foo 123 123 replicated +ceph osd pool create fooo 123 123 erasure default +ceph osd pool create foooo 123 + +ceph osd pool create foo 123 # idempotent + +ceph osd pool set foo size 1 +expect_config_value "foo" "min_size" 1 +ceph osd pool set foo size 4 +expect_config_value "foo" "min_size" 2 +ceph osd pool set foo size 10 +expect_config_value "foo" "min_size" 5 +expect_false ceph osd pool set foo size 0 +expect_false ceph osd pool set foo size 20 + +ceph osd pool set foo size 3 +ceph osd getcrushmap -o crush +crushtool -d crush -o crush.txt +sed -i 's/max_size 10/max_size 3/' crush.txt +crushtool -c crush.txt -o crush.new +ceph osd setcrushmap -i crush.new +expect_false ceph osd pool set foo size 4 +ceph osd setcrushmap -i crush +rm -f crush crush.txt crush.new + +# should fail due to safety interlock +expect_false ceph osd pool delete foo +expect_false ceph osd pool delete foo foo +expect_false ceph osd pool delete foo foo --force +expect_false ceph osd pool delete foo fooo --yes-i-really-mean-it +expect_false ceph osd pool delete foo --yes-i-really-mean-it foo + +ceph osd pool delete foooo foooo --yes-i-really-really-mean-it +ceph osd pool delete fooo fooo --yes-i-really-really-mean-it +ceph osd pool delete foo foo --yes-i-really-really-mean-it + +# idempotent +ceph osd pool delete foo foo --yes-i-really-really-mean-it +ceph osd pool delete fooo fooo --yes-i-really-really-mean-it +ceph osd pool delete fooo fooo --yes-i-really-really-mean-it + +# non-existent pool +ceph osd pool delete fuggg fuggg --yes-i-really-really-mean-it + +echo OK + + diff --git a/qa/workunits/mon/rbd_snaps_ops.sh b/qa/workunits/mon/rbd_snaps_ops.sh new file mode 100755 index 00000000..eb88565e --- /dev/null +++ b/qa/workunits/mon/rbd_snaps_ops.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash + +# attempt to trigger #6047 + + +cmd_no=0 +expect() +{ + cmd_no=$(($cmd_no+1)) + cmd="$1" + expected=$2 + echo "[$cmd_no] $cmd" + eval $cmd + ret=$? 
+ if [[ $ret -ne $expected ]]; then + echo "[$cmd_no] unexpected return '$ret', expected '$expected'" + exit 1 + fi +} + +ceph osd pool delete test test --yes-i-really-really-mean-it || true +expect 'ceph osd pool create test 8 8' 0 +expect 'ceph osd pool application enable test rbd' +expect 'ceph osd pool mksnap test snapshot' 0 +expect 'ceph osd pool rmsnap test snapshot' 0 + +expect 'rbd --pool=test --rbd_validate_pool=false create --size=102400 image' 0 +expect 'rbd --pool=test snap create image@snapshot' 22 + +expect 'ceph osd pool delete test test --yes-i-really-really-mean-it' 0 +expect 'ceph osd pool create test 8 8' 0 +expect 'rbd --pool=test pool init' 0 +expect 'rbd --pool=test create --size=102400 image' 0 +expect 'rbd --pool=test snap create image@snapshot' 0 +expect 'rbd --pool=test snap ls image' 0 +expect 'rbd --pool=test snap rm image@snapshot' 0 + +expect 'ceph osd pool mksnap test snapshot' 22 + +expect 'ceph osd pool delete test test --yes-i-really-really-mean-it' 0 + +# reproduce 7210 and expect it to be fixed +# basically create such a scenario where we end up deleting what used to +# be an unmanaged snapshot from a not-unmanaged pool + +ceph osd pool delete test-foo test-foo --yes-i-really-really-mean-it || true +expect 'ceph osd pool create test-foo 8' 0 +expect 'ceph osd pool application enable test-foo rbd' +expect 'rbd --pool test-foo create --size 1024 image' 0 +expect 'rbd --pool test-foo snap create image@snapshot' 0 + +ceph osd pool delete test-bar test-bar --yes-i-really-really-mean-it || true +expect 'ceph osd pool create test-bar 8' 0 +expect 'ceph osd pool application enable test-bar rbd' +expect 'rados cppool test-foo test-bar --yes-i-really-mean-it' 0 +expect 'rbd --pool test-bar snap rm image@snapshot' 95 +expect 'ceph osd pool delete test-foo test-foo --yes-i-really-really-mean-it' 0 +expect 'ceph osd pool delete test-bar test-bar --yes-i-really-really-mean-it' 0 + + +echo OK diff --git a/qa/workunits/mon/test_config_key_caps.sh b/qa/workunits/mon/test_config_key_caps.sh new file mode 100755 index 00000000..77b4b53b --- /dev/null +++ b/qa/workunits/mon/test_config_key_caps.sh @@ -0,0 +1,201 @@ +#!/usr/bin/env bash + +set -x +set -e + +tmp=$(mktemp -d -p /tmp test_mon_config_key_caps.XXXXX) +entities=() + +function cleanup() +{ + set +e + set +x + if [[ -e $tmp/keyring ]] && [[ -e $tmp/keyring.orig ]]; then + grep '\[.*\..*\]' $tmp/keyring.orig > $tmp/entities.orig + for e in $(grep '\[.*\..*\]' $tmp/keyring | \ + diff $tmp/entities.orig - | \ + sed -n 's/^.*\[\(.*\..*\)\]/\1/p'); + do + ceph auth rm $e 2>&1 >& /dev/null + done + fi + #rm -fr $tmp +} + +trap cleanup 0 # cleanup on exit + +function expect_false() +{ + set -x + if "$@"; then return 1; else return 0; fi +} + +# for cleanup purposes +ceph auth export -o $tmp/keyring.orig + +k=$tmp/keyring + +# setup a few keys +ceph config-key ls +ceph config-key set daemon-private/osd.123/test-foo +ceph config-key set mgr/test-foo +ceph config-key set device/test-foo +ceph config-key set test/foo + +allow_aa=client.allow_aa +allow_bb=client.allow_bb +allow_cc=client.allow_cc + +mgr_a=mgr.a +mgr_b=mgr.b +osd_a=osd.100 +osd_b=osd.200 + +prefix_aa=client.prefix_aa +prefix_bb=client.prefix_bb +prefix_cc=client.prefix_cc +match_aa=client.match_aa +match_bb=client.match_bb + +fail_aa=client.fail_aa +fail_bb=client.fail_bb +fail_cc=client.fail_cc +fail_dd=client.fail_dd +fail_ee=client.fail_ee +fail_ff=client.fail_ff +fail_gg=client.fail_gg +fail_writes=client.fail_writes + +ceph auth get-or-create $allow_aa mon 
'allow *' +ceph auth get-or-create $allow_bb mon 'allow service config-key rwx' +ceph auth get-or-create $allow_cc mon 'allow command "config-key get"' + +ceph auth get-or-create $mgr_a mon 'allow profile mgr' +ceph auth get-or-create $mgr_b mon 'allow profile mgr' +ceph auth get-or-create $osd_a mon 'allow profile osd' +ceph auth get-or-create $osd_b mon 'allow profile osd' + +ceph auth get-or-create $prefix_aa mon \ + "allow command \"config-key get\" with key prefix client/$prefix_aa" + +cap="allow command \"config-key set\" with key prefix client/" +cap="$cap,allow command \"config-key get\" with key prefix client/$prefix_bb" +ceph auth get-or-create $prefix_bb mon "$cap" + +cap="allow command \"config-key get\" with key prefix client/" +cap="$cap, allow command \"config-key set\" with key prefix client/" +cap="$cap, allow command \"config-key ls\"" +ceph auth get-or-create $prefix_cc mon "$cap" + +cap="allow command \"config-key get\" with key=client/$match_aa/foo" +ceph auth get-or-create $match_aa mon "$cap" +cap="allow command \"config-key get\" with key=client/$match_bb/foo" +cap="$cap,allow command \"config-key set\" with key=client/$match_bb/foo" +ceph auth get-or-create $match_bb mon "$cap" + +ceph auth get-or-create $fail_aa mon 'allow rx' +ceph auth get-or-create $fail_bb mon 'allow r,allow w' +ceph auth get-or-create $fail_cc mon 'allow rw' +ceph auth get-or-create $fail_dd mon 'allow rwx' +ceph auth get-or-create $fail_ee mon 'allow profile bootstrap-rgw' +ceph auth get-or-create $fail_ff mon 'allow profile bootstrap-rbd' +# write commands will require rw; wx is not enough +ceph auth get-or-create $fail_gg mon 'allow service config-key wx' +# read commands will only require 'r'; 'rx' should be enough. +ceph auth get-or-create $fail_writes mon 'allow service config-key rx' + +# grab keyring +ceph auth export -o $k + +# keys will all the caps can do whatever +for c in $allow_aa $allow_bb $allow_cc $mgr_a $mgr_b; do + ceph -k $k --name $c config-key get daemon-private/osd.123/test-foo + ceph -k $k --name $c config-key get mgr/test-foo + ceph -k $k --name $c config-key get device/test-foo + ceph -k $k --name $c config-key get test/foo +done + +for c in $osd_a $osd_b; do + ceph -k $k --name $c config-key put daemon-private/$c/test-foo + ceph -k $k --name $c config-key get daemon-private/$c/test-foo + expect_false ceph -k $k --name $c config-key ls + expect_false ceph -k $k --name $c config-key get mgr/test-foo + expect_false ceph -k $k --name $c config-key get device/test-foo + expect_false ceph -k $k --name $c config-key get test/foo +done + +expect_false ceph -k $k --name $osd_a get daemon-private/$osd_b/test-foo +expect_false ceph -k $k --name $osd_b get daemon-private/$osd_a/test-foo + +expect_false ceph -k $k --name $prefix_aa \ + config-key ls +expect_false ceph -k $k --name $prefix_aa \ + config-key get daemon-private/osd.123/test-foo +expect_false ceph -k $k --name $prefix_aa \ + config-key set test/bar +expect_false ceph -k $k --name $prefix_aa \ + config-key set client/$prefix_aa/foo + +# write something so we can read, use a custom entity +ceph -k $k --name $allow_bb config-key set client/$prefix_aa/foo +ceph -k $k --name $prefix_aa config-key get client/$prefix_aa/foo +# check one writes to the other's prefix, the other is able to read +ceph -k $k --name $prefix_bb config-key set client/$prefix_aa/bar +ceph -k $k --name $prefix_aa config-key get client/$prefix_aa/bar + +ceph -k $k --name $prefix_bb config-key set client/$prefix_bb/foo +ceph -k $k --name $prefix_bb 
config-key get client/$prefix_bb/foo + +expect_false ceph -k $k --name $prefix_bb config-key get client/$prefix_aa/bar +expect_false ceph -k $k --name $prefix_bb config-key ls +expect_false ceph -k $k --name $prefix_bb \ + config-key get daemon-private/osd.123/test-foo +expect_false ceph -k $k --name $prefix_bb config-key get mgr/test-foo +expect_false ceph -k $k --name $prefix_bb config-key get device/test-foo +expect_false ceph -k $k --name $prefix_bb config-key get test/bar +expect_false ceph -k $k --name $prefix_bb config-key set test/bar + +ceph -k $k --name $prefix_cc config-key set client/$match_aa/foo +ceph -k $k --name $prefix_cc config-key set client/$match_bb/foo +ceph -k $k --name $prefix_cc config-key get client/$match_aa/foo +ceph -k $k --name $prefix_cc config-key get client/$match_bb/foo +expect_false ceph -k $k --name $prefix_cc config-key set other/prefix +expect_false ceph -k $k --name $prefix_cc config-key get mgr/test-foo +ceph -k $k --name $prefix_cc config-key ls >& /dev/null + +ceph -k $k --name $match_aa config-key get client/$match_aa/foo +expect_false ceph -k $k --name $match_aa config-key get client/$match_bb/foo +expect_false ceph -k $k --name $match_aa config-key set client/$match_aa/foo +ceph -k $k --name $match_bb config-key get client/$match_bb/foo +ceph -k $k --name $match_bb config-key set client/$match_bb/foo +expect_false ceph -k $k --name $match_bb config-key get client/$match_aa/foo +expect_false ceph -k $k --name $match_bb config-key set client/$match_aa/foo + +keys=(daemon-private/osd.123/test-foo + mgr/test-foo + device/test-foo + test/foo + client/$prefix_aa/foo + client/$prefix_bb/foo + client/$match_aa/foo + client/$match_bb/foo +) +# expect these all to fail accessing config-key +for c in $fail_aa $fail_bb $fail_cc \ + $fail_dd $fail_ee $fail_ff \ + $fail_gg; do + for m in get set; do + for key in ${keys[*]} client/$prefix_aa/foo client/$prefix_bb/foo; do + expect_false ceph -k $k --name $c config-key $m $key + done + done +done + +# fail writes but succeed on reads +expect_false ceph -k $k --name $fail_writes config-key set client/$match_aa/foo +expect_false ceph -k $k --name $fail_writes config-key set test/foo +ceph -k $k --name $fail_writes config-key ls +ceph -k $k --name $fail_writes config-key get client/$match_aa/foo +ceph -k $k --name $fail_writes config-key get daemon-private/osd.123/test-foo + +echo "OK" diff --git a/qa/workunits/mon/test_mon_config_key.py b/qa/workunits/mon/test_mon_config_key.py new file mode 100755 index 00000000..c0cb8299 --- /dev/null +++ b/qa/workunits/mon/test_mon_config_key.py @@ -0,0 +1,481 @@ +#!/usr/bin/python +# +# test_mon_config_key - Test 'ceph config-key' interface +# +# Copyright (C) 2013 Inktank +# +# This is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License version 2.1, as published by the Free Software +# Foundation. See file COPYING. 
+# +import argparse +import base64 +import errno +import json +import logging +import os +import random +import string +import subprocess +import sys +import time + +# +# Accepted Environment variables: +# CEPH_TEST_VERBOSE - be more verbose; '1' enables; '0' disables +# CEPH_TEST_DURATION - test duration in seconds +# CEPH_TEST_SEED - seed to be used during the test +# +# Accepted arguments and options (see --help): +# -v, --verbose - be more verbose +# -d, --duration SECS - test duration in seconds +# -s, --seed SEED - seed to be used during the test +# + + +LOG = logging.getLogger(os.path.basename(sys.argv[0].replace('.py', ''))) + +SIZES = [ + (0, 0), + (10, 0), + (25, 0), + (50, 0), + (100, 0), + (1000, 0), + (64 * 1024, 0), + (64 * 1024 + 1, -errno.EFBIG), + (128 * 1024, -errno.EFBIG) +] + +# tests will be randomly selected from the keys here, and the test +# suboperation will be randomly selected from the list in the values +# here. i.e. 'exists/existing' would test that a key the test put into +# the store earlier actually does still exist in the config store, +# and that's a separate test case from 'exists/enoent', which tests +# nonexistence of a key known to not be present. + +OPS = { + 'put': ['existing', 'new'], + 'del': ['existing', 'enoent'], + 'exists': ['existing', 'enoent'], + 'get': ['existing', 'enoent'], + 'list': ['existing', 'enoent'], + 'dump': ['existing', 'enoent'], +} + +CONFIG_PUT = [] # list: keys +CONFIG_DEL = [] # list: keys +CONFIG_EXISTING = {} # map: key -> size + + +def run_cmd(cmd, expects=0): + full_cmd = ['ceph', 'config-key'] + cmd + + if expects < 0: + expects = -expects + + cmdlog = LOG.getChild('run_cmd') + cmdlog.debug('{fc}'.format(fc=' '.join(full_cmd))) + + proc = subprocess.Popen(full_cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + + stdout = [] + stderr = [] + while True: + try: + out, err = proc.communicate() + if out is not None: + stdout += out.decode().split('\n') + cmdlog.debug('stdout: {s}'.format(s=out)) + if err is not None: + stdout += err.decode().split('\n') + cmdlog.debug('stderr: {s}'.format(s=err)) + except ValueError: + ret = proc.wait() + break + + if ret != expects: + cmdlog.error('cmd > {cmd}'.format(cmd=full_cmd)) + cmdlog.error("expected return '{expected}' got '{got}'".format( + expected=expects, got=ret)) + cmdlog.error('stdout') + for i in stdout: + cmdlog.error('{x}'.format(x=i)) + cmdlog.error('stderr') + for i in stderr: + cmdlog.error('{x}'.format(x=i)) + + +# end run_cmd + +def gen_data(size, rnd): + chars = string.ascii_letters + string.digits + return ''.join(rnd.choice(chars) for _ in range(size)) + + +def gen_key(rnd): + return gen_data(20, rnd) + + +def gen_tmp_file_path(rnd): + file_name = gen_data(20, rnd) + file_path = os.path.join('/tmp', 'ceph-test.' 
+ file_name) + return file_path + + +def destroy_tmp_file(fpath): + if os.path.exists(fpath) and os.path.isfile(fpath): + os.unlink(fpath) + + +def write_data_file(data, rnd): + file_path = gen_tmp_file_path(rnd) + data_file = open(file_path, 'a+') + data_file.truncate() + data_file.write(data) + data_file.close() + return file_path + + +# end write_data_file + +def choose_random_op(rnd): + op = rnd.choice( + list(OPS.keys()) + ) + sop = rnd.choice(OPS[op]) + return op, sop + + +def parse_args(args): + parser = argparse.ArgumentParser( + description="Test the monitor's 'config-key' API", + ) + parser.add_argument( + '-v', '--verbose', + action='store_true', + help='be more verbose', + ) + parser.add_argument( + '-s', '--seed', + metavar='SEED', + help='use SEED instead of generating it in run-time', + ) + parser.add_argument( + '-d', '--duration', + metavar='SECS', + help='run test for SECS seconds (default: 300)', + ) + parser.set_defaults( + seed=None, + duration=300, + verbose=False, + ) + return parser.parse_args(args) + + +def main(): + args = parse_args(sys.argv[1:]) + + verbose = args.verbose + if os.environ.get('CEPH_TEST_VERBOSE') is not None: + verbose = (os.environ.get('CEPH_TEST_VERBOSE') == '1') + + duration = int(os.environ.get('CEPH_TEST_DURATION', args.duration)) + seed = os.environ.get('CEPH_TEST_SEED', args.seed) + seed = int(time.time()) if seed is None else int(seed) + + rnd = random.Random() + rnd.seed(seed) + + loglevel = logging.INFO + if verbose: + loglevel = logging.DEBUG + + logging.basicConfig(level=loglevel) + + LOG.info('seed: {s}'.format(s=seed)) + + start = time.time() + + while (time.time() - start) < duration: + (op, sop) = choose_random_op(rnd) + + LOG.info('{o}({s})'.format(o=op, s=sop)) + op_log = LOG.getChild('{o}({s})'.format(o=op, s=sop)) + + if op == 'put': + via_file = (rnd.uniform(0, 100) < 50.0) + + expected = 0 + cmd = ['put'] + key = None + + if sop == 'existing': + if len(CONFIG_EXISTING) == 0: + op_log.debug('no existing keys; continue') + continue + key = rnd.choice(CONFIG_PUT) + assert key in CONFIG_EXISTING, \ + "key '{k_}' not in CONFIG_EXISTING".format(k_=key) + + expected = 0 # the store just overrides the value if the key exists + # end if sop == 'existing' + elif sop == 'new': + for x in range(0, 10): + key = gen_key(rnd) + if key not in CONFIG_EXISTING: + break + key = None + if key is None: + op_log.error('unable to generate an unique key -- try again later.') + continue + + assert key not in CONFIG_PUT and key not in CONFIG_EXISTING, \ + 'key {k} was not supposed to exist!'.format(k=key) + + assert key is not None, \ + 'key must be != None' + + cmd += [key] + + (size, error) = rnd.choice(SIZES) + if size > 25: + via_file = True + + data = gen_data(size, rnd) + + if error == 0: # only add if we expect the put to be successful + if sop == 'new': + CONFIG_PUT.append(key) + CONFIG_EXISTING[key] = size + expected = error + + if via_file: + data_file = write_data_file(data, rnd) + cmd += ['-i', data_file] + else: + cmd += [data] + + op_log.debug('size: {sz}, via: {v}'.format( + sz=size, + v='file: {f}'.format(f=data_file) if via_file == True else 'cli') + ) + run_cmd(cmd, expects=expected) + if via_file: + destroy_tmp_file(data_file) + continue + + elif op == 'del': + expected = 0 + cmd = ['del'] + key = None + + if sop == 'existing': + if len(CONFIG_EXISTING) == 0: + op_log.debug('no existing keys; continue') + continue + key = rnd.choice(CONFIG_PUT) + assert key in CONFIG_EXISTING, \ + "key '{k_}' not in CONFIG_EXISTING".format(k_=key) 
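+ # note: the 'enoent' sub-op below synthesizes a key that should not exist; deleting a missing key is still expected to succeed, so 'expected' is left at 0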
+ + if sop == 'enoent': + for x in range(0, 10): + key = base64.b64encode(os.urandom(20)).decode() + if key not in CONFIG_EXISTING: + break + key = None + if key is None: + op_log.error('unable to generate an unique key -- try again later.') + continue + assert key not in CONFIG_PUT and key not in CONFIG_EXISTING, \ + 'key {k} was not supposed to exist!'.format(k=key) + expected = 0 # deleting a non-existent key succeeds + + assert key is not None, \ + 'key must be != None' + + cmd += [key] + op_log.debug('key: {k}'.format(k=key)) + run_cmd(cmd, expects=expected) + if sop == 'existing': + CONFIG_DEL.append(key) + CONFIG_PUT.remove(key) + del CONFIG_EXISTING[key] + continue + + elif op == 'exists': + expected = 0 + cmd = ['exists'] + key = None + + if sop == 'existing': + if len(CONFIG_EXISTING) == 0: + op_log.debug('no existing keys; continue') + continue + key = rnd.choice(CONFIG_PUT) + assert key in CONFIG_EXISTING, \ + "key '{k_}' not in CONFIG_EXISTING".format(k_=key) + + if sop == 'enoent': + for x in range(0, 10): + key = base64.b64encode(os.urandom(20)).decode() + if key not in CONFIG_EXISTING: + break + key = None + if key is None: + op_log.error('unable to generate an unique key -- try again later.') + continue + assert key not in CONFIG_PUT and key not in CONFIG_EXISTING, \ + 'key {k} was not supposed to exist!'.format(k=key) + expected = -errno.ENOENT + + assert key is not None, \ + 'key must be != None' + + cmd += [key] + op_log.debug('key: {k}'.format(k=key)) + run_cmd(cmd, expects=expected) + continue + + elif op == 'get': + expected = 0 + cmd = ['get'] + key = None + + if sop == 'existing': + if len(CONFIG_EXISTING) == 0: + op_log.debug('no existing keys; continue') + continue + key = rnd.choice(CONFIG_PUT) + assert key in CONFIG_EXISTING, \ + "key '{k_}' not in CONFIG_EXISTING".format(k_=key) + + if sop == 'enoent': + for x in range(0, 10): + key = base64.b64encode(os.urandom(20)).decode() + if key not in CONFIG_EXISTING: + break + key = None + if key is None: + op_log.error('unable to generate an unique key -- try again later.') + continue + assert key not in CONFIG_PUT and key not in CONFIG_EXISTING, \ + 'key {k} was not supposed to exist!'.format(k=key) + expected = -errno.ENOENT + + assert key is not None, \ + 'key must be != None' + + file_path = gen_tmp_file_path(rnd) + cmd += [key, '-o', file_path] + op_log.debug('key: {k}'.format(k=key)) + run_cmd(cmd, expects=expected) + if sop == 'existing': + try: + temp_file = open(file_path, 'r+') + except IOError as err: + if err.errno == errno.ENOENT: + assert CONFIG_EXISTING[key] == 0, \ + "error opening '{fp}': {e}".format(fp=file_path, e=err) + continue + else: + assert False, \ + 'some error occurred: {e}'.format(e=err) + cnt = 0 + while True: + read_data = temp_file.read() + if read_data == '': + break + cnt += len(read_data) + assert cnt == CONFIG_EXISTING[key], \ + "wrong size from store for key '{k}': {sz}, expected {es}".format( + k=key, sz=cnt, es=CONFIG_EXISTING[key]) + destroy_tmp_file(file_path) + continue + + elif op == 'list' or op == 'dump': + expected = 0 + cmd = [op] + key = None + + if sop == 'existing': + if len(CONFIG_EXISTING) == 0: + op_log.debug('no existing keys; continue') + continue + key = rnd.choice(CONFIG_PUT) + assert key in CONFIG_EXISTING, \ + "key '{k_}' not in CONFIG_EXISTING".format(k_=key) + + if sop == 'enoent': + for x in range(0, 10): + key = base64.b64encode(os.urandom(20)).decode() + if key not in CONFIG_EXISTING: + break + key = None + if key is None: + op_log.error('unable to 
generate an unique key -- try again later.') + continue + assert key not in CONFIG_PUT and key not in CONFIG_EXISTING, \ + 'key {k} was not supposed to exist!'.format(k=key) + + assert key is not None, \ + 'key must be != None' + + file_path = gen_tmp_file_path(rnd) + cmd += ['-o', file_path] + op_log.debug('key: {k}'.format(k=key)) + run_cmd(cmd, expects=expected) + try: + temp_file = open(file_path, 'r+') + except IOError as err: + if err.errno == errno.ENOENT: + assert CONFIG_EXISTING[key] == 0, \ + "error opening '{fp}': {e}".format(fp=file_path, e=err) + continue + else: + assert False, \ + 'some error occurred: {e}'.format(e=err) + cnt = 0 + try: + read_data = json.load(temp_file) + except ValueError: + temp_file.seek(0) + assert False, "{op} output was not valid JSON:\n{filedata}".format(op, temp_file.readlines()) + + if sop == 'existing': + assert key in read_data, "key '{k}' not found in list/dump output".format(k=key) + if op == 'dump': + cnt = len(read_data[key]) + assert cnt == CONFIG_EXISTING[key], \ + "wrong size from list for key '{k}': {sz}, expected {es}".format( + k=key, sz=cnt, es=CONFIG_EXISTING[key]) + elif sop == 'enoent': + assert key not in read_data, "key '{k}' found in list/dump output".format(k=key) + destroy_tmp_file(file_path) + continue + else: + assert False, 'unknown op {o}'.format(o=op) + + # check if all keys in 'CONFIG_PUT' exist and + # if all keys on 'CONFIG_DEL' don't. + # but first however, remove all keys in CONFIG_PUT that might + # be in CONFIG_DEL as well. + config_put_set = set(CONFIG_PUT) + config_del_set = set(CONFIG_DEL).difference(config_put_set) + + LOG.info('perform sanity checks on store') + + for k in config_put_set: + LOG.getChild('check(puts)').debug('key: {k_}'.format(k_=k)) + run_cmd(['exists', k], expects=0) + for k in config_del_set: + LOG.getChild('check(dels)').debug('key: {k_}'.format(k_=k)) + run_cmd(['exists', k], expects=-errno.ENOENT) + + +if __name__ == "__main__": + main() diff --git a/qa/workunits/mon/test_mon_osdmap_prune.sh b/qa/workunits/mon/test_mon_osdmap_prune.sh new file mode 100755 index 00000000..9cdd7217 --- /dev/null +++ b/qa/workunits/mon/test_mon_osdmap_prune.sh @@ -0,0 +1,205 @@ +#!/bin/bash + +. $(dirname $0)/../../standalone/ceph-helpers.sh + +set -x + +function wait_for_osdmap_manifest() { + + local what=${1:-"true"} + + local -a delays=($(get_timeout_delays $TIMEOUT .1)) + local -i loop=0 + + for ((i=0; i < ${#delays[*]}; ++i)); do + has_manifest=$(ceph report | jq 'has("osdmap_manifest")') + if [[ "$has_manifest" == "$what" ]]; then + return 0 + fi + + sleep ${delays[$i]} + done + + echo "osdmap_manifest never outputted on report" + ceph report + return 1 +} + +function wait_for_trim() { + + local -i epoch=$1 + local -a delays=($(get_timeout_delays $TIMEOUT .1)) + local -i loop=0 + + for ((i=0; i < ${#delays[*]}; ++i)); do + fc=$(ceph report | jq '.osdmap_first_committed') + if [[ $fc -eq $epoch ]]; then + return 0 + fi + sleep ${delays[$i]} + done + + echo "never trimmed up to epoch $epoch" + ceph report + return 1 +} + +function test_osdmap() { + + local epoch=$1 + local ret=0 + + tmp_map=$(mktemp) + ceph osd getmap $epoch -o $tmp_map || return 1 + if ! 
osdmaptool --print $tmp_map | grep "epoch $epoch" ; then + echo "ERROR: failed processing osdmap epoch $epoch" + ret=1 + fi + rm $tmp_map + return $ret +} + +function generate_osdmaps() { + + local -i num=$1 + + cmds=( set unset ) + for ((i=0; i < num; ++i)); do + ceph osd ${cmds[$((i%2))]} noup || return 1 + done + return 0 +} + +function test_mon_osdmap_prune() { + + create_pool foo 32 + wait_for_clean || return 1 + + ceph config set mon mon_debug_block_osdmap_trim true || return 1 + + generate_osdmaps 500 || return 1 + + report="$(ceph report)" + fc=$(jq '.osdmap_first_committed' <<< $report) + lc=$(jq '.osdmap_last_committed' <<< $report) + + [[ $((lc-fc)) -ge 500 ]] || return 1 + + wait_for_osdmap_manifest || return 1 + + manifest="$(ceph report | jq '.osdmap_manifest')" + + first_pinned=$(jq '.first_pinned' <<< $manifest) + last_pinned=$(jq '.last_pinned' <<< $manifest) + pinned_maps=( $(jq '.pinned_maps[]' <<< $manifest) ) + + # validate pinned maps list + [[ $first_pinned -eq ${pinned_maps[0]} ]] || return 1 + [[ $last_pinned -eq ${pinned_maps[-1]} ]] || return 1 + + # validate pinned maps range + [[ $first_pinned -lt $last_pinned ]] || return 1 + [[ $last_pinned -lt $lc ]] || return 1 + [[ $first_pinned -eq $fc ]] || return 1 + + # ensure all the maps are available, and work as expected + # this can take a while... + + for ((i=$first_pinned; i <= $last_pinned; ++i)); do + test_osdmap $i || return 1 + done + + # update pinned maps state: + # the monitor may have pruned & pinned additional maps since we last + # assessed state, given it's an iterative process. + # + manifest="$(ceph report | jq '.osdmap_manifest')" + first_pinned=$(jq '.first_pinned' <<< $manifest) + last_pinned=$(jq '.last_pinned' <<< $manifest) + pinned_maps=( $(jq '.pinned_maps[]' <<< $manifest) ) + + # test trimming maps + # + # we're going to perform the following tests: + # + # 1. force trim to a pinned map + # 2. force trim to a pinned map's previous epoch + # 3. trim all maps except the last 200 or so. + # + + # 1. force trim to a pinned map + # + [[ ${#pinned_maps[@]} -gt 10 ]] || return 1 + + trim_to=${pinned_maps[1]} + ceph config set mon mon_osd_force_trim_to $trim_to + ceph config set mon mon_min_osdmap_epochs 100 + ceph config set mon paxos_service_trim_min 1 + ceph config set mon mon_debug_block_osdmap_trim false + + # generate an epoch so we get to trim maps + ceph osd set noup + ceph osd unset noup + + wait_for_trim $trim_to || return 1 + + report="$(ceph report)" + fc=$(jq '.osdmap_first_committed' <<< $report) + [[ $fc -eq $trim_to ]] || return 1 + + old_first_pinned=$first_pinned + old_last_pinned=$last_pinned + first_pinned=$(jq '.osdmap_manifest.first_pinned' <<< $report) + last_pinned=$(jq '.osdmap_manifest.last_pinned' <<< $report) + [[ $first_pinned -eq $trim_to ]] || return 1 + [[ $first_pinned -gt $old_first_pinned ]] || return 1 + [[ $last_pinned -gt $old_first_pinned ]] || return 1 + + test_osdmap $trim_to || return 1 + test_osdmap $(( trim_to+1 )) || return 1 + + pinned_maps=( $(jq '.osdmap_manifest.pinned_maps[]' <<< $report) ) + + # 2. 
force trim to a pinned map's previous epoch + # + [[ ${#pinned_maps[@]} -gt 2 ]] || return 1 + trim_to=$(( ${pinned_maps[1]} - 1)) + ceph config set mon mon_osd_force_trim_to $trim_to + + # generate an epoch so we get to trim maps + ceph osd set noup + ceph osd unset noup + + wait_for_trim $trim_to || return 1 + + report="$(ceph report)" + fc=$(jq '.osdmap_first_committed' <<< $report) + [[ $fc -eq $trim_to ]] || return 1 + + old_first_pinned=$first_pinned + old_last_pinned=$last_pinned + first_pinned=$(jq '.osdmap_manifest.first_pinned' <<< $report) + last_pinned=$(jq '.osdmap_manifest.last_pinned' <<< $report) + pinned_maps=( $(jq '.osdmap_manifest.pinned_maps[]' <<< $report) ) + [[ $first_pinned -eq $trim_to ]] || return 1 + [[ ${pinned_maps[1]} -eq $(( trim_to+1)) ]] || return 1 + + test_osdmap $first_pinned || return 1 + test_osdmap $(( first_pinned + 1 )) || return 1 + + # 3. trim everything + # + ceph config set mon mon_osd_force_trim_to 0 + + # generate an epoch so we get to trim maps + ceph osd set noup + ceph osd unset noup + + wait_for_osdmap_manifest "false" || return 1 + + return 0 +} + +test_mon_osdmap_prune || exit 1 + +echo "OK" diff --git a/qa/workunits/objectstore/test_fuse.sh b/qa/workunits/objectstore/test_fuse.sh new file mode 100755 index 00000000..f1dcbd04 --- /dev/null +++ b/qa/workunits/objectstore/test_fuse.sh @@ -0,0 +1,129 @@ +#!/bin/sh -ex + +if ! id -u | grep -q '^0$'; then + echo "not root, re-running self via sudo" + sudo PATH=$PATH TYPE=$TYPE $0 + exit 0 +fi + +expect_false() +{ + set -x + if "$@"; then return 1; else return 0; fi +} + +COT=ceph-objectstore-tool +DATA=store_test_fuse_dir +[ -z "$TYPE" ] && TYPE=bluestore +MNT=store_test_fuse_mnt + +rm -rf $DATA +mkdir -p $DATA + +test -d $MNT && fusermount -u $MNT || true +rmdir $MNT || true +mkdir $MNT + +export CEPH_ARGS=--enable_experimental_unrecoverable_data_corrupting_features=bluestore + +$COT --no-mon-config --op mkfs --data-path $DATA --type $TYPE +$COT --no-mon-config --op fuse --data-path $DATA --mountpoint $MNT & + +while ! test -e $MNT/type ; do + echo waiting for $MNT/type to appear + sleep 1 +done + +umask 0 + +grep $TYPE $MNT/type + +# create collection +mkdir $MNT/meta +test -e $MNT/meta/bitwise_hash_start +test -d $MNT/meta/all +test -d $MNT/meta/by_bitwise_hash + +# create object +mkdir $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0# +test -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data +test -d $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr +test -d $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap +test -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/bitwise_hash +test -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap_header + +# omap header +echo omap header > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap_header +grep -q omap $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap_header + +# omap +echo value a > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keya +echo value b > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keyb +ls $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap | grep -c key | grep -q 2 +grep 'value a' $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keya +grep 'value b' $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keyb +rm $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keya +test ! -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keya +rm $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keyb +test ! 
-e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/omap/keyb + +# attr +echo value a > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keya +echo value b > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keyb +ls $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr | grep -c key | grep -q 2 +grep 'value a' $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keya +grep 'value b' $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keyb +rm $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keya +test ! -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keya +rm $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keyb +test ! -e $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/attr/keyb + +# data +test ! -s $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data +echo asdfasdfasdf > $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data +test -s $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data +grep -q asdfasdfasdf $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data +truncate --size 4 $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data +stat --format=%s $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data | grep -q ^4$ +expect_false grep -q asdfasdfasdf $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data +rm $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data +test ! -s $MNT/meta/all/#-1:7b3f43c4:::osd_superblock:0#/data + + +# create pg collection +mkdir --mode 0003 $MNT/0.0_head +grep -q 00000000 $MNT/0.0_head/bitwise_hash_start +if [ "$TYPE" = "bluestore" ]; then + cat $MNT/0.0_head/bitwise_hash_bits + grep -q 3 $MNT/0.0_head/bitwise_hash_bits + grep -q 1fffffff $MNT/0.0_head/bitwise_hash_end +fi +test -d $MNT/0.0_head/all + +mkdir --mode 0003 $MNT/0.1_head +grep -q 80000000 $MNT/0.1_head/bitwise_hash_start +if [ "$TYPE" = "bluestore" ]; then + grep -q 3 $MNT/0.1_head/bitwise_hash_bits + grep -q 9fffffff $MNT/0.1_head/bitwise_hash_end +fi + +# create pg object +mkdir $MNT/0.0_head/all/#0:00000000::::head#/ +mkdir $MNT/0.0_head/all/#0:10000000:::foo:head#/ + +# verify pg bounds check +if [ "$TYPE" = "bluestore" ]; then + expect_false mkdir $MNT/0.0_head/all/#0:20000000:::bar:head#/ +fi + +# remove a collection +expect_false rmdir $MNT/0.0_head +rmdir $MNT/0.0_head/all/#0:10000000:::foo:head#/ +rmdir $MNT/0.0_head/all/#0:00000000::::head#/ +rmdir $MNT/0.0_head +rmdir $MNT/0.1_head + +fusermount -u $MNT +wait + +echo OK diff --git a/qa/workunits/osdc/stress_objectcacher.sh b/qa/workunits/osdc/stress_objectcacher.sh new file mode 100755 index 00000000..67baadc3 --- /dev/null +++ b/qa/workunits/osdc/stress_objectcacher.sh @@ -0,0 +1,28 @@ +#!/bin/sh -ex + +for i in $(seq 1 10) +do + for DELAY in 0 1000 + do + for OPS in 1000 10000 + do + for OBJECTS in 10 50 100 + do + for READS in 0.90 0.50 0.10 + do + for OP_SIZE in 4096 131072 1048576 + do + for MAX_DIRTY in 0 25165824 + do + ceph_test_objectcacher_stress --ops $OPS --percent-read $READS --delay-ns $DELAY --objects $OBJECTS --max-op-size $OP_SIZE --client-oc-max-dirty $MAX_DIRTY --stress-test > /dev/null 2>&1 + done + done + done + done + done + done +done + +ceph_test_objectcacher_stress --correctness-test > /dev/null 2>&1 + +echo OK diff --git a/qa/workunits/post-file.sh b/qa/workunits/post-file.sh new file mode 100755 index 00000000..120fb263 --- /dev/null +++ b/qa/workunits/post-file.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +set -ex + +what="$1" +[ -z "$what" ] && what=/etc/udev/rules.d +sudo ceph-post-file -d ceph-test-workunit $what + +echo OK diff --git a/qa/workunits/rados/clone.sh b/qa/workunits/rados/clone.sh new file mode 
100755 index 00000000..281e89f7 --- /dev/null +++ b/qa/workunits/rados/clone.sh @@ -0,0 +1,13 @@ +#!/bin/sh -x + +set -e + +rados -p data rm foo || true +rados -p data put foo.tmp /etc/passwd --object-locator foo +rados -p data clonedata foo.tmp foo --object-locator foo +rados -p data get foo /tmp/foo +cmp /tmp/foo /etc/passwd +rados -p data rm foo.tmp --object-locator foo +rados -p data rm foo + +echo OK \ No newline at end of file diff --git a/qa/workunits/rados/load-gen-big.sh b/qa/workunits/rados/load-gen-big.sh new file mode 100755 index 00000000..6715658e --- /dev/null +++ b/qa/workunits/rados/load-gen-big.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +rados -p rbd load-gen \ + --num-objects 10240 \ + --min-object-size 1048576 \ + --max-object-size 25600000 \ + --max-ops 1024 \ + --max-backlog 1024 \ + --read-percent 50 \ + --run-length 1200 diff --git a/qa/workunits/rados/load-gen-mix-small-long.sh b/qa/workunits/rados/load-gen-mix-small-long.sh new file mode 100755 index 00000000..593bad51 --- /dev/null +++ b/qa/workunits/rados/load-gen-mix-small-long.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +rados -p rbd load-gen \ + --num-objects 1024 \ + --min-object-size 1 \ + --max-object-size 1048576 \ + --max-ops 128 \ + --max-backlog 128 \ + --read-percent 50 \ + --run-length 1800 diff --git a/qa/workunits/rados/load-gen-mix-small.sh b/qa/workunits/rados/load-gen-mix-small.sh new file mode 100755 index 00000000..02db77bd --- /dev/null +++ b/qa/workunits/rados/load-gen-mix-small.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +rados -p rbd load-gen \ + --num-objects 1024 \ + --min-object-size 1 \ + --max-object-size 1048576 \ + --max-ops 128 \ + --max-backlog 128 \ + --read-percent 50 \ + --run-length 600 diff --git a/qa/workunits/rados/load-gen-mix.sh b/qa/workunits/rados/load-gen-mix.sh new file mode 100755 index 00000000..ad3b4be8 --- /dev/null +++ b/qa/workunits/rados/load-gen-mix.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +rados -p rbd load-gen \ + --num-objects 10240 \ + --min-object-size 1 \ + --max-object-size 1048576 \ + --max-ops 128 \ + --max-backlog 128 \ + --read-percent 50 \ + --run-length 600 diff --git a/qa/workunits/rados/load-gen-mostlyread.sh b/qa/workunits/rados/load-gen-mostlyread.sh new file mode 100755 index 00000000..236f82dd --- /dev/null +++ b/qa/workunits/rados/load-gen-mostlyread.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +rados -p rbd load-gen \ + --num-objects 51200 \ + --min-object-size 1 \ + --max-object-size 1048576 \ + --max-ops 128 \ + --max-backlog 128 \ + --read-percent 90 \ + --run-length 600 diff --git a/qa/workunits/rados/stress_watch.sh b/qa/workunits/rados/stress_watch.sh new file mode 100755 index 00000000..49f144bb --- /dev/null +++ b/qa/workunits/rados/stress_watch.sh @@ -0,0 +1,7 @@ +#!/bin/sh -e + +ceph_test_stress_watch +ceph_multi_stress_watch rep reppool repobj +ceph_multi_stress_watch ec ecpool ecobj + +exit 0 diff --git a/qa/workunits/rados/test.sh b/qa/workunits/rados/test.sh new file mode 100755 index 00000000..a0b2aed5 --- /dev/null +++ b/qa/workunits/rados/test.sh @@ -0,0 +1,61 @@ +#!/usr/bin/env bash +set -ex + +parallel=1 +[ "$1" = "--serial" ] && parallel=0 + +color="" +[ -t 1 ] && color="--gtest_color=yes" + +function cleanup() { + pkill -P $$ || true +} +trap cleanup EXIT ERR HUP INT QUIT + +declare -A pids + +for f in \ + api_aio api_aio_pp \ + api_io api_io_pp \ + api_asio api_list \ + api_lock api_lock_pp \ + api_misc api_misc_pp \ + api_tier_pp \ + api_pool \ + api_snapshots api_snapshots_pp \ + api_stat api_stat_pp \ + api_watch_notify api_watch_notify_pp \ + api_cmd api_cmd_pp \ + 
api_service api_service_pp \ + api_c_write_operations \ + api_c_read_operations \ + list_parallel \ + open_pools_parallel \ + delete_pools_parallel +do + if [ $parallel -eq 1 ]; then + r=`printf '%25s' $f` + ff=`echo $f | awk '{print $1}'` + bash -o pipefail -exc "ceph_test_rados_$f $color 2>&1 | tee ceph_test_rados_$ff.log | sed \"s/^/$r: /\"" & + pid=$! + echo "test $f on pid $pid" + pids[$f]=$pid + else + ceph_test_rados_$f + fi +done + +ret=0 +if [ $parallel -eq 1 ]; then +for t in "${!pids[@]}" +do + pid=${pids[$t]} + if ! wait $pid + then + echo "error in $t ($pid)" + ret=1 + fi +done +fi + +exit $ret diff --git a/qa/workunits/rados/test_alloc_hint.sh b/qa/workunits/rados/test_alloc_hint.sh new file mode 100755 index 00000000..2323915f --- /dev/null +++ b/qa/workunits/rados/test_alloc_hint.sh @@ -0,0 +1,177 @@ +#!/usr/bin/env bash + +set -ex +shopt -s nullglob # fns glob expansion in expect_alloc_hint_eq() + +# +# Helpers +# + +function get_xml_val() { + local xml="$1" + local tag="$2" + + local regex=".*<${tag}>(.*).*" + if [[ ! "${xml}" =~ ${regex} ]]; then + echo "'${xml}' xml doesn't match '${tag}' tag regex" >&2 + return 2 + fi + + echo "${BASH_REMATCH[1]}" +} + +function get_conf_val() { + set -e + + local entity="$1" + local option="$2" + + local val + val="$(sudo ceph daemon "${entity}" config get --format=xml "${option}")" + val="$(get_xml_val "${val}" "${option}")" + + echo "${val}" +} + +function setup_osd_data() { + for (( i = 0 ; i < "${NUM_OSDS}" ; i++ )); do + OSD_DATA[i]="$(get_conf_val "osd.$i" "osd_data")" + done +} + +function setup_pgid() { + local poolname="$1" + local objname="$2" + + local pgid + pgid="$(ceph osd map "${poolname}" "${objname}" --format=xml)" + pgid="$(get_xml_val "${pgid}" "pgid")" + + PGID="${pgid}" +} + +function expect_alloc_hint_eq() { + export CEPH_ARGS="--osd-objectstore=filestore" + local expected_extsize="$1" + + for (( i = 0 ; i < "${NUM_OSDS}" ; i++ )); do + # Make sure that stuff is flushed from the journal to the store + # by the time we get to it, as we prod the actual files and not + # the journal. + sudo ceph daemon "osd.${i}" "flush_journal" + + # e.g., .../25.6_head/foo__head_7FC1F406__19 + # .../26.bs1_head/bar__head_EFE6384B__1a_ffffffffffffffff_1 + local fns=$(sudo sh -c "ls ${OSD_DATA[i]}/current/${PGID}*_head/${OBJ}_*") + local count="${#fns[@]}" + if [ "${count}" -ne 1 ]; then + echo "bad fns count: ${count}" >&2 + return 2 + fi + + local extsize + extsize="$(sudo xfs_io -c extsize "${fns[0]}")" + local extsize_regex="^\[(.*)\] ${fns[0]}$" + if [[ ! 
"${extsize}" =~ ${extsize_regex} ]]; then + echo "extsize doesn't match extsize_regex: ${extsize}" >&2 + return 2 + fi + extsize="${BASH_REMATCH[1]}" + + if [ "${extsize}" -ne "${expected_extsize}" ]; then + echo "FAIL: alloc_hint: actual ${extsize}, expected ${expected_extsize}" >&2 + return 1 + fi + done +} + +# +# Global setup +# + +EC_K="2" +EC_M="1" +NUM_OSDS="$((EC_K + EC_M))" + +NUM_PG="12" +NUM_PGP="${NUM_PG}" + +LOW_CAP="$(get_conf_val "osd.0" "filestore_max_alloc_hint_size")" +HIGH_CAP="$((LOW_CAP * 10))" # 10M, assuming 1M default cap +SMALL_HINT="$((LOW_CAP / 4))" # 256K, assuming 1M default cap +BIG_HINT="$((LOW_CAP * 6))" # 6M, assuming 1M default cap + +setup_osd_data + +# +# ReplicatedBackend tests +# + +POOL="alloc_hint-rep" +ceph osd pool create "${POOL}" "${NUM_PG}" +ceph osd pool set "${POOL}" size "${NUM_OSDS}" +ceph osd pool application enable "${POOL}" rados + +OBJ="foo" +setup_pgid "${POOL}" "${OBJ}" +rados -p "${POOL}" create "${OBJ}" + +# Empty object, SMALL_HINT - expect SMALL_HINT +rados -p "${POOL}" set-alloc-hint "${OBJ}" "${SMALL_HINT}" "${SMALL_HINT}" +expect_alloc_hint_eq "${SMALL_HINT}" + +# Try changing to BIG_HINT (1) - expect LOW_CAP (BIG_HINT > LOW_CAP) +rados -p "${POOL}" set-alloc-hint "${OBJ}" "${BIG_HINT}" "${BIG_HINT}" +expect_alloc_hint_eq "${LOW_CAP}" + +# Bump the cap to HIGH_CAP +ceph tell 'osd.*' injectargs "--filestore_max_alloc_hint_size ${HIGH_CAP}" + +# Try changing to BIG_HINT (2) - expect BIG_HINT (BIG_HINT < HIGH_CAP) +rados -p "${POOL}" set-alloc-hint "${OBJ}" "${BIG_HINT}" "${BIG_HINT}" +expect_alloc_hint_eq "${BIG_HINT}" + +ceph tell 'osd.*' injectargs "--filestore_max_alloc_hint_size ${LOW_CAP}" + +# Populate object with some data +rados -p "${POOL}" put "${OBJ}" /etc/passwd + +# Try changing back to SMALL_HINT - expect BIG_HINT (non-empty object) +rados -p "${POOL}" set-alloc-hint "${OBJ}" "${SMALL_HINT}" "${SMALL_HINT}" +expect_alloc_hint_eq "${BIG_HINT}" + +OBJ="bar" +setup_pgid "${POOL}" "${OBJ}" + +# Non-existent object, SMALL_HINT - expect SMALL_HINT (object creation) +rados -p "${POOL}" set-alloc-hint "${OBJ}" "${SMALL_HINT}" "${SMALL_HINT}" +expect_alloc_hint_eq "${SMALL_HINT}" + +ceph osd pool delete "${POOL}" "${POOL}" --yes-i-really-really-mean-it + +# +# ECBackend tests +# + +PROFILE="alloc_hint-ecprofile" +POOL="alloc_hint-ec" +ceph osd erasure-code-profile set "${PROFILE}" k=2 m=1 crush-failure-domain=osd +ceph osd erasure-code-profile get "${PROFILE}" # just so it's logged +ceph osd pool create "${POOL}" "${NUM_PG}" "${NUM_PGP}" erasure "${PROFILE}" +ceph osd pool application enable "${POOL}" rados + +OBJ="baz" +setup_pgid "${POOL}" "${OBJ}" +rados -p "${POOL}" create "${OBJ}" + +# Empty object, SMALL_HINT - expect scaled-down SMALL_HINT +rados -p "${POOL}" set-alloc-hint "${OBJ}" "${SMALL_HINT}" "${SMALL_HINT}" +expect_alloc_hint_eq "$((SMALL_HINT / EC_K))" + +ceph osd pool delete "${POOL}" "${POOL}" --yes-i-really-really-mean-it + +# +# Global teardown +# + +echo "OK" diff --git a/qa/workunits/rados/test_cache_pool.sh b/qa/workunits/rados/test_cache_pool.sh new file mode 100755 index 00000000..5e28b355 --- /dev/null +++ b/qa/workunits/rados/test_cache_pool.sh @@ -0,0 +1,170 @@ +#!/usr/bin/env bash + +set -ex + +expect_false() +{ + set -x + if "$@"; then return 1; else return 0; fi +} + +# create pools, set up tier relationship +ceph osd pool create base_pool 2 +ceph osd pool application enable base_pool rados +ceph osd pool create partial_wrong 2 +ceph osd pool create wrong_cache 2 +ceph osd tier add base_pool 
partial_wrong +ceph osd tier add base_pool wrong_cache + +# populate base_pool with some data +echo "foo" > foo.txt +echo "bar" > bar.txt +echo "baz" > baz.txt +rados -p base_pool put fooobj foo.txt +rados -p base_pool put barobj bar.txt +# fill in wrong_cache backwards so we can tell we read from it +rados -p wrong_cache put fooobj bar.txt +rados -p wrong_cache put barobj foo.txt +# partial_wrong gets barobj backwards so we can check promote and non-promote +rados -p partial_wrong put barobj foo.txt + +# get the objects back before setting a caching pool +rados -p base_pool get fooobj tmp.txt +diff -q tmp.txt foo.txt +rados -p base_pool get barobj tmp.txt +diff -q tmp.txt bar.txt + +# set up redirect and make sure we get backwards results +ceph osd tier set-overlay base_pool wrong_cache +ceph osd tier cache-mode wrong_cache writeback +rados -p base_pool get fooobj tmp.txt +diff -q tmp.txt bar.txt +rados -p base_pool get barobj tmp.txt +diff -q tmp.txt foo.txt + +# switch cache pools and make sure we're doing promote +ceph osd tier remove-overlay base_pool +ceph osd tier set-overlay base_pool partial_wrong +ceph osd tier cache-mode partial_wrong writeback +rados -p base_pool get fooobj tmp.txt +diff -q tmp.txt foo.txt # hurray, it promoted! +rados -p base_pool get barobj tmp.txt +diff -q tmp.txt foo.txt # yep, we read partial_wrong's local object! + +# try a nonexistent object and make sure we get an error +expect_false rados -p base_pool get bazobj tmp.txt + +# drop the cache entirely and make sure contents are still the same +ceph osd tier remove-overlay base_pool +rados -p base_pool get fooobj tmp.txt +diff -q tmp.txt foo.txt +rados -p base_pool get barobj tmp.txt +diff -q tmp.txt bar.txt + +# create an empty cache pool and make sure it has objects after reading +ceph osd pool create empty_cache 2 + +touch empty.txt +rados -p empty_cache ls > tmp.txt +diff -q tmp.txt empty.txt + +ceph osd tier add base_pool empty_cache +ceph osd tier set-overlay base_pool empty_cache +ceph osd tier cache-mode empty_cache writeback +rados -p base_pool get fooobj tmp.txt +rados -p base_pool get barobj tmp.txt +expect_false rados -p base_pool get bazobj tmp.txt + +rados -p empty_cache ls > tmp.txt +expect_false diff -q tmp.txt empty.txt + +# cleanup +ceph osd tier remove-overlay base_pool +ceph osd tier remove base_pool wrong_cache +ceph osd tier remove base_pool partial_wrong +ceph osd tier remove base_pool empty_cache +ceph osd pool delete base_pool base_pool --yes-i-really-really-mean-it +ceph osd pool delete empty_cache empty_cache --yes-i-really-really-mean-it +ceph osd pool delete wrong_cache wrong_cache --yes-i-really-really-mean-it +ceph osd pool delete partial_wrong partial_wrong --yes-i-really-really-mean-it + +## set of base, cache +ceph osd pool create base 8 +ceph osd pool application enable base rados +ceph osd pool create cache 8 + +ceph osd tier add base cache +ceph osd tier cache-mode cache writeback +ceph osd tier set-overlay base cache + +# cache-flush, cache-evict +rados -p base put foo /etc/passwd +expect_false rados -p base cache-evict foo +expect_false rados -p base cache-flush foo +expect_false rados -p cache cache-evict foo +rados -p cache cache-flush foo +rados -p cache cache-evict foo +rados -p cache ls - | wc -l | grep 0 + +# cache-try-flush, cache-evict +rados -p base put foo /etc/passwd +expect_false rados -p base cache-evict foo +expect_false rados -p base cache-flush foo +expect_false rados -p cache cache-evict foo +rados -p cache cache-try-flush foo +rados -p cache 
cache-evict foo +rados -p cache ls - | wc -l | grep 0 + +# cache-flush-evict-all +rados -p base put bar /etc/passwd +rados -p cache ls - | wc -l | grep 1 +expect_false rados -p base cache-flush-evict-all +rados -p cache cache-flush-evict-all +rados -p cache ls - | wc -l | grep 0 + +# cache-try-flush-evict-all +rados -p base put bar /etc/passwd +rados -p cache ls - | wc -l | grep 1 +expect_false rados -p base cache-flush-evict-all +rados -p cache cache-try-flush-evict-all +rados -p cache ls - | wc -l | grep 0 + +# cache flush/evit when clone objects exist +rados -p base put testclone /etc/passwd +rados -p cache ls - | wc -l | grep 1 +ceph osd pool mksnap base snap +rados -p base put testclone /etc/hosts +rados -p cache cache-flush-evict-all +rados -p cache ls - | wc -l | grep 0 + +ceph osd tier cache-mode cache forward --yes-i-really-mean-it +rados -p base -s snap get testclone testclone.txt +diff -q testclone.txt /etc/passwd +rados -p base get testclone testclone.txt +diff -q testclone.txt /etc/hosts + +# test --with-clones option +ceph osd tier cache-mode cache writeback +rados -p base put testclone2 /etc/passwd +rados -p cache ls - | wc -l | grep 1 +ceph osd pool mksnap base snap1 +rados -p base put testclone2 /etc/hosts +expect_false rados -p cache cache-flush testclone2 +rados -p cache cache-flush testclone2 --with-clones +expect_false rados -p cache cache-evict testclone2 +rados -p cache cache-evict testclone2 --with-clones +rados -p cache ls - | wc -l | grep 0 + +rados -p base -s snap1 get testclone2 testclone2.txt +diff -q testclone2.txt /etc/passwd +rados -p base get testclone2 testclone2.txt +diff -q testclone2.txt /etc/hosts + +# cleanup +ceph osd tier remove-overlay base +ceph osd tier remove base cache + +ceph osd pool delete cache cache --yes-i-really-really-mean-it +ceph osd pool delete base base --yes-i-really-really-mean-it + +echo OK diff --git a/qa/workunits/rados/test_crash.sh b/qa/workunits/rados/test_crash.sh new file mode 100755 index 00000000..6608d787 --- /dev/null +++ b/qa/workunits/rados/test_crash.sh @@ -0,0 +1,39 @@ +#!/bin/sh + +set -x + +# run on a single-node three-OSD cluster + +sudo killall -ABRT ceph-osd +sleep 5 + +# kill caused coredumps; find them and delete them, carefully, so as +# not to disturb other coredumps, or else teuthology will see them +# and assume test failure. sudos are because the core files are +# root/600 +for f in $(find $TESTDIR/archive/coredump -type f); do + gdb_output=$(echo "quit" | sudo gdb /usr/bin/ceph-osd $f) + if expr match "$gdb_output" ".*generated.*ceph-osd.*" && \ + ( \ + + expr match "$gdb_output" ".*terminated.*signal 6.*" || \ + expr match "$gdb_output" ".*terminated.*signal SIGABRT.*" \ + ) + then + sudo rm $f + fi +done + +# let daemon find crashdumps on startup +sudo systemctl restart ceph-crash +sleep 30 + +# must be 3 crashdumps registered and moved to crash/posted +[ $(ceph crash ls | wc -l) = 4 ] || exit 1 # 4 here bc of the table header +[ $(sudo find /var/lib/ceph/crash/posted/ -name meta | wc -l) = 3 ] || exit 1 + +# there should be a health warning +ceph health detail | grep RECENT_CRASH || exit 1 +ceph crash archive-all +sleep 30 +ceph health detail | grep -c RECENT_CRASH | grep 0 # should be gone! 
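# --- Editorial sketch (not part of the upstream patch) ---
# test_crash.sh above compares the raw line count of `ceph crash ls` against 4
# only because the command prints a table header ahead of the three expected
# crash entries. A minimal, hypothetical variant of the same check that skips
# the header explicitly (assuming the same single-node, three-OSD setup) is:
#
#   crashes=$(ceph crash ls | tail -n +2 | wc -l)
#   [ "$crashes" -eq 3 ] || exit 1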
diff --git a/qa/workunits/rados/test_dedup_tool.sh b/qa/workunits/rados/test_dedup_tool.sh new file mode 100755 index 00000000..25994401 --- /dev/null +++ b/qa/workunits/rados/test_dedup_tool.sh @@ -0,0 +1,156 @@ +#!/usr/bin/env bash + +set -x + +die() { + echo "$@" + exit 1 +} + +do_run() { + if [ "$1" == "--tee" ]; then + shift + tee_out="$1" + shift + "$@" | tee $tee_out + else + "$@" + fi +} + +run_expect_succ() { + echo "RUN_EXPECT_SUCC: " "$@" + do_run "$@" + [ $? -ne 0 ] && die "expected success, but got failure! cmd: $@" +} + +run() { + echo "RUN: " $@ + do_run "$@" +} + +if [ -n "$CEPH_BIN" ] ; then + # CMake env + RADOS_TOOL="$CEPH_BIN/rados" + CEPH_TOOL="$CEPH_BIN/ceph" + DEDUP_TOOL="$CEPH_BIN/cephdeduptool" +else + # executables should be installed by the QA env + RADOS_TOOL=$(which rados) + CEPH_TOOL=$(which ceph) + DEDUP_TOOL=$(which cephdeduptool) +fi + +POOL=dedup_pool +OBJ=test_rados_obj + +[ -x "$RADOS_TOOL" ] || die "couldn't find $RADOS_TOOL binary to test" +[ -x "$CEPH_TOOL" ] || die "couldn't find $CEPH_TOOL binary to test" + +run_expect_succ "$CEPH_TOOL" osd pool create "$POOL" 8 + +function test_dedup_ratio_fixed() +{ + # case 1 + dd if=/dev/urandom of=dedup_object_1k bs=1K count=1 + dd if=dedup_object_1k of=dedup_object_100k bs=1K count=100 + + $RADOS_TOOL -p $POOL put $OBJ ./dedup_object_100k + RESULT=$($DEDUP_TOOL --op estimate --pool $POOL --chunk-size 1024 --chunk-algorithm fixed --fingerprint-algorithm sha1 --debug | grep result | awk '{print$4}') + if [ 1024 -ne $RESULT ]; + then + die "Estimate failed expecting 1024 result $RESULT" + fi + + # case 2 + dd if=/dev/zero of=dedup_object_10m bs=10M count=1 + + $RADOS_TOOL -p $POOL put $OBJ ./dedup_object_10m + RESULT=$($DEDUP_TOOL --op estimate --pool $POOL --chunk-size 4096 --chunk-algorithm fixed --fingerprint-algorithm sha1 --debug | grep result | awk '{print$4}') + if [ 4096 -ne $RESULT ]; + then + die "Estimate failed expecting 4096 result $RESULT" + fi + + # case 3 max_thread + for num in `seq 0 20` + do + dd if=/dev/zero of=dedup_object_$num bs=4M count=1 + $RADOS_TOOL -p $POOL put dedup_object_$num ./dedup_object_$num + done + + RESULT=$($DEDUP_TOOL --op estimate --pool $POOL --chunk-size 4096 --chunk-algorithm fixed --fingerprint-algorithm sha1 --max-thread 4 --debug | grep result | awk '{print$2}') + + if [ 98566144 -ne $RESULT ]; + then + die "Estimate failed expecting 98566144 result $RESULT" + fi + + rm -rf ./dedup_object_1k ./dedup_object_100k ./dedup_object_10m + for num in `seq 0 20` + do + rm -rf ./dedup_object_$num + done +} + +function test_dedup_chunk_scrub() +{ + + CHUNK_POOL=dedup_chunk_pool + run_expect_succ "$CEPH_TOOL" osd pool create "$CHUNK_POOL" 8 + + echo "hi there" > foo + + echo "hi there" > bar + + echo "there" > foo-chunk + + echo "CHUNK" > bar-chunk + + $CEPH_TOOL osd pool set $POOL fingerprint_algorithm sha1 --yes-i-really-mean-it + + $RADOS_TOOL -p $POOL put foo ./foo + $RADOS_TOOL -p $POOL put bar ./bar + + $RADOS_TOOL -p $CHUNK_POOL put bar-chunk ./bar-chunk + $RADOS_TOOL -p $CHUNK_POOL put foo-chunk ./foo-chunk + + $RADOS_TOOL -p $POOL set-chunk bar 0 8 --target-pool $CHUNK_POOL bar-chunk 0 --with-reference + $RADOS_TOOL -p $POOL set-chunk foo 0 8 --target-pool $CHUNK_POOL foo-chunk 0 --with-reference + + echo "There hi" > test_obj + # dirty + $RADOS_TOOL -p $POOL put foo ./test_obj + # flush + $RADOS_TOOL -p $POOL put foo ./test_obj + sleep 2 + + rados ls -p $CHUNK_POOL + CHUNK_OID=$(echo -n "There hi" | sha1sum) + + $DEDUP_TOOL --op add_chunk_ref --pool $POOL --chunk_pool 
$CHUNK_POOL --object $CHUNK_OID --target_ref bar + RESULT=$($DEDUP_TOOL --op get_chunk_ref --pool $POOL --chunk_pool $CHUNK_POOL --object $CHUNK_OID) + + $DEDUP_TOOL --op chunk_scrub --pool $POOL --chunk_pool $CHUNK_POOL + + RESULT=$($DEDUP_TOOL --op get_chunk_ref --pool $POOL --chunk_pool $CHUNK_POOL --object $CHUNK_OID | grep bar) + if [ -n "$RESULT" ] ; then + $CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it + $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it + die "Scrub failed expecting bar is removed" + fi + + $CEPH_TOOL osd pool delete $CHUNK_POOL $CHUNK_POOL --yes-i-really-really-mean-it + + rm -rf ./foo ./bar ./foo-chunk ./bar-chunk ./test_obj +} + +test_dedup_ratio_fixed +test_dedup_chunk_scrub + +$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it + +echo "SUCCESS!" +exit 0 + + diff --git a/qa/workunits/rados/test_envlibrados_for_rocksdb.sh b/qa/workunits/rados/test_envlibrados_for_rocksdb.sh new file mode 100755 index 00000000..7099dafb --- /dev/null +++ b/qa/workunits/rados/test_envlibrados_for_rocksdb.sh @@ -0,0 +1,94 @@ +#!/usr/bin/env bash +set -ex + +############################################ +# Helper functions +############################################ +source $(dirname $0)/../ceph-helpers-root.sh + +############################################ +# Install required tools +############################################ +echo "Install required tools" + +CURRENT_PATH=`pwd` + +############################################ +# Compile&Start RocksDB +############################################ +# install prerequisites +# for rocksdb +case $(distro_id) in + ubuntu|debian|devuan) + install git g++ libsnappy-dev zlib1g-dev libbz2-dev libradospp-dev cmake + ;; + centos|fedora|rhel) + install git gcc-c++.x86_64 snappy-devel zlib zlib-devel bzip2 bzip2-devel libradospp-devel.x86_64 + if [ $(distro_id) = "fedora" ]; then + install cmake + else + install_cmake3_on_centos7 + fi + ;; + opensuse*|suse|sles) + install git gcc-c++ snappy-devel zlib-devel libbz2-devel libradospp-devel + ;; + *) + echo "$(distro_id) is unknown, $@ will have to be installed manually." + ;; +esac + +# # gflags +# sudo yum install gflags-devel +# +# wget https://github.com/schuhschuh/gflags/archive/master.zip +# unzip master.zip +# cd gflags-master +# mkdir build && cd build +# export CXXFLAGS="-fPIC" && cmake .. && make VERBOSE=1 +# make && make install + +# # snappy-devel + + +echo "Compile rocksdb" +if [ -e rocksdb ]; then + rm -fr rocksdb +fi + +pushd $(dirname /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/bash.sh)/../../../ +git submodule update --init src/rocksdb +popd +git clone $(dirname /home/ubuntu/cephtest/clone.client.0/qa/workunits/rados/bash.sh)/../../../src/rocksdb rocksdb + +# compile code +cd rocksdb +if type cmake3 > /dev/null 2>&1 ; then + CMAKE=cmake3 +else + CMAKE=cmake +fi +mkdir build && cd build && ${CMAKE} -DWITH_LIBRADOS=ON -DWITH_SNAPPY=ON -DWITH_GFLAGS=OFF -DFAIL_ON_WARNINGS=OFF .. 
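# --- Editorial note (not part of the upstream patch) ---
# The CMake invocation above configures the bundled RocksDB for the EnvLibrados
# test: WITH_LIBRADOS=ON builds the librados-backed Env under test,
# WITH_SNAPPY=ON links the snappy library installed earlier in this script,
# WITH_GFLAGS=OFF drops the optional gflags dependency, and
# FAIL_ON_WARNINGS=OFF keeps compiler warnings from aborting the build on newer
# toolchains. The `rocksdb_env_librados_test` target built on the next line is
# the binary exercised at the end of the script.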
+make rocksdb_env_librados_test -j8 + +echo "Copy ceph.conf" +# prepare ceph.conf +mkdir -p ../ceph/src/ +if [ -f "/etc/ceph/ceph.conf" ]; then + cp /etc/ceph/ceph.conf ../ceph/src/ +elif [ -f "/etc/ceph/ceph/ceph.conf" ]; then + cp /etc/ceph/ceph/ceph.conf ../ceph/src/ +else + echo "/etc/ceph/ceph/ceph.conf doesn't exist" +fi + +echo "Run EnvLibrados test" +# run test +if [ -f "../ceph/src/ceph.conf" ] + then + cp env_librados_test ~/cephtest/archive + ./env_librados_test +else + echo "../ceph/src/ceph.conf doesn't exist" +fi +cd ${CURRENT_PATH} diff --git a/qa/workunits/rados/test_hang.sh b/qa/workunits/rados/test_hang.sh new file mode 100755 index 00000000..724e0bb8 --- /dev/null +++ b/qa/workunits/rados/test_hang.sh @@ -0,0 +1,8 @@ +#!/bin/sh -ex + +# Hang forever for manual testing using the thrasher +while(true) +do + sleep 300 +done +exit 0 diff --git a/qa/workunits/rados/test_health_warnings.sh b/qa/workunits/rados/test_health_warnings.sh new file mode 100755 index 00000000..d393e5c6 --- /dev/null +++ b/qa/workunits/rados/test_health_warnings.sh @@ -0,0 +1,76 @@ +#!/usr/bin/env bash + +set -uex + +# number of osds = 10 +crushtool -o crushmap --build --num_osds 10 host straw 2 rack straw 2 row straw 2 root straw 0 +ceph osd setcrushmap -i crushmap +ceph osd tree +ceph tell osd.* injectargs --osd_max_markdown_count 1024 --osd_max_markdown_period 1 +ceph osd set noout + +wait_for_healthy() { + while ceph health | grep down + do + sleep 1 + done +} + +test_mark_two_osds_same_host_down() { + ceph osd set noup + ceph osd down osd.0 osd.1 + ceph health detail + ceph health | grep "1 host" + ceph health | grep "2 osds" + ceph health detail | grep "osd.0" + ceph health detail | grep "osd.1" + ceph osd unset noup + wait_for_healthy +} + +test_mark_two_osds_same_rack_down() { + ceph osd set noup + ceph osd down osd.8 osd.9 + ceph health detail + ceph health | grep "1 host" + ceph health | grep "1 rack" + ceph health | grep "1 row" + ceph health | grep "2 osds" + ceph health detail | grep "osd.8" + ceph health detail | grep "osd.9" + ceph osd unset noup + wait_for_healthy +} + +test_mark_all_but_last_osds_down() { + ceph osd set noup + ceph osd down $(ceph osd ls | sed \$d) + ceph health detail + ceph health | grep "1 row" + ceph health | grep "2 racks" + ceph health | grep "4 hosts" + ceph health | grep "9 osds" + ceph osd unset noup + wait_for_healthy +} + +test_mark_two_osds_same_host_down_with_classes() { + ceph osd set noup + ceph osd crush set-device-class ssd osd.0 osd.2 osd.4 osd.6 osd.8 + ceph osd crush set-device-class hdd osd.1 osd.3 osd.5 osd.7 osd.9 + ceph osd down osd.0 osd.1 + ceph health detail + ceph health | grep "1 host" + ceph health | grep "2 osds" + ceph health detail | grep "osd.0" + ceph health detail | grep "osd.1" + ceph osd unset noup + wait_for_healthy +} + +test_mark_two_osds_same_host_down +test_mark_two_osds_same_rack_down +test_mark_all_but_last_osds_down +test_mark_two_osds_same_host_down_with_classes + +exit 0 diff --git a/qa/workunits/rados/test_large_omap_detection.py b/qa/workunits/rados/test_large_omap_detection.py new file mode 100755 index 00000000..c6cf195d --- /dev/null +++ b/qa/workunits/rados/test_large_omap_detection.py @@ -0,0 +1,134 @@ +#!/usr/bin/python +# -*- mode:python -*- +# vim: ts=4 sw=4 smarttab expandtab +# +# Copyright (C) 2017 Red Hat +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Library Public License as published by +# the Free Software Foundation; either version 2, or (at 
your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Library Public License for more details. +# + +import json +import rados +import shlex +import subprocess +import time + +def cleanup(cluster): + cluster.delete_pool('large-omap-test-pool') + cluster.shutdown() + +def init(): + # For local testing + #cluster = rados.Rados(conffile='./ceph.conf') + cluster = rados.Rados(conffile='/etc/ceph/ceph.conf') + cluster.connect() + print("\nCluster ID: " + cluster.get_fsid()) + cluster.create_pool('large-omap-test-pool') + ioctx = cluster.open_ioctx('large-omap-test-pool') + ioctx.write_full('large-omap-test-object1', "Lorem ipsum") + op = ioctx.create_write_op() + + keys = [] + values = [] + for x in range(20001): + keys.append(str(x)) + values.append("X") + + ioctx.set_omap(op, tuple(keys), tuple(values)) + ioctx.operate_write_op(op, 'large-omap-test-object1', 0) + ioctx.release_write_op(op) + + ioctx.write_full('large-omap-test-object2', "Lorem ipsum dolor") + op = ioctx.create_write_op() + + buffer = ("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do " + "eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut " + "enim ad minim veniam, quis nostrud exercitation ullamco laboris " + "nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in " + "reprehenderit in voluptate velit esse cillum dolore eu fugiat " + "nulla pariatur. Excepteur sint occaecat cupidatat non proident, " + "sunt in culpa qui officia deserunt mollit anim id est laborum.") + + keys = [] + values = [] + for x in range(20000): + keys.append(str(x)) + values.append(buffer) + + ioctx.set_omap(op, tuple(keys), tuple(values)) + ioctx.operate_write_op(op, 'large-omap-test-object2', 0) + ioctx.release_write_op(op) + ioctx.close() + return cluster + +def get_deep_scrub_timestamp(pgid): + cmd = ['ceph', 'pg', 'dump', '--format=json-pretty'] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) + out = proc.communicate()[0] + try: + pgstats = json.loads(out)['pg_map']['pg_stats'] + except KeyError: + pgstats = json.loads(out)['pg_stats'] + for stat in pgstats: + if stat['pgid'] == pgid: + return stat['last_deep_scrub_stamp'] + +def wait_for_scrub(): + osds = set(); + pgs = dict(); + cmd = ['ceph', 'osd', 'map', 'large-omap-test-pool', + 'large-omap-test-object1', '--format=json-pretty'] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) + out = proc.communicate()[0] + osds.add(json.loads(out)['acting_primary']) + pgs[json.loads(out)['pgid']] = get_deep_scrub_timestamp(json.loads(out)['pgid']) + cmd = ['ceph', 'osd', 'map', 'large-omap-test-pool', + 'large-omap-test-object2', '--format=json-pretty'] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) + out = proc.communicate()[0] + osds.add(json.loads(out)['acting_primary']) + pgs[json.loads(out)['pgid']] = get_deep_scrub_timestamp(json.loads(out)['pgid']) + + for pg in pgs: + command = "ceph pg deep-scrub " + str(pg) + subprocess.check_call(shlex.split(command)) + + for pg in pgs: + RETRIES = 0 + while RETRIES < 60 and pgs[pg] == get_deep_scrub_timestamp(pg): + time.sleep(10) + RETRIES += 1 + +def check_health_output(): + RETRIES = 0 + result = 0 + while RETRIES < 6 and result != 2: + result = 0 + RETRIES += 1 + output = subprocess.check_output(["ceph", "health", "detail"]) + for line in output.splitlines(): + result += int(line.find('2 large omap objects') != 
-1) + time.sleep(10) + + if result != 2: + print("Error, got invalid output:") + print(output) + raise Exception + +def main(): + cluster = init() + wait_for_scrub() + check_health_output() + + cleanup(cluster) + +if __name__ == '__main__': + main() diff --git a/qa/workunits/rados/test_librados_build.sh b/qa/workunits/rados/test_librados_build.sh new file mode 100755 index 00000000..0bca5050 --- /dev/null +++ b/qa/workunits/rados/test_librados_build.sh @@ -0,0 +1,74 @@ +#!/bin/bash -ex +# +# Compile and run a librados application outside of the ceph build system, so +# that we can be sure librados.h[pp] is still usable and hasn't accidentally +# started depending on internal headers. +# +# The script assumes all dependencies - e.g. curl, make, gcc, librados headers, +# libradosstriper headers, boost headers, etc. - are already installed. +# + +source $(dirname $0)/../ceph-helpers-root.sh + +trap cleanup EXIT + +SOURCES="hello_radosstriper.cc +hello_world_c.c +hello_world.cc +Makefile +" +BINARIES_TO_RUN="hello_world_c +hello_world_cpp +" +BINARIES="${BINARIES_TO_RUN}hello_radosstriper_cpp +" +DL_PREFIX="http://git.ceph.com/?p=ceph.git;a=blob_plain;hb=nautilus;f=examples/librados/" +#DL_PREFIX="https://raw.githubusercontent.com/ceph/ceph/nautilus/examples/librados/" +DESTDIR=$(pwd) + +function cleanup () { + for f in $BINARIES$SOURCES ; do + rm -f "${DESTDIR}/$f" + done +} + +function get_sources () { + for s in $SOURCES ; do + curl --progress-bar --output $s ${DL_PREFIX}$s + done +} + +function check_sources () { + for s in $SOURCES ; do + test -f $s + done +} + +function check_binaries () { + for b in $BINARIES ; do + file $b + test -f $b + done +} + +function run_binaries () { + for b in $BINARIES_TO_RUN ; do + ./$b -c /etc/ceph/ceph.conf + done +} + +pushd $DESTDIR +case $(distro_id) in + centos|fedora|rhel|opensuse*|suse|sles) + install gcc-c++ make libradospp-devel librados-devel;; + ubuntu|debian|devuan) + install g++ make libradospp-dev librados-dev;; + *) + echo "$(distro_id) is unknown, $@ will have to be installed manually." 
+esac +get_sources +check_sources +make all-system +check_binaries +run_binaries +popd diff --git a/qa/workunits/rados/test_pool_access.sh b/qa/workunits/rados/test_pool_access.sh new file mode 100755 index 00000000..2a7077a4 --- /dev/null +++ b/qa/workunits/rados/test_pool_access.sh @@ -0,0 +1,108 @@ +#!/usr/bin/env bash + +set -ex + +KEYRING=$(mktemp) +trap cleanup EXIT ERR HUP INT QUIT + +cleanup() { + (ceph auth del client.mon_read || true) >/dev/null 2>&1 + (ceph auth del client.mon_write || true) >/dev/null 2>&1 + + rm -f $KEYRING +} + +expect_false() +{ + set -x + if "$@"; then return 1; else return 0; fi +} + +create_pool_op() { + ID=$1 + POOL=$2 + + cat << EOF | CEPH_ARGS="-k $KEYRING" python +import rados + +cluster = rados.Rados(conffile="", rados_id="${ID}") +cluster.connect() +cluster.create_pool("${POOL}") +EOF +} + +delete_pool_op() { + ID=$1 + POOL=$2 + + cat << EOF | CEPH_ARGS="-k $KEYRING" python +import rados + +cluster = rados.Rados(conffile="", rados_id="${ID}") +cluster.connect() +cluster.delete_pool("${POOL}") +EOF +} + +create_pool_snap_op() { + ID=$1 + POOL=$2 + SNAP=$3 + + cat << EOF | CEPH_ARGS="-k $KEYRING" python +import rados + +cluster = rados.Rados(conffile="", rados_id="${ID}") +cluster.connect() +ioctx = cluster.open_ioctx("${POOL}") + +ioctx.create_snap("${SNAP}") +EOF +} + +remove_pool_snap_op() { + ID=$1 + POOL=$2 + SNAP=$3 + + cat << EOF | CEPH_ARGS="-k $KEYRING" python +import rados + +cluster = rados.Rados(conffile="", rados_id="${ID}") +cluster.connect() +ioctx = cluster.open_ioctx("${POOL}") + +ioctx.remove_snap("${SNAP}") +EOF +} + +test_pool_op() +{ + ceph auth get-or-create client.mon_read mon 'allow r' >> $KEYRING + ceph auth get-or-create client.mon_write mon 'allow *' >> $KEYRING + + expect_false create_pool_op mon_read pool1 + create_pool_op mon_write pool1 + + expect_false create_pool_snap_op mon_read pool1 snap1 + create_pool_snap_op mon_write pool1 snap1 + + expect_false remove_pool_snap_op mon_read pool1 snap1 + remove_pool_snap_op mon_write pool1 snap1 + + expect_false delete_pool_op mon_read pool1 + delete_pool_op mon_write pool1 +} + +key=`ceph auth get-or-create-key client.poolaccess1 mon 'allow r' osd 'allow *'` +rados --id poolaccess1 --key $key -p rbd ls + +key=`ceph auth get-or-create-key client.poolaccess2 mon 'allow r' osd 'allow * pool=nopool'` +expect_false rados --id poolaccess2 --key $key -p rbd ls + +key=`ceph auth get-or-create-key client.poolaccess3 mon 'allow r' osd 'allow rw pool=nopool'` +expect_false rados --id poolaccess3 --key $key -p rbd ls + +test_pool_op + +echo OK diff --git a/qa/workunits/rados/test_pool_quota.sh b/qa/workunits/rados/test_pool_quota.sh new file mode 100755 index 00000000..0eacefc6 --- /dev/null +++ b/qa/workunits/rados/test_pool_quota.sh @@ -0,0 +1,68 @@ +#!/bin/sh -ex + +p=`uuidgen` + +# objects +ceph osd pool create $p 12 +ceph osd pool set-quota $p max_objects 10 +ceph osd pool application enable $p rados + +for f in `seq 1 10` ; do + rados -p $p put obj$f /etc/passwd +done + +sleep 30 + +rados -p $p put onemore /etc/passwd & +pid=$! + +ceph osd pool set-quota $p max_objects 100 +wait $pid +[ $? -ne 0 ] && exit 1 || true + +rados -p $p put twomore /etc/passwd + +# bytes +ceph osd pool set-quota $p max_bytes 100 +sleep 30 + +rados -p $p put two /etc/passwd & +pid=$! + +ceph osd pool set-quota $p max_bytes 0 +ceph osd pool set-quota $p max_objects 0 +wait $pid +[ $? 
-ne 0 ] && exit 1 || true + +rados -p $p put three /etc/passwd + + +#one pool being full does not block a different pool + +pp=`uuidgen` + +ceph osd pool create $pp 12 +ceph osd pool application enable $pp rados + +# set objects quota +ceph osd pool set-quota $pp max_objects 10 +sleep 30 + +for f in `seq 1 10` ; do + rados -p $pp put obj$f /etc/passwd +done + +sleep 30 + +rados -p $p put threemore /etc/passwd + +ceph osd pool set-quota $p max_bytes 0 +ceph osd pool set-quota $p max_objects 0 + +sleep 30 +# done +ceph osd pool delete $p $p --yes-i-really-really-mean-it +ceph osd pool delete $pp $pp --yes-i-really-really-mean-it + +echo OK + diff --git a/qa/workunits/rados/test_python.sh b/qa/workunits/rados/test_python.sh new file mode 100755 index 00000000..80369c8d --- /dev/null +++ b/qa/workunits/rados/test_python.sh @@ -0,0 +1,4 @@ +#!/bin/sh -ex + +${PYTHON:-python} -m nose -v $(dirname $0)/../../../src/test/pybind/test_rados.py +exit 0 diff --git a/qa/workunits/rados/test_rados_timeouts.sh b/qa/workunits/rados/test_rados_timeouts.sh new file mode 100755 index 00000000..327c7ab3 --- /dev/null +++ b/qa/workunits/rados/test_rados_timeouts.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +set -x + +delay_mon() { + MSGTYPE=$1 + shift + $@ --rados-mon-op-timeout 1 --ms-inject-delay-type mon --ms-inject-delay-max 10000000 --ms-inject-delay-probability 1 --ms-inject-delay-msg-type $MSGTYPE + if [ $? -eq 0 ]; then + exit 1 + fi +} + +delay_osd() { + MSGTYPE=$1 + shift + $@ --rados-osd-op-timeout 1 --ms-inject-delay-type osd --ms-inject-delay-max 10000000 --ms-inject-delay-probability 1 --ms-inject-delay-msg-type $MSGTYPE + if [ $? -eq 0 ]; then + exit 2 + fi +} + +# pool ops +delay_mon omap rados lspools +delay_mon poolopreply ceph osd pool create test 8 +delay_mon poolopreply rados mksnap -p test snap +delay_mon poolopreply ceph osd pool rm test test --yes-i-really-really-mean-it + +# other mon ops +delay_mon getpoolstats rados df +delay_mon mon_command ceph df +delay_mon omap ceph osd dump +delay_mon omap ceph -s + +# osd ops +delay_osd osd_op_reply rados -p data put ls /bin/ls +delay_osd osd_op_reply rados -p data get ls - >/dev/null +delay_osd osd_op_reply rados -p data ls +delay_osd command_reply ceph tell osd.0 bench 1 1 + +# rbd commands, using more kinds of osd ops +rbd create -s 1 test +delay_osd osd_op_reply rbd watch test +delay_osd osd_op_reply rbd info test +delay_osd osd_op_reply rbd snap create test@snap +delay_osd osd_op_reply rbd import /bin/ls ls +rbd rm test + +echo OK diff --git a/qa/workunits/rados/test_rados_tool.sh b/qa/workunits/rados/test_rados_tool.sh new file mode 100755 index 00000000..5325743f --- /dev/null +++ b/qa/workunits/rados/test_rados_tool.sh @@ -0,0 +1,924 @@ +#!/usr/bin/env bash + +set -x + +die() { + echo "$@" + exit 1 +} + +usage() { + cat < /dev/null 2>&1 || true + $RADOS_TOOL -p $POOL_EC rm $OBJ > /dev/null 2>&1 || true +} + +test_omap() { + cleanup + for i in $(seq 1 1 10) + do + if [ $(($i % 2)) -eq 0 ]; then + $RADOS_TOOL -p $POOL setomapval $OBJ $i $i + else + echo -n "$i" | $RADOS_TOOL -p $POOL setomapval $OBJ $i + fi + $RADOS_TOOL -p $POOL getomapval $OBJ $i | grep -q "|$i|\$" + done + $RADOS_TOOL -p $POOL listomapvals $OBJ | grep -c value | grep 10 + for i in $(seq 1 1 5) + do + $RADOS_TOOL -p $POOL rmomapkey $OBJ $i + done + $RADOS_TOOL -p $POOL listomapvals $OBJ | grep -c value | grep 5 + $RADOS_TOOL -p $POOL clearomap $OBJ + $RADOS_TOOL -p $POOL listomapvals $OBJ | wc -l | grep 0 + cleanup + + for i in $(seq 1 1 10) + do + dd if=/dev/urandom bs=128 
count=1 > $TDIR/omap_key + if [ $(($i % 2)) -eq 0 ]; then + $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key setomapval $OBJ $i + else + echo -n "$i" | $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key setomapval $OBJ + fi + $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key getomapval $OBJ | grep -q "|$i|\$" + $RADOS_TOOL -p $POOL --omap-key-file $TDIR/omap_key rmomapkey $OBJ + $RADOS_TOOL -p $POOL listomapvals $OBJ | grep -c value | grep 0 + done + cleanup +} + +test_xattr() { + cleanup + $RADOS_TOOL -p $POOL put $OBJ /etc/passwd + V1=`mktemp fooattrXXXXXXX` + V2=`mktemp fooattrXXXXXXX` + echo -n fooval > $V1 + expect_false $RADOS_TOOL -p $POOL setxattr $OBJ 2>/dev/null + expect_false $RADOS_TOOL -p $POOL setxattr $OBJ foo fooval extraarg 2>/dev/null + $RADOS_TOOL -p $POOL setxattr $OBJ foo fooval + $RADOS_TOOL -p $POOL getxattr $OBJ foo > $V2 + cmp $V1 $V2 + cat $V1 | $RADOS_TOOL -p $POOL setxattr $OBJ bar + $RADOS_TOOL -p $POOL getxattr $OBJ bar > $V2 + cmp $V1 $V2 + $RADOS_TOOL -p $POOL listxattr $OBJ > $V1 + grep -q foo $V1 + grep -q bar $V1 + [ `cat $V1 | wc -l` -eq 2 ] + rm $V1 $V2 + cleanup +} +test_rmobj() { + p=`uuidgen` + $CEPH_TOOL osd pool create $p 1 + $CEPH_TOOL osd pool set-quota $p max_objects 1 + V1=`mktemp fooattrXXXXXXX` + $RADOS_TOOL put $OBJ $V1 -p $p + while ! $CEPH_TOOL osd dump | grep 'full_quota max_objects' + do + sleep 2 + done + $RADOS_TOOL -p $p rm $OBJ --force-full + $CEPH_TOOL osd pool rm $p $p --yes-i-really-really-mean-it + rm $V1 +} + +test_ls() { + echo "Testing rados ls command" + p=`uuidgen` + $CEPH_TOOL osd pool create $p 1 + NS=10 + OBJS=20 + # Include default namespace (0) in the total + TOTAL=$(expr $OBJS \* $(expr $NS + 1)) + + for nsnum in `seq 0 $NS` + do + for onum in `seq 1 $OBJS` + do + if [ "$nsnum" = "0" ]; + then + "$RADOS_TOOL" -p $p put obj${onum} /etc/fstab 2> /dev/null + else + "$RADOS_TOOL" -p $p -N "NS${nsnum}" put obj${onum} /etc/fstab 2> /dev/null + fi + done + done + CHECK=$("$RADOS_TOOL" -p $p ls 2> /dev/null | wc -l) + if [ "$OBJS" -ne "$CHECK" ]; + then + die "Created $OBJS objects in default namespace but saw $CHECK" + fi + TESTNS=NS${NS} + CHECK=$("$RADOS_TOOL" -p $p -N $TESTNS ls 2> /dev/null | wc -l) + if [ "$OBJS" -ne "$CHECK" ]; + then + die "Created $OBJS objects in $TESTNS namespace but saw $CHECK" + fi + CHECK=$("$RADOS_TOOL" -p $p --all ls 2> /dev/null | wc -l) + if [ "$TOTAL" -ne "$CHECK" ]; + then + die "Created $TOTAL objects but saw $CHECK" + fi + + $CEPH_TOOL osd pool rm $p $p --yes-i-really-really-mean-it +} + +test_cleanup() { + echo "Testing rados cleanup command" + p=`uuidgen` + $CEPH_TOOL osd pool create $p 1 + NS=5 + OBJS=4 + # Include default namespace (0) in the total + TOTAL=$(expr $OBJS \* $(expr $NS + 1)) + + for nsnum in `seq 0 $NS` + do + for onum in `seq 1 $OBJS` + do + if [ "$nsnum" = "0" ]; + then + "$RADOS_TOOL" -p $p put obj${onum} /etc/fstab 2> /dev/null + else + "$RADOS_TOOL" -p $p -N "NS${nsnum}" put obj${onum} /etc/fstab 2> /dev/null + fi + done + done + + $RADOS_TOOL -p $p --all ls > $TDIR/before.ls.out 2> /dev/null + + $RADOS_TOOL -p $p bench 3 write --no-cleanup 2> /dev/null + $RADOS_TOOL -p $p -N NS1 bench 3 write --no-cleanup 2> /dev/null + $RADOS_TOOL -p $p -N NS2 bench 3 write --no-cleanup 2> /dev/null + $RADOS_TOOL -p $p -N NS3 bench 3 write --no-cleanup 2> /dev/null + # Leave dangling objects without a benchmark_last_metadata in NS4 + expect_false timeout 3 $RADOS_TOOL -p $p -N NS4 bench 30 write --no-cleanup 2> /dev/null + $RADOS_TOOL -p $p -N NS5 bench 3 write --no-cleanup 
2> /dev/null + + $RADOS_TOOL -p $p -N NS3 cleanup 2> /dev/null + #echo "Check NS3 after specific cleanup" + CHECK=$($RADOS_TOOL -p $p -N NS3 ls | wc -l) + if [ "$OBJS" -ne "$CHECK" ] ; + then + die "Expected $OBJS objects in NS3 but saw $CHECK" + fi + + #echo "Try to cleanup all" + $RADOS_TOOL -p $p --all cleanup + #echo "Check all namespaces" + $RADOS_TOOL -p $p --all ls > $TDIR/after.ls.out 2> /dev/null + CHECK=$(cat $TDIR/after.ls.out | wc -l) + if [ "$TOTAL" -ne "$CHECK" ]; + then + die "Expected $TOTAL objects but saw $CHECK" + fi + if ! diff $TDIR/before.ls.out $TDIR/after.ls.out + then + die "Different objects found after cleanup" + fi + + set +e + run_expect_fail $RADOS_TOOL -p $p cleanup --prefix illegal_prefix + run_expect_succ $RADOS_TOOL -p $p cleanup --prefix benchmark_data_otherhost + set -e + + $CEPH_TOOL osd pool rm $p $p --yes-i-really-really-mean-it +} + +function test_append() +{ + cleanup + + # create object + touch ./rados_append_null + $RADOS_TOOL -p $POOL append $OBJ ./rados_append_null + $RADOS_TOOL -p $POOL get $OBJ ./rados_append_0_out + cmp ./rados_append_null ./rados_append_0_out + + # append 4k, total size 4k + dd if=/dev/zero of=./rados_append_4k bs=4k count=1 + $RADOS_TOOL -p $POOL append $OBJ ./rados_append_4k + $RADOS_TOOL -p $POOL get $OBJ ./rados_append_4k_out + cmp ./rados_append_4k ./rados_append_4k_out + + # append 4k, total size 8k + $RADOS_TOOL -p $POOL append $OBJ ./rados_append_4k + $RADOS_TOOL -p $POOL get $OBJ ./rados_append_4k_out + read_size=`ls -l ./rados_append_4k_out | awk -F ' ' '{print $5}'` + if [ 8192 -ne $read_size ]; + then + die "Append failed expecting 8192 read $read_size" + fi + + # append 10M, total size 10493952 + dd if=/dev/zero of=./rados_append_10m bs=10M count=1 + $RADOS_TOOL -p $POOL append $OBJ ./rados_append_10m + $RADOS_TOOL -p $POOL get $OBJ ./rados_append_10m_out + read_size=`ls -l ./rados_append_10m_out | awk -F ' ' '{print $5}'` + if [ 10493952 -ne $read_size ]; + then + die "Append failed expecting 10493952 read $read_size" + fi + + # cleanup + cleanup + + # create object + $RADOS_TOOL -p $POOL_EC append $OBJ ./rados_append_null + $RADOS_TOOL -p $POOL_EC get $OBJ ./rados_append_0_out + cmp rados_append_null rados_append_0_out + + # append 4k, total size 4k + $RADOS_TOOL -p $POOL_EC append $OBJ ./rados_append_4k + $RADOS_TOOL -p $POOL_EC get $OBJ ./rados_append_4k_out + cmp rados_append_4k rados_append_4k_out + + # append 4k, total size 8k + $RADOS_TOOL -p $POOL_EC append $OBJ ./rados_append_4k + $RADOS_TOOL -p $POOL_EC get $OBJ ./rados_append_4k_out + read_size=`ls -l ./rados_append_4k_out | awk -F ' ' '{print $5}'` + if [ 8192 -ne $read_size ]; + then + die "Append failed expecting 8192 read $read_size" + fi + + # append 10M, total size 10493952 + $RADOS_TOOL -p $POOL_EC append $OBJ ./rados_append_10m + $RADOS_TOOL -p $POOL_EC get $OBJ ./rados_append_10m_out + read_size=`ls -l ./rados_append_10m_out | awk -F ' ' '{print $5}'` + if [ 10493952 -ne $read_size ]; + then + die "Append failed expecting 10493952 read $read_size" + fi + + cleanup + rm -rf ./rados_append_null ./rados_append_0_out + rm -rf ./rados_append_4k ./rados_append_4k_out ./rados_append_10m ./rados_append_10m_out +} + +function test_put() +{ + # rados put test: + cleanup + + # create file in local fs + dd if=/dev/urandom of=rados_object_10k bs=1K count=10 + + # test put command + $RADOS_TOOL -p $POOL put $OBJ ./rados_object_10k + $RADOS_TOOL -p $POOL get $OBJ ./rados_object_10k_out + cmp ./rados_object_10k ./rados_object_10k_out + cleanup + + # test 
put command with offset 0 + $RADOS_TOOL -p $POOL put $OBJ ./rados_object_10k --offset 0 + $RADOS_TOOL -p $POOL get $OBJ ./rados_object_offset_0_out + cmp ./rados_object_10k ./rados_object_offset_0_out + cleanup + + # test put command with offset 1000 + $RADOS_TOOL -p $POOL put $OBJ ./rados_object_10k --offset 1000 + $RADOS_TOOL -p $POOL get $OBJ ./rados_object_offset_1000_out + cmp ./rados_object_10k ./rados_object_offset_1000_out 0 1000 + cleanup + + rm -rf ./rados_object_10k ./rados_object_10k_out ./rados_object_offset_0_out ./rados_object_offset_1000_out +} + +function test_stat() +{ + bluestore=$("$CEPH_TOOL" osd metadata | grep '"osd_objectstore": "bluestore"' | cut -f1) + # create file in local fs + dd if=/dev/urandom of=rados_object_128k bs=64K count=2 + + # rados df test (replicated_pool): + $RADOS_TOOL purge $POOL --yes-i-really-really-mean-it + $CEPH_TOOL osd pool rm $POOL $POOL --yes-i-really-really-mean-it + $CEPH_TOOL osd pool create $POOL 8 + $CEPH_TOOL osd pool set $POOL size 3 + + # put object with 1 MB gap in front + $RADOS_TOOL -p $POOL put $OBJ ./rados_object_128k --offset=1048576 + MATCH_CNT=0 + if [ "" == "$bluestore" ]; + then + STORED=1.1 + STORED_UNIT="MiB" + else + STORED=384 + STORED_UNIT="KiB" + fi + for i in {1..60} + do + IN=$($RADOS_TOOL -p $POOL df | grep $POOL ; [[ ! -z $? ]] && echo "") + [[ -z $IN ]] && sleep 1 && continue + IFS=' ' read -ra VALS <<< "$IN" + + # verification is a bit tricky due to stats report's eventual model + # VALS[1] - STORED + # VALS[2] - STORED units + # VALS[3] - OBJECTS + # VALS[5] - COPIES + # VALS[12] - WR_OPS + # VALS[13] - WR + # VALS[14] - WR uints + # implies replication factor 3 + if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "1" ] && [ ${VALS[13]} == 128 ] && [ ${VALS[14]} == "KiB" ] + then + # enforce multiple match to make sure stats aren't changing any more + MATCH_CNT=$((MATCH_CNT+1)) + [[ $MATCH_CNT == 3 ]] && break + sleep 1 + continue + fi + MATCH_CNT=0 + sleep 1 + continue + done + [[ -z $IN ]] && die "Failed to retrieve any pool stats within 60 seconds" + if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "1" ] || [ ${VALS[13]} != 128 ] || [ ${VALS[14]} != "KiB" ] + then + die "Failed to retrieve proper pool stats within 60 seconds" + fi + + # overwrite data at 1MB offset + $RADOS_TOOL -p $POOL put $OBJ ./rados_object_128k --offset=1048576 + MATCH_CNT=0 + if [ "" == "$bluestore" ]; + then + STORED=1.1 + STORED_UNIT="MiB" + else + STORED=384 + STORED_UNIT="KiB" + fi + for i in {1..60} + do + IN=$($RADOS_TOOL -p $POOL df | grep $POOL ; [[ ! -z $? 
]] && echo "") + IFS=' ' read -ra VALS <<< "$IN" + + # verification is a bit tricky due to stats report's eventual model + # VALS[1] - STORED + # VALS[2] - STORED units + # VALS[3] - OBJECTS + # VALS[5] - COPIES + # VALS[12] - WR_OPS + # VALS[13] - WR + # VALS[14] - WR uints + # implies replication factor 3 + if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "2" ] && [ ${VALS[13]} == 256 ] && [ ${VALS[14]} == "KiB" ] + then + # enforce multiple match to make sure stats aren't changing any more + MATCH_CNT=$((MATCH_CNT+1)) + [[ $MATCH_CNT == 3 ]] && break + sleep 1 + continue + fi + MATCH_CNT=0 + sleep 1 + continue + done + if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "2" ] || [ ${VALS[13]} != 256 ] || [ ${VALS[14]} != "KiB" ] + then + die "Failed to retrieve proper pool stats within 60 seconds" + fi + + # write data at 64K offset + $RADOS_TOOL -p $POOL put $OBJ ./rados_object_128k --offset=65536 + MATCH_CNT=0 + if [ "" == "$bluestore" ]; + then + STORED=1.1 + STORED_UNIT="MiB" + else + STORED=768 + STORED_UNIT="KiB" + fi + for i in {1..60} + do + IN=$($RADOS_TOOL -p $POOL df | grep $POOL ; [[ ! -z $? ]] && echo "") + IFS=' ' read -ra VALS <<< "$IN" + + # verification is a bit tricky due to stats report's eventual model + # VALS[1] - STORED + # VALS[2] - STORED units + # VALS[3] - OBJECTS + # VALS[5] - COPIES + # VALS[12] - WR_OPS + # VALS[13] - WR + # VALS[14] - WR uints + # implies replication factor 3 + if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "3" ] && [ ${VALS[13]} == 384 ] && [ ${VALS[14]} == "KiB" ] + then + # enforce multiple match to make sure stats aren't changing any more + MATCH_CNT=$((MATCH_CNT+1)) + [[ $MATCH_CNT == 3 ]] && break + sleep 1 + continue + fi + MATCH_CNT=0 + sleep 1 + continue + done + if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "3" ] || [ ${VALS[13]} != 384 ] || [ ${VALS[14]} != "KiB" ] + then + die "Failed to retrieve proper pool stats within 60 seconds" + fi + + # overwrite object totally + $RADOS_TOOL -p $POOL put $OBJ ./rados_object_128k + MATCH_CNT=0 + if [ "" == "$bluestore" ]; + then + STORED=128 + STORED_UNIT="KiB" + else + STORED=384 + STORED_UNIT="KiB" + fi + for i in {1..60} + do + IN=$($RADOS_TOOL -p $POOL df | grep $POOL ; [[ ! -z $? 
]] && echo "") + IFS=' ' read -ra VALS <<< "$IN" + + # verification is a bit tricky due to stats report's eventual model + # VALS[1] - STORED + # VALS[2] - STORED units + # VALS[3] - OBJECTS + # VALS[5] - COPIES + # VALS[12] - WR_OPS + # VALS[13] - WR + # VALS[14] - WR uints + # implies replication factor 3 + if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "4" ] && [ ${VALS[13]} == 512 ] && [ ${VALS[14]} == "KiB" ] + then + # enforce multiple match to make sure stats aren't changing any more + MATCH_CNT=$((MATCH_CNT+1)) + [[ $MATCH_CNT == 3 ]] && break + sleep 1 + continue + fi + MATCH_CNT=0 + sleep 1 + continue + done + if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "4" ] || [ ${VALS[13]} != 512 ] || [ ${VALS[14]} != "KiB" ] + then + die "Failed to retrieve proper pool stats within 60 seconds" + fi + + cleanup + + # after cleanup? + MATCH_CNT=0 + for i in {1..60} + do + IN=$($RADOS_TOOL -p $POOL df | grep $POOL ; [[ ! -z $? ]] && echo "") + IFS=' ' read -ra VALS <<< "$IN" + + # verification is a bit tricky due to stats report's eventual model + # VALS[1] - STORED + # VALS[2] - STORED units + # VALS[3] - OBJECTS + # VALS[5] - COPIES + # VALS[12] - WR_OPS + # VALS[13] - WR + # VALS[14] - WR uints + # implies replication factor 3 + if [ ${VALS[1]} == 0 ] && [ ${VALS[2]} == "B" ] && [ ${VALS[3]} == "0" ] && [ ${VALS[5]} == "0" ] && [ ${VALS[12]} == "5" ] && [ ${VALS[13]} == 512 ] && [ ${VALS[14]} == "KiB" ] + then + # enforce multiple match to make sure stats aren't changing any more + MATCH_CNT=$((MATCH_CNT+1)) + [[ $MATCH_CNT == 3 ]] && break + sleep 1 + continue + fi + MATCH_CNT=0 + sleep 1 + continue + done + if [ ${VALS[1]} != 0 ] || [ ${VALS[2]} != "B" ] || [ ${VALS[3]} != "0" ] || [ ${VALS[5]} != "0" ] || [ ${VALS[12]} != "5" ] || [ ${VALS[13]} != 512 ] || [ ${VALS[14]} != "KiB" ] + then + die "Failed to retrieve proper pool stats within 60 seconds" + fi + + ############ rados df test (EC pool): ############## + $RADOS_TOOL purge $POOL_EC --yes-i-really-really-mean-it + $CEPH_TOOL osd pool rm $POOL_EC $POOL_EC --yes-i-really-really-mean-it + $CEPH_TOOL osd erasure-code-profile set myprofile k=2 m=1 stripe_unit=2K crush-failure-domain=osd --force + $CEPH_TOOL osd pool create $POOL_EC 8 8 erasure + + # put object + $RADOS_TOOL -p $POOL_EC put $OBJ ./rados_object_128k + MATCH_CNT=0 + if [ "" == "$bluestore" ]; + then + STORED=128 + STORED_UNIT="KiB" + else + STORED=192 + STORED_UNIT="KiB" + fi + for i in {1..60} + do + IN=$($RADOS_TOOL -p $POOL_EC df | grep $POOL_EC ; [[ ! -z $? 
]] && echo "") + [[ -z $IN ]] && sleep 1 && continue + IFS=' ' read -ra VALS <<< "$IN" + + # verification is a bit tricky due to stats report's eventual model + # VALS[1] - STORED + # VALS[2] - STORED units + # VALS[3] - OBJECTS + # VALS[5] - COPIES + # VALS[12] - WR_OPS + # VALS[13] - WR + # VALS[14] - WR uints + # implies replication factor 2+1 + if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "1" ] && [ ${VALS[13]} == 128 ] && [ ${VALS[14]} == "KiB" ] + then + # enforce multiple match to make sure stats aren't changing any more + MATCH_CNT=$((MATCH_CNT+1)) + [[ $MATCH_CNT == 3 ]] && break + sleep 1 + continue + fi + MATCH_CNT=0 + sleep 1 + continue + done + [[ -z $IN ]] && die "Failed to retrieve any pool stats within 60 seconds" + if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "1" ] || [ ${VALS[13]} != 128 ] || [ ${VALS[14]} != "KiB" ] + then + die "Failed to retrieve proper pool stats within 60 seconds" + fi + + # overwrite object + $RADOS_TOOL -p $POOL_EC put $OBJ ./rados_object_128k + MATCH_CNT=0 + if [ "" == "$bluestore" ]; + then + STORED=128 + STORED_UNIT="KiB" + else + STORED=192 + STORED_UNIT="KiB" + fi + for i in {1..60} + do + IN=$($RADOS_TOOL -p $POOL_EC df | grep $POOL_EC ; [[ ! -z $? ]] && echo "") + IFS=' ' read -ra VALS <<< "$IN" + + # verification is a bit tricky due to stats report's eventual model + # VALS[1] - STORED + # VALS[2] - STORED units + # VALS[3] - OBJECTS + # VALS[5] - COPIES + # VALS[12] - WR_OPS + # VALS[13] - WR + # VALS[14] - WR uints + # implies replication factor 2+1 + if [ ${VALS[1]} == $STORED ] && [ ${VALS[2]} == $STORED_UNIT ] && [ ${VALS[3]} == "1" ] && [ ${VALS[5]} == "3" ] && [ ${VALS[12]} == "2" ] && [ ${VALS[13]} == 256 ] && [ ${VALS[14]} == "KiB" ] + then + # enforce multiple match to make sure stats aren't changing any more + MATCH_CNT=$((MATCH_CNT+1)) + [[ $MATCH_CNT == 3 ]] && break + sleep 1 + continue + fi + MATCH_CNT=0 + sleep 1 + continue + done + if [ ${VALS[1]} != $STORED ] || [ ${VALS[2]} != $STORED_UNIT ] || [ ${VALS[3]} != "1" ] || [ ${VALS[5]} != "3" ] || [ ${VALS[12]} != "2" ] || [ ${VALS[13]} != 256 ] || [ ${VALS[14]} != "KiB" ] + then + die "Failed to retrieve proper pool stats within 60 seconds" + fi + + cleanup + + # after cleanup? + MATCH_CNT=0 + for i in {1..60} + do + IN=$($RADOS_TOOL -p $POOL_EC df | grep $POOL_EC ; [[ ! -z $? 
]] && echo "") + IFS=' ' read -ra VALS <<< "$IN" + + # verification is a bit tricky due to stats report's eventual model + # VALS[1] - STORED + # VALS[2] - STORED units + # VALS[3] - OBJECTS + # VALS[5] - COPIES + # VALS[12] - WR_OPS + # VALS[13] - WR + # VALS[14] - WR uints + # implies replication factor 2+1 + if [ ${VALS[1]} == 0 ] && [ ${VALS[2]} == "B" ] && [ ${VALS[3]} == "0" ] && [ ${VALS[5]} == "0" ] && [ ${VALS[12]} == "3" ] && [ ${VALS[13]} == 256 ] && [ ${VALS[14]} == "KiB" ] + then + # enforce multiple match to make sure stats aren't changing any more + MATCH_CNT=$((MATCH_CNT+1)) + [[ $MATCH_CNT == 3 ]] && break + sleep 1 + continue + fi + MATCH_CNT=0 + sleep 1 + continue + done + if [ ${VALS[1]} != 0 ] || [ ${VALS[2]} != "B" ] || [ ${VALS[3]} != "0" ] || [ ${VALS[5]} != "0" ] || [ ${VALS[12]} != "3" ] || [ ${VALS[13]} != 256 ] || [ ${VALS[14]} != "KiB" ] + then + die "Failed to retrieve proper pool stats within 60 seconds" + fi + + rm -rf ./rados_object_128k +} + +test_xattr +test_omap +test_rmobj +test_ls +test_cleanup +test_append +test_put +test_stat + +# clean up environment, delete pool +$CEPH_TOOL osd pool delete $POOL $POOL --yes-i-really-really-mean-it +$CEPH_TOOL osd pool delete $POOL_EC $POOL_EC --yes-i-really-really-mean-it +$CEPH_TOOL osd pool delete $POOL_CP_TARGET $POOL_CP_TARGET --yes-i-really-really-mean-it + +echo "SUCCESS!" +exit 0 diff --git a/qa/workunits/rbd/cli_generic.sh b/qa/workunits/rbd/cli_generic.sh new file mode 100755 index 00000000..7f44d932 --- /dev/null +++ b/qa/workunits/rbd/cli_generic.sh @@ -0,0 +1,933 @@ +#!/usr/bin/env bash +set -ex + +. $(dirname $0)/../../standalone/ceph-helpers.sh + +export RBD_FORCE_ALLOW_V1=1 + +# make sure rbd pool is EMPTY.. this is a test script!! +rbd ls | wc -l | grep -v '^0$' && echo "nonempty rbd pool, aborting! run this script on an empty test cluster only." && exit 1 + +IMGS="testimg1 testimg2 testimg3 testimg4 testimg5 testimg6 testimg-diff1 testimg-diff2 testimg-diff3 foo foo2 bar bar2 test1 test2 test3 test4 clone2" + +expect_fail() { + "$@" && return 1 || return 0 +} + +tiered=0 +if ceph osd dump | grep ^pool | grep "'rbd'" | grep tier; then + tiered=1 +fi + +remove_images() { + for img in $IMGS + do + (rbd snap purge $img || true) >/dev/null 2>&1 + (rbd rm $img || true) >/dev/null 2>&1 + done +} + +test_others() { + echo "testing import, export, resize, and snapshots..." 
+ TMP_FILES="/tmp/img1 /tmp/img1.new /tmp/img2 /tmp/img2.new /tmp/img3 /tmp/img3.new /tmp/img-diff1.new /tmp/img-diff2.new /tmp/img-diff3.new /tmp/img1.snap1 /tmp/img1.snap1 /tmp/img-diff1.snap1" + + remove_images + rm -f $TMP_FILES + + # create an image + dd if=/bin/sh of=/tmp/img1 bs=1k count=1 seek=10 + dd if=/bin/dd of=/tmp/img1 bs=1k count=10 seek=100 + dd if=/bin/rm of=/tmp/img1 bs=1k count=100 seek=1000 + dd if=/bin/ls of=/tmp/img1 bs=1k seek=10000 + dd if=/bin/ln of=/tmp/img1 bs=1k seek=100000 + + # import, snapshot + rbd import $RBD_CREATE_ARGS /tmp/img1 testimg1 + rbd resize testimg1 --size=256 --allow-shrink + rbd export testimg1 /tmp/img2 + rbd snap create testimg1 --snap=snap1 + rbd resize testimg1 --size=128 && exit 1 || true # shrink should fail + rbd resize testimg1 --size=128 --allow-shrink + rbd export testimg1 /tmp/img3 + + # info + rbd info testimg1 | grep 'size 128 MiB' + rbd info --snap=snap1 testimg1 | grep 'size 256 MiB' + + # export-diff + rm -rf /tmp/diff-testimg1-1 /tmp/diff-testimg1-2 + rbd export-diff testimg1 --snap=snap1 /tmp/diff-testimg1-1 + rbd export-diff testimg1 --from-snap=snap1 /tmp/diff-testimg1-2 + + # import-diff + rbd create $RBD_CREATE_ARGS --size=1 testimg-diff1 + rbd import-diff --sparse-size 8K /tmp/diff-testimg1-1 testimg-diff1 + rbd import-diff --sparse-size 8K /tmp/diff-testimg1-2 testimg-diff1 + + # info + rbd info testimg1 | grep 'size 128 MiB' + rbd info --snap=snap1 testimg1 | grep 'size 256 MiB' + rbd info testimg-diff1 | grep 'size 128 MiB' + rbd info --snap=snap1 testimg-diff1 | grep 'size 256 MiB' + + # make copies + rbd copy testimg1 --snap=snap1 testimg2 + rbd copy testimg1 testimg3 + rbd copy testimg-diff1 --sparse-size 768K --snap=snap1 testimg-diff2 + rbd copy testimg-diff1 --sparse-size 768K testimg-diff3 + + # verify the result + rbd info testimg2 | grep 'size 256 MiB' + rbd info testimg3 | grep 'size 128 MiB' + rbd info testimg-diff2 | grep 'size 256 MiB' + rbd info testimg-diff3 | grep 'size 128 MiB' + + # deep copies + rbd deep copy testimg1 testimg4 + rbd deep copy testimg1 --snap=snap1 testimg5 + rbd info testimg4 | grep 'size 128 MiB' + rbd info testimg5 | grep 'size 256 MiB' + rbd snap ls testimg4 | grep -v 'SNAPID' | wc -l | grep 1 + rbd snap ls testimg4 | grep '.*snap1.*' + + rbd export testimg1 /tmp/img1.new + rbd export testimg2 /tmp/img2.new + rbd export testimg3 /tmp/img3.new + rbd export testimg-diff1 /tmp/img-diff1.new + rbd export testimg-diff2 /tmp/img-diff2.new + rbd export testimg-diff3 /tmp/img-diff3.new + + cmp /tmp/img2 /tmp/img2.new + cmp /tmp/img3 /tmp/img3.new + cmp /tmp/img2 /tmp/img-diff2.new + cmp /tmp/img3 /tmp/img-diff3.new + + # rollback + rbd snap rollback --snap=snap1 testimg1 + rbd snap rollback --snap=snap1 testimg-diff1 + rbd info testimg1 | grep 'size 256 MiB' + rbd info testimg-diff1 | grep 'size 256 MiB' + rbd export testimg1 /tmp/img1.snap1 + rbd export testimg-diff1 /tmp/img-diff1.snap1 + cmp /tmp/img2 /tmp/img1.snap1 + cmp /tmp/img2 /tmp/img-diff1.snap1 + + # test create, copy of zero-length images + rbd rm testimg2 + rbd rm testimg3 + rbd create testimg2 -s 0 + rbd cp testimg2 testimg3 + rbd deep cp testimg2 testimg6 + + # remove snapshots + rbd snap rm --snap=snap1 testimg1 + rbd snap rm --snap=snap1 testimg-diff1 + rbd info --snap=snap1 testimg1 2>&1 | grep 'error setting snapshot context: (2) No such file or directory' + rbd info --snap=snap1 testimg-diff1 2>&1 | grep 'error setting snapshot context: (2) No such file or directory' + + # sparsify + rbd sparsify testimg1 + + 
remove_images + rm -f $TMP_FILES +} + +test_rename() { + echo "testing rename..." + remove_images + + rbd create --image-format 1 -s 1 foo + rbd create --image-format 2 -s 1 bar + rbd rename foo foo2 + rbd rename foo2 bar 2>&1 | grep exists + rbd rename bar bar2 + rbd rename bar2 foo2 2>&1 | grep exists + + ceph osd pool create rbd2 8 + rbd pool init rbd2 + rbd create -p rbd2 -s 1 foo + rbd rename rbd2/foo rbd2/bar + rbd -p rbd2 ls | grep bar + rbd rename rbd2/bar foo + rbd rename --pool rbd2 foo bar + ! rbd rename rbd2/bar --dest-pool rbd foo + rbd rename --pool rbd2 bar --dest-pool rbd2 foo + rbd -p rbd2 ls | grep foo + ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it + + remove_images +} + +test_ls() { + echo "testing ls..." + remove_images + + rbd create --image-format 1 -s 1 test1 + rbd create --image-format 1 -s 1 test2 + rbd ls | grep test1 + rbd ls | grep test2 + rbd ls | wc -l | grep 2 + # look for fields in output of ls -l without worrying about space + rbd ls -l | grep 'test1.*1 MiB.*1' + rbd ls -l | grep 'test2.*1 MiB.*1' + + rbd rm test1 + rbd rm test2 + + rbd create --image-format 2 -s 1 test1 + rbd create --image-format 2 -s 1 test2 + rbd ls | grep test1 + rbd ls | grep test2 + rbd ls | wc -l | grep 2 + rbd ls -l | grep 'test1.*1 MiB.*2' + rbd ls -l | grep 'test2.*1 MiB.*2' + + rbd rm test1 + rbd rm test2 + + rbd create --image-format 2 -s 1 test1 + rbd create --image-format 1 -s 1 test2 + rbd ls | grep test1 + rbd ls | grep test2 + rbd ls | wc -l | grep 2 + rbd ls -l | grep 'test1.*1 MiB.*2' + rbd ls -l | grep 'test2.*1 MiB.*1' + remove_images + + # test that many images can be shown by ls + for i in $(seq -w 00 99); do + rbd create image.$i -s 1 + done + rbd ls | wc -l | grep 100 + rbd ls -l | grep image | wc -l | grep 100 + for i in $(seq -w 00 99); do + rbd rm image.$i + done + + for i in $(seq -w 00 99); do + rbd create image.$i --image-format 2 -s 1 + done + rbd ls | wc -l | grep 100 + rbd ls -l | grep image | wc -l | grep 100 + for i in $(seq -w 00 99); do + rbd rm image.$i + done +} + +test_remove() { + echo "testing remove..." + remove_images + + rbd remove "NOT_EXIST" && exit 1 || true # remove should fail + rbd create --image-format 1 -s 1 test1 + rbd rm test1 + rbd ls | wc -l | grep "^0$" + + rbd create --image-format 2 -s 1 test2 + rbd rm test2 + rbd ls | wc -l | grep "^0$" + + # check that remove succeeds even if it's + # interrupted partway through. simulate this + # by removing some objects manually. 
+ + # remove with header missing (old format) + rbd create --image-format 1 -s 1 test1 + rados rm -p rbd test1.rbd + rbd rm test1 + rbd ls | wc -l | grep "^0$" + + if [ $tiered -eq 0 ]; then + # remove with header missing + rbd create --image-format 2 -s 1 test2 + HEADER=$(rados -p rbd ls | grep '^rbd_header') + rados -p rbd rm $HEADER + rbd rm test2 + rbd ls | wc -l | grep "^0$" + + # remove with id missing + rbd create --image-format 2 -s 1 test2 + rados -p rbd rm rbd_id.test2 + rbd rm test2 + rbd ls | wc -l | grep "^0$" + + # remove with header and id missing + rbd create --image-format 2 -s 1 test2 + HEADER=$(rados -p rbd ls | grep '^rbd_header') + rados -p rbd rm $HEADER + rados -p rbd rm rbd_id.test2 + rbd rm test2 + rbd ls | wc -l | grep "^0$" + fi + + # remove with rbd_children object missing (and, by extension, + # with child not mentioned in rbd_children) + rbd create --image-format 2 -s 1 test2 + rbd snap create test2@snap + rbd snap protect test2@snap + rbd clone test2@snap clone --rbd-default-clone-format 1 + + rados -p rbd rm rbd_children + rbd rm clone + rbd ls | grep clone | wc -l | grep '^0$' + + rbd snap unprotect test2@snap + rbd snap rm test2@snap + rbd rm test2 +} + +test_locking() { + echo "testing locking..." + remove_images + + rbd create $RBD_CREATE_ARGS -s 1 test1 + rbd lock list test1 | wc -l | grep '^0$' + rbd lock add test1 id + rbd lock list test1 | grep ' 1 ' + LOCKER=$(rbd lock list test1 | tail -n 1 | awk '{print $1;}') + rbd lock remove test1 id $LOCKER + rbd lock list test1 | wc -l | grep '^0$' + + rbd lock add test1 id --shared tag + rbd lock list test1 | grep ' 1 ' + rbd lock add test1 id --shared tag + rbd lock list test1 | grep ' 2 ' + rbd lock add test1 id2 --shared tag + rbd lock list test1 | grep ' 3 ' + rbd lock list test1 | tail -n 1 | awk '{print $2, $1;}' | xargs rbd lock remove test1 + if rbd info test1 | grep -qE "features:.*exclusive" + then + # new locking functionality requires all locks to be released + while [ -n "$(rbd lock list test1)" ] + do + rbd lock list test1 | tail -n 1 | awk '{print $2, $1;}' | xargs rbd lock remove test1 + done + fi + rbd rm test1 +} + +test_pool_image_args() { + echo "testing pool and image args..." 
+ remove_images + + ceph osd pool delete test test --yes-i-really-really-mean-it || true + ceph osd pool create test 32 + rbd pool init test + truncate -s 1 /tmp/empty /tmp/empty@snap + + rbd ls | wc -l | grep 0 + rbd create -s 1 test1 + rbd ls | grep -q test1 + rbd import --image test2 /tmp/empty + rbd ls | grep -q test2 + rbd --dest test3 import /tmp/empty + rbd ls | grep -q test3 + rbd import /tmp/empty foo + rbd ls | grep -q foo + + # should fail due to "destination snapname specified" + rbd import --dest test/empty@snap /tmp/empty && exit 1 || true + rbd import /tmp/empty test/empty@snap && exit 1 || true + rbd import --image test/empty@snap /tmp/empty && exit 1 || true + rbd import /tmp/empty@snap && exit 1 || true + + rbd ls test | wc -l | grep 0 + rbd import /tmp/empty test/test1 + rbd ls test | grep -q test1 + rbd -p test import /tmp/empty test2 + rbd ls test | grep -q test2 + rbd --image test3 -p test import /tmp/empty + rbd ls test | grep -q test3 + rbd --image test4 -p test import /tmp/empty + rbd ls test | grep -q test4 + rbd --dest test5 -p test import /tmp/empty + rbd ls test | grep -q test5 + rbd --dest test6 --dest-pool test import /tmp/empty + rbd ls test | grep -q test6 + rbd --image test7 --dest-pool test import /tmp/empty + rbd ls test | grep -q test7 + rbd --image test/test8 import /tmp/empty + rbd ls test | grep -q test8 + rbd --dest test/test9 import /tmp/empty + rbd ls test | grep -q test9 + rbd import --pool test /tmp/empty + rbd ls test | grep -q empty + + # copy with no explicit pool goes to pool rbd + rbd copy test/test9 test10 + rbd ls test | grep -qv test10 + rbd ls | grep -q test10 + rbd copy test/test9 test/test10 + rbd ls test | grep -q test10 + rbd copy --pool test test10 --dest-pool test test11 + rbd ls test | grep -q test11 + rbd copy --dest-pool rbd --pool test test11 test12 + rbd ls | grep test12 + rbd ls test | grep -qv test12 + + rm -f /tmp/empty /tmp/empty@snap + ceph osd pool delete test test --yes-i-really-really-mean-it + + for f in foo test1 test10 test12 test2 test3 ; do + rbd rm $f + done +} + +test_clone() { + echo "testing clone..." + remove_images + rbd create test1 $RBD_CREATE_ARGS -s 1 + rbd snap create test1@s1 + rbd snap protect test1@s1 + + ceph osd pool create rbd2 8 + rbd pool init rbd2 + rbd clone test1@s1 rbd2/clone + rbd -p rbd2 ls | grep clone + rbd -p rbd2 ls -l | grep clone | grep test1@s1 + rbd ls | grep -v clone + rbd flatten rbd2/clone + rbd snap create rbd2/clone@s1 + rbd snap protect rbd2/clone@s1 + rbd clone rbd2/clone@s1 clone2 + rbd ls | grep clone2 + rbd ls -l | grep clone2 | grep rbd2/clone@s1 + rbd -p rbd2 ls | grep -v clone2 + + rbd rm clone2 + rbd snap unprotect rbd2/clone@s1 + rbd snap rm rbd2/clone@s1 + rbd rm rbd2/clone + rbd snap unprotect test1@s1 + rbd snap rm test1@s1 + rbd rm test1 + ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it +} + +test_trash() { + echo "testing trash..." 
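+ # Trash workflow exercised below: "trash mv" defers deletion, "trash ls"
+ # shows the deferred image and its id, "trash restore" brings it back,
+ # and --expires-at sets a deferment window during which a plain
+ # "trash rm" is refused and --force is required.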
+ remove_images + + rbd create $RBD_CREATE_ARGS -s 1 test1 + rbd create $RBD_CREATE_ARGS -s 1 test2 + rbd ls | grep test1 + rbd ls | grep test2 + rbd ls | wc -l | grep 2 + rbd ls -l | grep 'test1.*2.*' + rbd ls -l | grep 'test2.*2.*' + + rbd trash mv test1 + rbd ls | grep test2 + rbd ls | wc -l | grep 1 + rbd ls -l | grep 'test2.*2.*' + + rbd trash ls | grep test1 + rbd trash ls | wc -l | grep 1 + rbd trash ls -l | grep 'test1.*USER.*' + rbd trash ls -l | grep -v 'protected until' + + ID=`rbd trash ls | cut -d ' ' -f 1` + rbd trash rm $ID + + rbd trash mv test2 + ID=`rbd trash ls | cut -d ' ' -f 1` + rbd info --image-id $ID | grep "rbd image 'test2'" + + rbd trash restore $ID + rbd ls | grep test2 + rbd ls | wc -l | grep 1 + rbd ls -l | grep 'test2.*2.*' + + rbd trash mv test2 --expires-at "3600 sec" + rbd trash ls | grep test2 + rbd trash ls | wc -l | grep 1 + rbd trash ls -l | grep 'test2.*USER.*protected until' + + rbd trash rm $ID 2>&1 | grep 'Deferment time has not expired' + rbd trash rm --image-id $ID --force + + rbd create $RBD_CREATE_ARGS -s 1 test1 + rbd snap create test1@snap1 + rbd snap protect test1@snap1 + rbd trash mv test1 + + rbd trash ls | grep test1 + rbd trash ls | wc -l | grep 1 + rbd trash ls -l | grep 'test1.*USER.*' + rbd trash ls -l | grep -v 'protected until' + + ID=`rbd trash ls | cut -d ' ' -f 1` + rbd snap ls --image-id $ID | grep -v 'SNAPID' | wc -l | grep 1 + rbd snap ls --image-id $ID | grep '.*snap1.*' + + rbd snap unprotect --image-id $ID --snap snap1 + rbd snap rm --image-id $ID --snap snap1 + rbd snap ls --image-id $ID | grep -v 'SNAPID' | wc -l | grep 0 + + rbd trash restore $ID + rbd snap create test1@snap1 + rbd snap create test1@snap2 + rbd snap ls --image-id $ID | grep -v 'SNAPID' | wc -l | grep 2 + rbd snap purge --image-id $ID + rbd snap ls --image-id $ID | grep -v 'SNAPID' | wc -l | grep 0 + + rbd rm --rbd_move_to_trash_on_remove=true --rbd_move_to_trash_on_remove_expire_seconds=3600 test1 + rbd trash ls | grep test1 + rbd trash ls | wc -l | grep 1 + rbd trash ls -l | grep 'test1.*USER.*protected until' + rbd trash rm $ID 2>&1 | grep 'Deferment time has not expired' + rbd trash rm --image-id $ID --force + + remove_images +} + +test_purge() { + echo "testing trash purge..." + remove_images + + rbd trash purge + rbd trash ls | wc -l | grep 0 + + rbd create $RBD_CREATE_ARGS foo -s 1 + rbd create $RBD_CREATE_ARGS bar -s 1 + + rbd trash mv foo --expires-at "10 sec" + rbd trash mv bar --expires-at "30 sec" + + rbd trash purge --expired-before "now + 10 sec" + rbd trash ls | grep -v foo | wc -l | grep 1 + rbd trash ls | grep bar + + LAST_IMG=$(rbd trash ls | grep bar | awk '{print $1;}') + rbd trash rm $LAST_IMG --force --no-progress | grep -v '.' | wc -l | grep 0 +} + +test_deep_copy_clone() { + echo "testing deep copy clone..." 
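+ # "rbd deep copy" copies an image together with its snapshots. For a
+ # clone the parent link is preserved by default, so the copy still
+ # reports "parent: rbd/testimg1@snap1"; with --flatten the copy is
+ # detached and reports no parent. Both cases are checked below.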
+ remove_images + + rbd create testimg1 $RBD_CREATE_ARGS --size 256 + rbd snap create testimg1 --snap=snap1 + rbd snap protect testimg1@snap1 + rbd clone testimg1@snap1 testimg2 + rbd snap create testimg2@snap2 + rbd deep copy testimg2 testimg3 + rbd info testimg3 | grep 'size 256 MiB' + rbd info testimg3 | grep 'parent: rbd/testimg1@snap1' + rbd snap ls testimg3 | grep -v 'SNAPID' | wc -l | grep 1 + rbd snap ls testimg3 | grep '.*snap2.*' + rbd info testimg2 | grep 'features:.*deep-flatten' || rbd snap rm testimg2@snap2 + rbd info testimg3 | grep 'features:.*deep-flatten' || rbd snap rm testimg3@snap2 + rbd flatten testimg2 + rbd flatten testimg3 + rbd snap unprotect testimg1@snap1 + rbd snap purge testimg2 + rbd snap purge testimg3 + rbd rm testimg2 + rbd rm testimg3 + + rbd snap protect testimg1@snap1 + rbd clone testimg1@snap1 testimg2 + rbd snap create testimg2@snap2 + rbd deep copy --flatten testimg2 testimg3 + rbd info testimg3 | grep 'size 256 MiB' + rbd info testimg3 | grep -v 'parent:' + rbd snap ls testimg3 | grep -v 'SNAPID' | wc -l | grep 1 + rbd snap ls testimg3 | grep '.*snap2.*' + rbd info testimg2 | grep 'features:.*deep-flatten' || rbd snap rm testimg2@snap2 + rbd flatten testimg2 + rbd snap unprotect testimg1@snap1 + + remove_images +} + +test_clone_v2() { + echo "testing clone v2..." + remove_images + + rbd create $RBD_CREATE_ARGS -s 1 test1 + rbd snap create test1@1 + rbd clone --rbd-default-clone-format=1 test1@1 test2 && exit 1 || true + rbd clone --rbd-default-clone-format=2 test1@1 test2 + rbd clone --rbd-default-clone-format=2 test1@1 test3 + + rbd snap protect test1@1 + rbd clone --rbd-default-clone-format=1 test1@1 test4 + + rbd children test1@1 | sort | tr '\n' ' ' | grep -E "test2.*test3.*test4" + rbd children --descendants test1 | sort | tr '\n' ' ' | grep -E "test2.*test3.*test4" + + rbd remove test4 + rbd snap unprotect test1@1 + + rbd snap remove test1@1 + rbd snap list --all test1 | grep -E "trash \(1\) *$" + + rbd snap create test1@2 + rbd rm test1 2>&1 | grep 'image has snapshots' + + rbd snap rm test1@2 + rbd rm test1 2>&1 | grep 'linked clones' + + rbd rm test3 + rbd rm test1 2>&1 | grep 'linked clones' + + rbd flatten test2 + rbd snap list --all test1 | wc -l | grep '^0$' + rbd rm test1 + rbd rm test2 +} + +test_thick_provision() { + echo "testing thick provision..." + remove_images + + # Try to create small and large thick-pro image and + # check actual size. (64M and 4G) + + # Small thick-pro image test + rbd create $RBD_CREATE_ARGS --thick-provision -s 64M test1 + count=0 + ret="" + while [ $count -lt 10 ] + do + rbd du|grep test1|tr -s " "|cut -d " " -f 4-5|grep '^64 MiB' && ret=$? + if [ "$ret" = "0" ] + then + break; + fi + count=`expr $count + 1` + sleep 2 + done + rbd du + if [ "$ret" != "0" ] + then + exit 1 + fi + rbd rm test1 + rbd ls | grep test1 | wc -l | grep '^0$' + + # Large thick-pro image test + rbd create $RBD_CREATE_ARGS --thick-provision -s 4G test1 + count=0 + ret="" + while [ $count -lt 10 ] + do + rbd du|grep test1|tr -s " "|cut -d " " -f 4-5|grep '^4 GiB' && ret=$? + if [ "$ret" = "0" ] + then + break; + fi + count=`expr $count + 1` + sleep 2 + done + rbd du + if [ "$ret" != "0" ] + then + exit 1 + fi + rbd rm test1 + rbd ls | grep test1 | wc -l | grep '^0$' +} + +test_namespace() { + echo "testing namespace..." 
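+ # Namespaces partition a pool; images are addressed as pool/namespace/image.
+ # The checks below cover creating and listing namespaces, v2 clones across
+ # namespaces within the pool, v1 clones (only within a single namespace),
+ # trash and groups inside a namespace, and the rule that a namespace
+ # still holding images cannot be removed.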
+ remove_images + + rbd namespace ls | wc -l | grep '^0$' + rbd namespace create rbd/test1 + rbd namespace create --pool rbd --namespace test2 + rbd namespace create --namespace test3 + expect_fail rbd namespace create rbd/test3 + + rbd namespace list | grep 'test' | wc -l | grep '^3$' + + expect_fail rbd namespace remove --pool rbd missing + + rbd create $RBD_CREATE_ARGS --size 1G rbd/test1/image1 + + # default test1 ns to test2 ns clone + rbd bench --io-type write --io-pattern rand --io-total 32M --io-size 4K rbd/test1/image1 + rbd snap create rbd/test1/image1@1 + rbd clone --rbd-default-clone-format 2 rbd/test1/image1@1 rbd/test2/image1 + rbd snap rm rbd/test1/image1@1 + cmp <(rbd export rbd/test1/image1 -) <(rbd export rbd/test2/image1 -) + rbd rm rbd/test2/image1 + + # default ns to test1 ns clone + rbd create $RBD_CREATE_ARGS --size 1G rbd/image2 + rbd bench --io-type write --io-pattern rand --io-total 32M --io-size 4K rbd/image2 + rbd snap create rbd/image2@1 + rbd clone --rbd-default-clone-format 2 rbd/image2@1 rbd/test2/image2 + rbd snap rm rbd/image2@1 + cmp <(rbd export rbd/image2 -) <(rbd export rbd/test2/image2 -) + expect_fail rbd rm rbd/image2 + rbd rm rbd/test2/image2 + rbd rm rbd/image2 + + # v1 clones are supported within the same namespace + rbd create $RBD_CREATE_ARGS --size 1G rbd/test1/image3 + rbd snap create rbd/test1/image3@1 + rbd snap protect rbd/test1/image3@1 + rbd clone --rbd-default-clone-format 1 rbd/test1/image3@1 rbd/test1/image4 + rbd rm rbd/test1/image4 + rbd snap unprotect rbd/test1/image3@1 + rbd snap rm rbd/test1/image3@1 + rbd rm rbd/test1/image3 + + rbd create $RBD_CREATE_ARGS --size 1G --namespace test1 image2 + expect_fail rbd namespace remove rbd/test1 + + rbd group create rbd/test1/group1 + rbd group image add rbd/test1/group1 rbd/test1/image1 + rbd group rm rbd/test1/group1 + + rbd trash move rbd/test1/image1 + ID=`rbd trash --namespace test1 ls | cut -d ' ' -f 1` + rbd trash rm rbd/test1/${ID} + + rbd remove rbd/test1/image2 + + rbd namespace remove --pool rbd --namespace test1 + rbd namespace remove --namespace test3 + + rbd namespace list | grep 'test' | wc -l | grep '^1$' + rbd namespace remove rbd/test2 +} + +get_migration_state() { + local image=$1 + + rbd --format xml status $image | + $XMLSTARLET sel -t -v '//status/migration/state' +} + +test_migration() { + echo "testing migration..." 
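+ # Live migration proceeds prepare -> execute -> commit, with
+ # "migration abort" as the rollback path before commit; get_migration_state()
+ # above reads the current phase from "rbd status --format xml". The blocks
+ # below migrate between image formats, pools and namespaces, and finally a
+ # parent image with v1/v2 clones attached (commit then requires --force).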
+ remove_images + ceph osd pool create rbd2 8 + rbd pool init rbd2 + + # Convert to new format + rbd create --image-format 1 -s 128M test1 + rbd info test1 | grep 'format: 1' + rbd migration prepare test1 --image-format 2 + test "$(get_migration_state test1)" = prepared + rbd info test1 | grep 'format: 2' + rbd rm test1 && exit 1 || true + rbd migration execute test1 + test "$(get_migration_state test1)" = executed + rbd migration commit test1 + get_migration_state test1 && exit 1 || true + + # Enable layering (and some other features) + rbd info test1 | grep 'features: .*layering' && exit 1 || true + rbd migration prepare test1 --image-feature \ + layering,exclusive-lock,object-map,fast-diff,deep-flatten + rbd info test1 | grep 'features: .*layering' + rbd migration execute test1 + rbd migration commit test1 + + # Migration to other pool + rbd migration prepare test1 rbd2/test1 + test "$(get_migration_state rbd2/test1)" = prepared + rbd ls | wc -l | grep '^0$' + rbd -p rbd2 ls | grep test1 + rbd migration execute test1 + test "$(get_migration_state rbd2/test1)" = executed + rbd rm rbd2/test1 && exit 1 || true + rbd migration commit test1 + + # Migration to other namespace + rbd namespace create rbd2/ns1 + rbd namespace create rbd2/ns2 + rbd migration prepare rbd2/test1 rbd2/ns1/test1 + test "$(get_migration_state rbd2/ns1/test1)" = prepared + rbd migration execute rbd2/test1 + test "$(get_migration_state rbd2/ns1/test1)" = executed + rbd migration commit rbd2/test1 + rbd migration prepare rbd2/ns1/test1 rbd2/ns2/test1 + rbd migration execute rbd2/ns2/test1 + rbd migration commit rbd2/ns2/test1 + + # Enable data pool + rbd create -s 128M test1 + rbd migration prepare test1 --data-pool rbd2 + rbd info test1 | grep 'data_pool: rbd2' + rbd migration execute test1 + rbd migration commit test1 + + # testing trash + rbd migration prepare test1 + expect_fail rbd trash mv test1 + ID=`rbd trash ls -a | cut -d ' ' -f 1` + expect_fail rbd trash rm $ID + expect_fail rbd trash restore $ID + rbd migration abort test1 + + # Migrate parent + rbd remove test1 + dd if=/dev/urandom bs=1M count=1 | rbd --image-format 2 import - test1 + md5sum=$(rbd export test1 - | md5sum) + rbd snap create test1@snap1 + rbd snap protect test1@snap1 + rbd snap create test1@snap2 + rbd clone test1@snap1 clone_v1 --rbd_default_clone_format=1 + rbd clone test1@snap2 clone_v2 --rbd_default_clone_format=2 + rbd info clone_v1 | fgrep 'parent: rbd/test1@snap1' + rbd info clone_v2 | fgrep 'parent: rbd/test1@snap2' + rbd info clone_v2 |grep 'op_features: clone-child' + test "$(rbd export clone_v1 - | md5sum)" = "${md5sum}" + test "$(rbd export clone_v2 - | md5sum)" = "${md5sum}" + test "$(rbd children test1@snap1)" = "rbd/clone_v1" + test "$(rbd children test1@snap2)" = "rbd/clone_v2" + rbd migration prepare test1 rbd2/test2 + rbd info clone_v1 | fgrep 'parent: rbd2/test2@snap1' + rbd info clone_v2 | fgrep 'parent: rbd2/test2@snap2' + rbd info clone_v2 | fgrep 'op_features: clone-child' + test "$(rbd children rbd2/test2@snap1)" = "rbd/clone_v1" + test "$(rbd children rbd2/test2@snap2)" = "rbd/clone_v2" + rbd migration execute test1 + expect_fail rbd migration commit test1 + rbd migration commit test1 --force + test "$(rbd export clone_v1 - | md5sum)" = "${md5sum}" + test "$(rbd export clone_v2 - | md5sum)" = "${md5sum}" + rbd migration prepare rbd2/test2 test1 + rbd info clone_v1 | fgrep 'parent: rbd/test1@snap1' + rbd info clone_v2 | fgrep 'parent: rbd/test1@snap2' + rbd info clone_v2 | fgrep 'op_features: clone-child' + test "$(rbd 
children test1@snap1)" = "rbd/clone_v1" + test "$(rbd children test1@snap2)" = "rbd/clone_v2" + rbd migration execute test1 + expect_fail rbd migration commit test1 + rbd migration commit test1 --force + test "$(rbd export clone_v1 - | md5sum)" = "${md5sum}" + test "$(rbd export clone_v2 - | md5sum)" = "${md5sum}" + rbd remove clone_v1 + rbd remove clone_v2 + rbd snap unprotect test1@snap1 + rbd snap purge test1 + rbd rm test1 + + for format in 1 2; do + # Abort migration after successful prepare + rbd create -s 128M --image-format ${format} test2 + rbd migration prepare test2 --data-pool rbd2 + rbd bench --io-type write --io-size 1024 --io-total 1024 test2 + rbd migration abort test2 + rbd bench --io-type write --io-size 1024 --io-total 1024 test2 + rbd rm test2 + + # Abort migration after successful execute + rbd create -s 128M --image-format ${format} test2 + rbd migration prepare test2 --data-pool rbd2 + rbd bench --io-type write --io-size 1024 --io-total 1024 test2 + rbd migration execute test2 + rbd migration abort test2 + rbd bench --io-type write --io-size 1024 --io-total 1024 test2 + rbd rm test2 + + # Migration is automatically aborted if prepare failed + rbd create -s 128M --image-format ${format} test2 + rbd migration prepare test2 --data-pool INVALID_DATA_POOL && exit 1 || true + rbd bench --io-type write --io-size 1024 --io-total 1024 test2 + rbd rm test2 + + # Abort migration to other pool + rbd create -s 128M --image-format ${format} test2 + rbd migration prepare test2 rbd2/test2 + rbd bench --io-type write --io-size 1024 --io-total 1024 rbd2/test2 + rbd migration abort test2 + rbd bench --io-type write --io-size 1024 --io-total 1024 test2 + rbd rm test2 + + # The same but abort using destination image + rbd create -s 128M --image-format ${format} test2 + rbd migration prepare test2 rbd2/test2 + rbd migration abort rbd2/test2 + rbd bench --io-type write --io-size 1024 --io-total 1024 test2 + rbd rm test2 + + test $format = 1 && continue + + # Abort migration to other namespace + rbd create -s 128M --image-format ${format} test2 + rbd migration prepare test2 rbd2/ns1/test3 + rbd bench --io-type write --io-size 1024 --io-total 1024 rbd2/ns1/test3 + rbd migration abort test2 + rbd bench --io-type write --io-size 1024 --io-total 1024 test2 + rbd rm test2 + done + + remove_images + ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it +} + +test_config() { + echo "testing config..." 
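+ # rbd config overrides exist at the global (per config entity), pool and
+ # image levels; the source column of the "config ... list" output shows
+ # which level an effective value comes from, which is what the greps
+ # below assert as settings are added and removed at each level.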
+ remove_images + + expect_fail rbd config global set osd rbd_cache true + expect_fail rbd config global set global debug_ms 10 + expect_fail rbd config global set global rbd_UNKNOWN false + expect_fail rbd config global set global rbd_cache INVALID + rbd config global set global rbd_cache false + rbd config global set client rbd_cache true + rbd config global set client.123 rbd_cache false + rbd config global get global rbd_cache | grep '^false$' + rbd config global get client rbd_cache | grep '^true$' + rbd config global get client.123 rbd_cache | grep '^false$' + expect_fail rbd config global get client.UNKNOWN rbd_cache + rbd config global list global | grep '^rbd_cache * false * global *$' + rbd config global list client | grep '^rbd_cache * true * client *$' + rbd config global list client.123 | grep '^rbd_cache * false * client.123 *$' + rbd config global list client.UNKNOWN | grep '^rbd_cache * true * client *$' + rbd config global rm client rbd_cache + expect_fail rbd config global get client rbd_cache + rbd config global list client | grep '^rbd_cache * false * global *$' + rbd config global rm client.123 rbd_cache + rbd config global rm global rbd_cache + + rbd config pool set rbd rbd_cache true + rbd config pool list rbd | grep '^rbd_cache * true * pool *$' + rbd config pool get rbd rbd_cache | grep '^true$' + + rbd create $RBD_CREATE_ARGS -s 1 test1 + + rbd config image list rbd/test1 | grep '^rbd_cache * true * pool *$' + rbd config image set rbd/test1 rbd_cache false + rbd config image list rbd/test1 | grep '^rbd_cache * false * image *$' + rbd config image get rbd/test1 rbd_cache | grep '^false$' + rbd config image remove rbd/test1 rbd_cache + expect_fail rbd config image get rbd/test1 rbd_cache + rbd config image list rbd/test1 | grep '^rbd_cache * true * pool *$' + + rbd config pool remove rbd rbd_cache + expect_fail rbd config pool get rbd rbd_cache + rbd config pool list rbd | grep '^rbd_cache * true * config *$' + + rbd rm test1 +} + +test_pool_image_args +test_rename +test_ls +test_remove +test_migration +test_config +RBD_CREATE_ARGS="" +test_others +test_locking +test_thick_provision +RBD_CREATE_ARGS="--image-format 2" +test_others +test_locking +test_clone +test_trash +test_purge +test_deep_copy_clone +test_clone_v2 +test_thick_provision +test_namespace + +echo OK diff --git a/qa/workunits/rbd/concurrent.sh b/qa/workunits/rbd/concurrent.sh new file mode 100755 index 00000000..abaad75f --- /dev/null +++ b/qa/workunits/rbd/concurrent.sh @@ -0,0 +1,375 @@ +#!/usr/bin/env bash + +# Copyright (C) 2013 Inktank Storage, Inc. +# +# This is free software; see the source for copying conditions. +# There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR +# A PARTICULAR PURPOSE. +# +# This is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as +# published by the Free Software Foundation version 2. + +# Alex Elder +# January 29, 2013 + +################################################################ + +# The purpose of this test is to exercise paths through the rbd +# code, making sure no bad pointer references or invalid reference +# count operations occur in the face of concurrent activity. +# +# Each pass of the test creates an rbd image, maps it, and writes +# some data into the image. It also reads some data from all of the +# other images that exist at the time the pass executes. Finally, +# the image is unmapped and removed. The image removal completes in +# the background. 
+# +# An iteration of the test consists of performing some number of +# passes, initiating each pass as a background job, and finally +# sleeping for a variable delay. The delay is initially a specified +# value, but each iteration shortens that proportionally, such that +# the last iteration will not delay at all. +# +# The result exercises concurrent creates and deletes of rbd images, +# writes to new images, reads from both written and unwritten image +# data (including reads concurrent with writes), and attempts to +# unmap images being read. + +# Usage: concurrent [-i <iterations>] [-c <count>] [-d <delay>] +# +# Exit status: +# 0: success +# 1: usage error +# 2: other runtime error +# 99: argument count error (programming error) +# 100: getopt error (internal error) + +################################################################ + +set -ex + +# Default flag values; RBD_CONCURRENT_ITER names are intended +# to be used in yaml scripts to pass in alternate values, e.g.: +# env: +# RBD_CONCURRENT_ITER: 20 +# RBD_CONCURRENT_COUNT: 5 +# RBD_CONCURRENT_DELAY: 3 +ITER_DEFAULT=${RBD_CONCURRENT_ITER:-100} +COUNT_DEFAULT=${RBD_CONCURRENT_COUNT:-5} +DELAY_DEFAULT=${RBD_CONCURRENT_DELAY:-5} # seconds + +CEPH_SECRET_FILE=${CEPH_SECRET_FILE:-} +CEPH_ID=${CEPH_ID:-admin} +SECRET_ARGS="" +if [ "${CEPH_SECRET_FILE}" ]; then + SECRET_ARGS="--secret $CEPH_SECRET_FILE" +fi + +################################################################ + +function setup() { + ID_MAX_DIR=$(mktemp -d /tmp/image_max_id.XXXXX) + ID_COUNT_DIR=$(mktemp -d /tmp/image_ids.XXXXXX) + NAMES_DIR=$(mktemp -d /tmp/image_names.XXXXXX) + SOURCE_DATA=$(mktemp /tmp/source_data.XXXXXX) + + # Use urandom to generate SOURCE_DATA + dd if=/dev/urandom of=${SOURCE_DATA} bs=2048 count=66 \ + >/dev/null 2>&1 + + # List of rbd id's *not* created by this script + export INITIAL_RBD_IDS=$(ls /sys/bus/rbd/devices) + + # Set up some environment for normal teuthology test setup. + # This really should not be necessary but I found it was. + + export CEPH_ARGS=" --name client.0" +} + +function cleanup() { + [ !
"${ID_MAX_DIR}" ] && return + local id + local image + + # Unmap mapped devices + for id in $(rbd_ids); do + image=$(cat "/sys/bus/rbd/devices/${id}/name") + rbd_unmap_image "${id}" + rbd_destroy_image "${image}" + done + # Get any leftover images + for image in $(rbd ls 2>/dev/null); do + rbd_destroy_image "${image}" + done + wait + sync + rm -f "${SOURCE_DATA}" + [ -d "${NAMES_DIR}" ] && rmdir "${NAMES_DIR}" + echo "Max concurrent rbd image count was $(get_max "${ID_COUNT_DIR}")" + rm -rf "${ID_COUNT_DIR}" + echo "Max rbd image id was $(get_max "${ID_MAX_DIR}")" + rm -rf "${ID_MAX_DIR}" +} + +function get_max() { + [ $# -eq 1 ] || exit 99 + local dir="$1" + + ls -U "${dir}" | sort -n | tail -1 +} + +trap cleanup HUP INT QUIT + +# print a usage message and quit +# +# if a message is supplied, print that first, and then exit +# with non-zero status +function usage() { + if [ $# -gt 0 ]; then + echo "" >&2 + echo "$@" >&2 + fi + + echo "" >&2 + echo "Usage: ${PROGNAME} [<options>]" >&2 + echo "" >&2 + echo " options:" >&2 + echo " -h or --help" >&2 + echo " show this message" >&2 + echo " -i or --iterations" >&2 + echo " iteration count (1 or more)" >&2 + echo " -c or --count" >&2 + echo " images created per iteration (1 or more)" >&2 + echo " -d or --delay" >&2 + echo " maximum delay between iterations" >&2 + echo "" >&2 + echo " defaults:" >&2 + echo " iterations: ${ITER_DEFAULT}" + echo " count: ${COUNT_DEFAULT}" + echo " delay: ${DELAY_DEFAULT} (seconds)" + echo "" >&2 + + [ $# -gt 0 ] && exit 1 + + exit 0 # This is used for a --help +} + +# parse command line arguments +function parseargs() { + ITER="${ITER_DEFAULT}" + COUNT="${COUNT_DEFAULT}" + DELAY="${DELAY_DEFAULT}" + + # Short option flags + SHORT_OPTS="" + SHORT_OPTS="${SHORT_OPTS},h" + SHORT_OPTS="${SHORT_OPTS},i:" + SHORT_OPTS="${SHORT_OPTS},c:" + SHORT_OPTS="${SHORT_OPTS},d:" + + # Long option flags + LONG_OPTS="" + LONG_OPTS="${LONG_OPTS},help" + LONG_OPTS="${LONG_OPTS},iterations:" + LONG_OPTS="${LONG_OPTS},count:" + LONG_OPTS="${LONG_OPTS},delay:" + + TEMP=$(getopt --name "${PROGNAME}" \ + --options "${SHORT_OPTS}" \ + --longoptions "${LONG_OPTS}" \ + -- "$@") + eval set -- "$TEMP" + + while [ "$1" != "--" ]; do + case "$1" in + -h|--help) + usage + ;; + -i|--iterations) + ITER="$2" + [ "${ITER}" -lt 1 ] && + usage "bad iterations value" + shift + ;; + -c|--count) + COUNT="$2" + [ "${COUNT}" -lt 1 ] && + usage "bad count value" + shift + ;; + -d|--delay) + DELAY="$2" + shift + ;; + *) + exit 100 # Internal error + ;; + esac + shift + done + shift +} + +function rbd_ids() { + [ $# -eq 0 ] || exit 99 + local ids + local i + + [ -d /sys/bus/rbd ] || return + ids=" $(echo $(ls /sys/bus/rbd/devices)) " + for i in ${INITIAL_RBD_IDS}; do + ids=${ids/ ${i} / } + done + echo ${ids} +} + +function update_maxes() { + local ids="$@" + local last_id + # These aren't 100% safe against concurrent updates but it + # should be pretty close + count=$(echo ${ids} | wc -w) + touch "${ID_COUNT_DIR}/${count}" + last_id=${ids% } + last_id=${last_id##* } + touch "${ID_MAX_DIR}/${last_id}" +} + +function rbd_create_image() { + [ $# -eq 0 ] || exit 99 + local image=$(basename $(mktemp "${NAMES_DIR}/image.XXXXXX")) + + rbd create "${image}" --size=1024 + echo "${image}" +} + +function rbd_image_id() { + [ $# -eq 1 ] || exit 99 + local image="$1" + + grep -l "${image}" /sys/bus/rbd/devices/*/name 2>/dev/null | + cut -d / -f 6 +} + +function rbd_map_image() { + [ $# -eq 1 ] || exit 99 + local image="$1" + local id + + sudo rbd map "${image}" --user "${CEPH_ID}"
${SECRET_ARGS} \ + > /dev/null 2>&1 + + id=$(rbd_image_id "${image}") + echo "${id}" +} + +function rbd_write_image() { + [ $# -eq 1 ] || exit 99 + local id="$1" + + # Offset and size here are meant to ensure beginning and end + # cross both (4K or 64K) page and (4MB) rbd object boundaries. + # It assumes the SOURCE_DATA file has size 66 * 2048 bytes + dd if="${SOURCE_DATA}" of="/dev/rbd${id}" bs=2048 seek=2015 \ + > /dev/null 2>&1 +} + +# All starting and ending offsets here are selected so they are not +# aligned on a (4 KB or 64 KB) page boundary +function rbd_read_image() { + [ $# -eq 1 ] || exit 99 + local id="$1" + + # First read starting and ending at an offset before any + # written data. The osd zero-fills data read from an + # existing rbd object, but before any previously-written + # data. + dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=3 \ + > /dev/null 2>&1 + # Next read starting at an offset before any written data, + # but ending at an offset that includes data that's been + # written. The osd zero-fills unwritten data at the + # beginning of a read. + dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=1983 \ + > /dev/null 2>&1 + # Read the data at offset 2015 * 2048 bytes (where it was + # written) and make sure it matches the original data. + cmp --quiet "${SOURCE_DATA}" "/dev/rbd${id}" 0 4126720 || + echo "MISMATCH!!!" + # Now read starting within the pre-written data, but ending + # beyond it. The rbd client zero-fills the unwritten + # portion at the end of a read. + dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=2079 \ + > /dev/null 2>&1 + # Now read starting from an unwritten range within a written + # rbd object. The rbd client zero-fills this. + dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=2115 \ + > /dev/null 2>&1 + # Finally read from an unwritten region which would reside + # in a different (non-existent) osd object. The osd client + # zero-fills unwritten data when the target object doesn't + # exist. + dd if="/dev/rbd${id}" of=/dev/null bs=2048 count=34 skip=4098 \ + > /dev/null 2>&1 +} + +function rbd_unmap_image() { + [ $# -eq 1 ] || exit 99 + local id="$1" + + sudo rbd unmap "/dev/rbd${id}" +} + +function rbd_destroy_image() { + [ $# -eq 1 ] || exit 99 + local image="$1" + + # Don't wait for it to complete, to increase concurrency + rbd rm "${image}" >/dev/null 2>&1 & + rm -f "${NAMES_DIR}/${image}" +} + +function one_pass() { + [ $# -eq 0 ] || exit 99 + local image + local id + local ids + local i + + image=$(rbd_create_image) + id=$(rbd_map_image "${image}") + ids=$(rbd_ids) + update_maxes "${ids}" + for i in ${ids}; do + if [ "${i}" -eq "${id}" ]; then + rbd_write_image "${i}" + else + rbd_read_image "${i}" + fi + done + rbd_unmap_image "${id}" + rbd_destroy_image "${image}" +} + +################################################################ + +parseargs "$@" + +setup + +for iter in $(seq 1 "${ITER}"); do + for count in $(seq 1 "${COUNT}"); do + one_pass & + done + # Sleep longer at first, overlap iterations more later. + # Use awk to get sub-second granularity (see sleep(1)).
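+ # e.g. with the defaults DELAY=5 and ITER=100, iteration 1 sleeps
+ # 5 - 5*1/100 = 4.95s, iteration 50 sleeps 2.50s, and the final
+ # iteration sleeps 0s, so later iterations overlap more and more.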
+ sleep $(echo "${DELAY}" "${iter}" "${ITER}" | + awk '{ printf("%.2f\n", $1 - $1 * $2 / $3);}') + +done +wait + +cleanup + +exit 0 diff --git a/qa/workunits/rbd/diff.sh b/qa/workunits/rbd/diff.sh new file mode 100755 index 00000000..fbd6e064 --- /dev/null +++ b/qa/workunits/rbd/diff.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash +set -ex + +function cleanup() { + rbd snap purge foo || : + rbd rm foo || : + rbd snap purge foo.copy || : + rbd rm foo.copy || : + rbd snap purge foo.copy2 || : + rbd rm foo.copy2 || : + rm -f foo.diff foo.out +} + +cleanup + +rbd create foo --size 1000 +rbd bench --io-type write foo --io-size 4096 --io-threads 5 --io-total 4096000 --io-pattern rand + +#rbd cp foo foo.copy +rbd create foo.copy --size 1000 +rbd export-diff foo - | rbd import-diff - foo.copy + +rbd snap create foo --snap=two +rbd bench --io-type write foo --io-size 4096 --io-threads 5 --io-total 4096000 --io-pattern rand +rbd snap create foo --snap=three +rbd snap create foo.copy --snap=two + +rbd export-diff foo@two --from-snap three foo.diff && exit 1 || true # wrong snap order +rm -f foo.diff + +rbd export-diff foo@three --from-snap two foo.diff +rbd import-diff foo.diff foo.copy +rbd import-diff foo.diff foo.copy && exit 1 || true # this should fail with EEXIST on the end snap +rbd snap ls foo.copy | grep three + +rbd create foo.copy2 --size 1000 +rbd import-diff foo.diff foo.copy2 && exit 1 || true # this should fail bc the start snap dne + +rbd export foo foo.out +orig=`md5sum foo.out | awk '{print $1}'` +rm foo.out +rbd export foo.copy foo.out +copy=`md5sum foo.out | awk '{print $1}'` + +if [ "$orig" != "$copy" ]; then + echo does not match + exit 1 +fi + +cleanup + +echo OK + diff --git a/qa/workunits/rbd/diff_continuous.sh b/qa/workunits/rbd/diff_continuous.sh new file mode 100755 index 00000000..b8f7e8b7 --- /dev/null +++ b/qa/workunits/rbd/diff_continuous.sh @@ -0,0 +1,60 @@ +#!/usr/bin/env bash +set -ex + +max=20 +size=1500 + +iosize=16384 +iototal=16384000 +iothreads=16 + +parent=`uuidgen`"-parent" +src=`uuidgen`"-src"; +dst=`uuidgen`"-dst"; + +function cleanup() { + rbd snap purge $src || : + rbd rm $src || : + rbd snap purge $dst || : + rbd rm $dst || : + rbd snap unprotect $parent --snap parent || : + rbd snap purge $parent || : + rbd rm $parent || : +} +trap cleanup EXIT + +# start from a clone +rbd create $parent --size $size --image-format 2 --stripe-count 8 --stripe-unit 65536 +rbd bench --io-type write $parent --io-size $iosize --io-threads $iothreads --io-total $iototal --io-pattern rand +rbd snap create $parent --snap parent +rbd snap protect $parent --snap parent +rbd clone $parent@parent $src --stripe-count 4 --stripe-unit 262144 +rbd create $dst --size $size --image-format 2 --order 19 + +# mirror for a while +for s in `seq 1 $max`; do + rbd snap create $src --snap=snap$s + rbd export-diff $src@snap$s - $lastsnap | rbd import-diff - $dst & + rbd bench --io-type write $src --io-size $iosize --io-threads $iothreads --io-total $iototal --io-pattern rand & + wait + lastsnap="--from-snap snap$s" +done + +#trap "" EXIT +#exit 0 + +# validate +for s in `seq 1 $max`; do + ssum=`rbd export $src@snap$s - | md5sum` + dsum=`rbd export $dst@snap$s - | md5sum` + if [ "$ssum" != "$dsum" ]; then + echo different sum at snap$s + exit 1 + fi +done + +cleanup +trap "" EXIT + +echo OK + diff --git a/qa/workunits/rbd/huge-tickets.sh b/qa/workunits/rbd/huge-tickets.sh new file mode 100755 index 00000000..22853c07 --- /dev/null +++ b/qa/workunits/rbd/huge-tickets.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env 
bash + +# This is a test for http://tracker.ceph.com/issues/8979 and the fallout +# from triaging it. #8979 itself was random crashes on corrupted memory +# due to a buffer overflow (for tickets larger than 256 bytes); further +# inspection showed that vmalloced tickets weren't handled correctly as +# well. +# +# What we are doing here is generating three huge keyrings and feeding +# them to libceph (through 'rbd map' on a scratch image). Bad kernels +# will crash reliably either on corrupted memory somewhere or a bad page +# fault in scatterwalk_pagedone(). + +set -ex + +function generate_keyring() { + local user=$1 + local n=$2 + + ceph-authtool -C -n client.$user --cap mon 'allow *' --gen-key /tmp/keyring-$user + + set +x # don't pollute trace with echos + echo -en "\tcaps osd = \"allow rwx pool=rbd" >>/tmp/keyring-$user + for i in $(seq 1 $n); do + echo -n ", allow rwx pool=pool$i" >>/tmp/keyring-$user + done + echo "\"" >>/tmp/keyring-$user + set -x +} + +generate_keyring foo 1000 # ~25K, kmalloc +generate_keyring bar 20000 # ~500K, vmalloc +generate_keyring baz 300000 # ~8M, vmalloc + sg chaining + +rbd create --size 1 test + +for user in {foo,bar,baz}; do + ceph auth import -i /tmp/keyring-$user + DEV=$(sudo rbd map -n client.$user --keyring /tmp/keyring-$user test) + sudo rbd unmap $DEV +done diff --git a/qa/workunits/rbd/image_read.sh b/qa/workunits/rbd/image_read.sh new file mode 100755 index 00000000..ddca8356 --- /dev/null +++ b/qa/workunits/rbd/image_read.sh @@ -0,0 +1,680 @@ +#!/usr/bin/env bash + +# Copyright (C) 2013 Inktank Storage, Inc. +# +# This is free software; see the source for copying conditions. +# There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR +# A PARTICULAR PURPOSE. +# +# This is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as +# published by the Free Software Foundation version 2. + +# Alex Elder +# April 10, 2013 + +################################################################ + +# The purpose of this test is to validate that data read from a +# mapped rbd image is what it's expected to be. +# +# By default it creates an image and fills it with some data. It +# then reads back the data at a series of offsets known to cover +# various situations (such as reading the beginning, end, or the +# entirety of an object, or doing a read that spans multiple +# objects), and stashes the results in a set of local files. +# +# It also creates and maps a snapshot of the original image after +# it's been filled, and reads back the same ranges of data from the +# snapshot. It then compares the data read back with what was read +# back from the original image, verifying they match. +# +# Clone functionality is tested as well, in which case a clone is +# made of the snapshot, and the same ranges of data are again read +# and compared with the original. In addition, a snapshot of that +# clone is created, and a clone of *that* snapshot is put through +# the same set of tests. (Clone testing can be optionally skipped.) + +################################################################ + +# Default parameter values. Environment variables, if set, will +# supersede these defaults. Such variables have names that begin +# with "IMAGE_READ_", e.g. use IMAGE_READ_PAGE_SIZE=65536 +# to use 65536 as the page size.
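+# For example, a run with 64 KiB pages and 1 MiB (order 20) objects could
+# be started as (illustrative invocation):
+# IMAGE_READ_PAGE_SIZE=65536 IMAGE_READ_OBJECT_ORDER=20 ./image_read.sh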
+set -e + +DEFAULT_VERBOSE=true +DEFAULT_TEST_CLONES=true +DEFAULT_LOCAL_FILES=false +DEFAULT_FORMAT=2 +DEFAULT_DOUBLE_ORDER=true +DEFAULT_HALF_ORDER=false +DEFAULT_PAGE_SIZE=4096 +DEFAULT_OBJECT_ORDER=22 +MIN_OBJECT_ORDER=12 # technically 9, but the rbd CLI enforces 12 +MAX_OBJECT_ORDER=32 + +RBD_FORCE_ALLOW_V1=1 + +PROGNAME=$(basename $0) + +ORIGINAL=original-$$ +SNAP1=snap1-$$ +CLONE1=clone1-$$ +SNAP2=snap2-$$ +CLONE2=clone2-$$ + +function err() { + if [ $# -gt 0 ]; then + echo "${PROGNAME}: $@" >&2 + fi + exit 2 +} + +function usage() { + if [ $# -gt 0 ]; then + echo "" >&2 + echo "${PROGNAME}: $@" >&2 + fi + echo "" >&2 + echo "Usage: ${PROGNAME} []" >&2 + echo "" >&2 + echo "options are:" >&2 + echo " -o object_order" >&2 + echo " must be ${MIN_OBJECT_ORDER}..${MAX_OBJECT_ORDER}" >&2 + echo " -p page_size (in bytes)" >&2 + echo " note: there must be at least 4 pages per object" >&2 + echo " -1" >&2 + echo " test using format 1 rbd images (default)" >&2 + echo " -2" >&2 + echo " test using format 2 rbd images" >&2 + echo " -c" >&2 + echo " also test rbd clone images (implies format 2)" >&2 + echo " -d" >&2 + echo " clone object order double its parent's (format 2)" >&2 + echo " -h" >&2 + echo " clone object order half of its parent's (format 2)" >&2 + echo " -l" >&2 + echo " use local files rather than rbd images" >&2 + echo " -v" >&2 + echo " disable reporting of what's going on" >&2 + echo "" >&2 + exit 1 +} + +function verbose() { + [ "${VERBOSE}" = true ] && echo "$@" + true # Don't let the verbose test spoil our return value +} + +function quiet() { + "$@" 2> /dev/null +} + +function boolean_toggle() { + [ $# -eq 1 ] || exit 99 + test "$1" = "true" && echo false || echo true +} + +function parseargs() { + local opts="o:p:12clv" + local lopts="order:,page_size:,local,clone,verbose" + local parsed + local clone_order_msg + + # use values from environment if available + VERBOSE="${IMAGE_READ_VERBOSE:-${DEFAULT_VERBOSE}}" + TEST_CLONES="${IMAGE_READ_TEST_CLONES:-${DEFAULT_TEST_CLONES}}" + LOCAL_FILES="${IMAGE_READ_LOCAL_FILES:-${DEFAULT_LOCAL_FILES}}" + DOUBLE_ORDER="${IMAGE_READ_DOUBLE_ORDER:-${DEFAULT_DOUBLE_ORDER}}" + HALF_ORDER="${IMAGE_READ_HALF_ORDER:-${DEFAULT_HALF_ORDER}}" + FORMAT="${IMAGE_READ_FORMAT:-${DEFAULT_FORMAT}}" + PAGE_SIZE="${IMAGE_READ_PAGE_SIZE:-${DEFAULT_PAGE_SIZE}}" + OBJECT_ORDER="${IMAGE_READ_OBJECT_ORDER:-${DEFAULT_OBJECT_ORDER}}" + + parsed=$(getopt -o "${opts}" -l "${lopts}" -n "${PROGNAME}" -- "$@") || + usage + eval set -- "${parsed}" + while true; do + case "$1" in + -v|--verbose) + VERBOSE=$(boolean_toggle "${VERBOSE}");; + -c|--clone) + TEST_CLONES=$(boolean_toggle "${TEST_CLONES}");; + -d|--double) + DOUBLE_ORDER=$(boolean_toggle "${DOUBLE_ORDER}");; + -h|--half) + HALF_ORDER=$(boolean_toggle "${HALF_ORDER}");; + -l|--local) + LOCAL_FILES=$(boolean_toggle "${LOCAL_FILES}");; + -1|-2) + FORMAT="${1:1}";; + -p|--page_size) + PAGE_SIZE="$2"; shift;; + -o|--order) + OBJECT_ORDER="$2"; shift;; + --) + shift; break;; + *) + err "getopt internal error" + esac + shift + done + [ $# -gt 0 ] && usage "excess arguments ($*)" + + if [ "${TEST_CLONES}" = true ]; then + # If we're using different object orders for clones, + # make sure the limits are updated accordingly. If + # both "half" and "double" are specified, just + # ignore them both. 
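+ # Rationale: with --double (or --half) CLONE1 and CLONE2 use an object
+ # order one and two steps above (or below) the base image's, so the
+ # allowed range for the base order is tightened by 2 here to keep the
+ # clones' orders within MIN_OBJECT_ORDER..MAX_OBJECT_ORDER as well.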
+ if [ "${DOUBLE_ORDER}" = true ]; then + if [ "${HALF_ORDER}" = true ]; then + DOUBLE_ORDER=false + HALF_ORDER=false + else + ((MAX_OBJECT_ORDER -= 2)) + fi + elif [ "${HALF_ORDER}" = true ]; then + ((MIN_OBJECT_ORDER += 2)) + fi + fi + + [ "${OBJECT_ORDER}" -lt "${MIN_OBJECT_ORDER}" ] && + usage "object order (${OBJECT_ORDER}) must be" \ + "at least ${MIN_OBJECT_ORDER}" + [ "${OBJECT_ORDER}" -gt "${MAX_OBJECT_ORDER}" ] && + usage "object order (${OBJECT_ORDER}) must be" \ + "at most ${MAX_OBJECT_ORDER}" + + if [ "${TEST_CLONES}" = true ]; then + if [ "${DOUBLE_ORDER}" = true ]; then + ((CLONE1_ORDER = OBJECT_ORDER + 1)) + ((CLONE2_ORDER = OBJECT_ORDER + 2)) + clone_order_msg="double" + elif [ "${HALF_ORDER}" = true ]; then + ((CLONE1_ORDER = OBJECT_ORDER - 1)) + ((CLONE2_ORDER = OBJECT_ORDER - 2)) + clone_order_msg="half of" + else + CLONE1_ORDER="${OBJECT_ORDER}" + CLONE2_ORDER="${OBJECT_ORDER}" + clone_order_msg="the same as" + fi + fi + + [ "${TEST_CLONES}" != true ] || FORMAT=2 + + OBJECT_SIZE=$(echo "2 ^ ${OBJECT_ORDER}" | bc) + OBJECT_PAGES=$(echo "${OBJECT_SIZE} / ${PAGE_SIZE}" | bc) + IMAGE_SIZE=$((2 * 16 * OBJECT_SIZE / (1024 * 1024))) + [ "${IMAGE_SIZE}" -lt 1 ] && IMAGE_SIZE=1 + IMAGE_OBJECTS=$((IMAGE_SIZE * (1024 * 1024) / OBJECT_SIZE)) + + [ "${OBJECT_PAGES}" -lt 4 ] && + usage "object size (${OBJECT_SIZE}) must be" \ + "at least 4 * page size (${PAGE_SIZE})" + + echo "parameters for this run:" + echo " format ${FORMAT} images will be tested" + echo " object order is ${OBJECT_ORDER}, so" \ + "objects are ${OBJECT_SIZE} bytes" + echo " page size is ${PAGE_SIZE} bytes, so" \ + "there are ${OBJECT_PAGES} pages in an object" + echo " derived image size is ${IMAGE_SIZE} MB, so" \ + "there are ${IMAGE_OBJECTS} objects in an image" + if [ "${TEST_CLONES}" = true ]; then + echo " clone functionality will be tested" + echo " object size for a clone will be ${clone_order_msg}" + echo " the object size of its parent image" + fi + + true # Don't let the clones test spoil our return value +} + +function image_dev_path() { + [ $# -eq 1 ] || exit 99 + local image_name="$1" + + if [ "${LOCAL_FILES}" = true ]; then + echo "${TEMP}/${image_name}" + return + fi + + echo "/dev/rbd/rbd/${image_name}" +} + +function out_data_dir() { + [ $# -lt 2 ] || exit 99 + local out_data="${TEMP}/data" + local image_name + + if [ $# -eq 1 ]; then + image_name="$1" + echo "${out_data}/${image_name}" + else + echo "${out_data}" + fi +} + +function setup() { + verbose "===== setting up =====" + TEMP=$(mktemp -d /tmp/rbd_image_read.XXXXX) + mkdir -p $(out_data_dir) + + # create and fill the original image with some data + create_image "${ORIGINAL}" + map_image "${ORIGINAL}" + fill_original + + # create a snapshot of the original + create_image_snap "${ORIGINAL}" "${SNAP1}" + map_image_snap "${ORIGINAL}" "${SNAP1}" + + if [ "${TEST_CLONES}" = true ]; then + # create a clone of the original snapshot + create_snap_clone "${ORIGINAL}" "${SNAP1}" \ + "${CLONE1}" "${CLONE1_ORDER}" + map_image "${CLONE1}" + + # create a snapshot of that clone + create_image_snap "${CLONE1}" "${SNAP2}" + map_image_snap "${CLONE1}" "${SNAP2}" + + # create a clone of that clone's snapshot + create_snap_clone "${CLONE1}" "${SNAP2}" \ + "${CLONE2}" "${CLONE2_ORDER}" + map_image "${CLONE2}" + fi +} + +function teardown() { + verbose "===== cleaning up =====" + if [ "${TEST_CLONES}" = true ]; then + unmap_image "${CLONE2}" || true + destroy_snap_clone "${CLONE1}" "${SNAP2}" "${CLONE2}" || true + + unmap_image_snap "${CLONE1}" "${SNAP2}" ||
true + destroy_image_snap "${CLONE1}" "${SNAP2}" || true + + unmap_image "${CLONE1}" || true + destroy_snap_clone "${ORIGINAL}" "${SNAP1}" "${CLONE1}" || true + fi + unmap_image_snap "${ORIGINAL}" "${SNAP1}" || true + destroy_image_snap "${ORIGINAL}" "${SNAP1}" || true + unmap_image "${ORIGINAL}" || true + destroy_image "${ORIGINAL}" || true + + rm -rf $(out_data_dir) + rmdir "${TEMP}" +} + +function create_image() { + [ $# -eq 1 ] || exit 99 + local image_name="$1" + local image_path + local bytes + + verbose "creating image \"${image_name}\"" + if [ "${LOCAL_FILES}" = true ]; then + image_path=$(image_dev_path "${image_name}") + bytes=$(echo "${IMAGE_SIZE} * 1024 * 1024 - 1" | bc) + quiet dd if=/dev/zero bs=1 count=1 seek="${bytes}" \ + of="${image_path}" + return + fi + + rbd create "${image_name}" --image-format "${FORMAT}" \ + --size "${IMAGE_SIZE}" --order "${OBJECT_ORDER}" \ + --image-shared +} + +function destroy_image() { + [ $# -eq 1 ] || exit 99 + local image_name="$1" + local image_path + + verbose "destroying image \"${image_name}\"" + if [ "${LOCAL_FILES}" = true ]; then + image_path=$(image_dev_path "${image_name}") + rm -f "${image_path}" + return + fi + + rbd rm "${image_name}" +} + +function map_image() { + [ $# -eq 1 ] || exit 99 + local image_name="$1" # can be image@snap too + + if [ "${LOCAL_FILES}" = true ]; then + return + fi + + sudo rbd map "${image_name}" +} + +function unmap_image() { + [ $# -eq 1 ] || exit 99 + local image_name="$1" # can be image@snap too + local image_path + + if [ "${LOCAL_FILES}" = true ]; then + return + fi + image_path=$(image_dev_path "${image_name}") + + if [ -e "${image_path}" ]; then + sudo rbd unmap "${image_path}" + fi +} + +function map_image_snap() { + [ $# -eq 2 ] || exit 99 + local image_name="$1" + local snap_name="$2" + local image_snap + + if [ "${LOCAL_FILES}" = true ]; then + return + fi + + image_snap="${image_name}@${snap_name}" + map_image "${image_snap}" +} + +function unmap_image_snap() { + [ $# -eq 2 ] || exit 99 + local image_name="$1" + local snap_name="$2" + local image_snap + + if [ "${LOCAL_FILES}" = true ]; then + return + fi + + image_snap="${image_name}@${snap_name}" + unmap_image "${image_snap}" +} + +function create_image_snap() { + [ $# -eq 2 ] || exit 99 + local image_name="$1" + local snap_name="$2" + local image_snap="${image_name}@${snap_name}" + local image_path + local snap_path + + verbose "creating snapshot \"${snap_name}\"" \ + "of image \"${image_name}\"" + if [ "${LOCAL_FILES}" = true ]; then + image_path=$(image_dev_path "${image_name}") + snap_path=$(image_dev_path "${image_snap}") + + cp "${image_path}" "${snap_path}" + return + fi + + rbd snap create "${image_snap}" +} + +function destroy_image_snap() { + [ $# -eq 2 ] || exit 99 + local image_name="$1" + local snap_name="$2" + local image_snap="${image_name}@${snap_name}" + local snap_path + + verbose "destroying snapshot \"${snap_name}\"" \ + "of image \"${image_name}\"" + if [ "${LOCAL_FILES}" = true ]; then + snap_path=$(image_dev_path "${image_snap}") + rm -rf "${snap_path}" + return + fi + + rbd snap rm "${image_snap}" +} + +function create_snap_clone() { + [ $# -eq 4 ] || exit 99 + local image_name="$1" + local snap_name="$2" + local clone_name="$3" + local clone_order="$4" + local image_snap="${image_name}@${snap_name}" + local snap_path + local clone_path + + verbose "creating clone image \"${clone_name}\"" \ + "of image snapshot \"${image_name}@${snap_name}\"" + if [ "${LOCAL_FILES}" = true ]; then + snap_path=$(image_dev_path 
"${image_name}@${snap_name}") + clone_path=$(image_dev_path "${clone_name}") + + cp "${snap_path}" "${clone_path}" + return + fi + + rbd snap protect "${image_snap}" + rbd clone --order "${clone_order}" --image-shared \ + "${image_snap}" "${clone_name}" +} + +function destroy_snap_clone() { + [ $# -eq 3 ] || exit 99 + local image_name="$1" + local snap_name="$2" + local clone_name="$3" + local image_snap="${image_name}@${snap_name}" + local clone_path + + verbose "destroying clone image \"${clone_name}\"" + if [ "${LOCAL_FILES}" = true ]; then + clone_path=$(image_dev_path "${clone_name}") + + rm -rf "${clone_path}" + return + fi + + rbd rm "${clone_name}" + rbd snap unprotect "${image_snap}" +} + +# function that produces "random" data with which to fill the image +function source_data() { + while quiet dd if=/bin/bash skip=$(($$ % 199)) bs="${PAGE_SIZE}"; do + : # Just do the dd + done +} + +function fill_original() { + local image_path=$(image_dev_path "${ORIGINAL}") + + verbose "filling original image" + # Fill 16 objects worth of "random" data + source_data | + quiet dd bs="${PAGE_SIZE}" count=$((16 * OBJECT_PAGES)) \ + of="${image_path}" +} + +function do_read() { + [ $# -eq 3 -o $# -eq 4 ] || exit 99 + local image_name="$1" + local offset="$2" + local length="$3" + [ "${length}" -gt 0 ] || err "do_read: length must be non-zero" + local image_path=$(image_dev_path "${image_name}") + local out_data=$(out_data_dir "${image_name}") + local range=$(printf "%06u~%04u" "${offset}" "${length}") + local out_file + + [ $# -eq 4 ] && offset=$((offset + 16 * OBJECT_PAGES)) + + verbose "reading \"${image_name}\" pages ${range}" + + out_file="${out_data}/pages_${range}" + + quiet dd bs="${PAGE_SIZE}" skip="${offset}" count="${length}" \ + if="${image_path}" of="${out_file}" +} + +function one_pass() { + [ $# -eq 1 -o $# -eq 2 ] || exit 99 + local image_name="$1" + local extended + [ $# -eq 2 ] && extended="true" + local offset + local length + + offset=0 + + # +-----------+-----------+--- + # |X:X:X...X:X| : : ... : | : + # +-----------+-----------+--- + length="${OBJECT_PAGES}" + do_read "${image_name}" "${offset}" "${length}" ${extended} + offset=$((offset + length)) + + # ---+-----------+--- + # : |X: : ... : | : + # ---+-----------+--- + length=1 + do_read "${image_name}" "${offset}" "${length}" ${extended} + offset=$((offset + length)) + + # ---+-----------+--- + # : | :X: ... : | : + # ---+-----------+--- + length=1 + do_read "${image_name}" "${offset}" "${length}" ${extended} + offset=$((offset + length)) + + # ---+-----------+--- + # : | : :X...X: | : + # ---+-----------+--- + length=$((OBJECT_PAGES - 3)) + do_read "${image_name}" "${offset}" "${length}" ${extended} + offset=$((offset + length)) + + # ---+-----------+--- + # : | : : ... :X| : + # ---+-----------+--- + length=1 + do_read "${image_name}" "${offset}" "${length}" ${extended} + offset=$((offset + length)) + + # ---+-----------+--- + # : |X:X:X...X:X| : + # ---+-----------+--- + length="${OBJECT_PAGES}" + do_read "${image_name}" "${offset}" "${length}" ${extended} + offset=$((offset + length)) + + offset=$((offset + 1)) # skip 1 + + # ---+-----------+--- + # : | :X:X...X:X| : + # ---+-----------+--- + length=$((OBJECT_PAGES - 1)) + do_read "${image_name}" "${offset}" "${length}" ${extended} + offset=$((offset + length)) + + # ---+-----------+-----------+--- + # : |X:X:X...X:X|X: : ... 
: | : + # ---+-----------+-----------+--- + length=$((OBJECT_PAGES + 1)) + do_read "${image_name}" "${offset}" "${length}" ${extended} + offset=$((offset + length)) + + # ---+-----------+-----------+--- + # : | :X:X...X:X|X: : ... : | : + # ---+-----------+-----------+--- + length="${OBJECT_PAGES}" + do_read "${image_name}" "${offset}" "${length}" ${extended} + offset=$((offset + length)) + + # ---+-----------+-----------+--- + # : | :X:X...X:X|X:X: ... : | : + # ---+-----------+-----------+--- + length=$((OBJECT_PAGES + 1)) + do_read "${image_name}" "${offset}" "${length}" ${extended} + offset=$((offset + length)) + + # ---+-----------+-----------+--- + # : | : :X...X:X|X:X:X...X:X| : + # ---+-----------+-----------+--- + length=$((2 * OBJECT_PAGES + 2)) + do_read "${image_name}" "${offset}" "${length}" ${extended} + offset=$((offset + length)) + + offset=$((offset + 1)) # skip 1 + + # ---+-----------+-----------+----- + # : | :X:X...X:X|X:X:X...X:X|X: : + # ---+-----------+-----------+----- + length=$((2 * OBJECT_PAGES)) + do_read "${image_name}" "${offset}" "${length}" ${extended} + offset=$((offset + length)) + + # --+-----------+-----------+-------- + # : | :X:X...X:X|X:X:X...X:X|X:X: : + # --+-----------+-----------+-------- + length=2049 + length=$((2 * OBJECT_PAGES + 1)) + do_read "${image_name}" "${offset}" "${length}" ${extended} + # offset=$((offset + length)) +} + +function run_using() { + [ $# -eq 1 ] || exit 99 + local image_name="$1" + local out_data=$(out_data_dir "${image_name}") + + verbose "===== running using \"${image_name}\" =====" + mkdir -p "${out_data}" + one_pass "${image_name}" + one_pass "${image_name}" extended +} + +function compare() { + [ $# -eq 1 ] || exit 99 + local image_name="$1" + local out_data=$(out_data_dir "${image_name}") + local original=$(out_data_dir "${ORIGINAL}") + + verbose "===== comparing \"${image_name}\" =====" + for i in $(ls "${original}"); do + verbose compare "\"${image_name}\" \"${i}\"" + cmp "${original}/${i}" "${out_data}/${i}" + done + [ "${image_name}" = "${ORIGINAL}" ] || rm -rf "${out_data}" +} + +function doit() { + [ $# -eq 1 ] || exit 99 + local image_name="$1" + + run_using "${image_name}" + compare "${image_name}" +} + +########## Start + +parseargs "$@" + +trap teardown EXIT HUP INT +setup + +run_using "${ORIGINAL}" +doit "${ORIGINAL}@${SNAP1}" +if [ "${TEST_CLONES}" = true ]; then + doit "${CLONE1}" + doit "${CLONE1}@${SNAP2}" + doit "${CLONE2}" +fi +rm -rf $(out_data_dir "${ORIGINAL}") + +echo "Success!" 
+ +exit 0 diff --git a/qa/workunits/rbd/import_export.sh b/qa/workunits/rbd/import_export.sh new file mode 100755 index 00000000..89e8d35c --- /dev/null +++ b/qa/workunits/rbd/import_export.sh @@ -0,0 +1,259 @@ +#!/bin/sh -ex + +# V1 image unsupported but required for testing purposes +export RBD_FORCE_ALLOW_V1=1 + +# returns data pool for a given image +get_image_data_pool () { + image=$1 + data_pool=$(rbd info $image | grep "data_pool: " | awk -F':' '{ print $NF }') + if [ -z $data_pool ]; then + data_pool='rbd' + fi + + echo $data_pool +} + +# return list of object numbers populated in image +objects () { + image=$1 + prefix=$(rbd info $image | grep block_name_prefix | awk '{print $NF;}') + + # strip off prefix and leading zeros from objects; sort, although + # it doesn't necessarily make sense as they're hex, at least it makes + # the list repeatable and comparable + objects=$(rados ls -p $(get_image_data_pool $image) | grep $prefix | \ + sed -e 's/'$prefix'\.//' -e 's/^0*\([0-9a-f]\)/\1/' | sort -u) + echo $objects +} + +# return false if either files don't compare or their ondisk +# sizes don't compare + +compare_files_and_ondisk_sizes () { + cmp -l $1 $2 || return 1 + origsize=$(stat $1 --format %b) + exportsize=$(stat $2 --format %b) + difference=$(($exportsize - $origsize)) + difference=${difference#-} # absolute value + test $difference -ge 0 -a $difference -lt 4096 +} + +TMPDIR=/tmp/rbd_import_export_$$ +rm -rf $TMPDIR +mkdir $TMPDIR +trap "rm -rf $TMPDIR" INT TERM EXIT + +# cannot import a dir +mkdir foo.$$ +rbd import foo.$$ foo.dir && exit 1 || true # should fail +rmdir foo.$$ + +# create a sparse file +dd if=/bin/sh of=${TMPDIR}/img bs=1k count=1 seek=10 +dd if=/bin/dd of=${TMPDIR}/img bs=1k count=10 seek=100 +dd if=/bin/rm of=${TMPDIR}/img bs=1k count=100 seek=1000 +dd if=/bin/ls of=${TMPDIR}/img bs=1k seek=10000 +dd if=/bin/ln of=${TMPDIR}/img bs=1k seek=100000 +dd if=/bin/grep of=${TMPDIR}/img bs=1k seek=1000000 + +rbd rm testimg || true + +rbd import $RBD_CREATE_ARGS ${TMPDIR}/img testimg +rbd export testimg ${TMPDIR}/img2 +rbd export testimg - > ${TMPDIR}/img3 +rbd rm testimg +cmp ${TMPDIR}/img ${TMPDIR}/img2 +cmp ${TMPDIR}/img ${TMPDIR}/img3 +rm ${TMPDIR}/img2 ${TMPDIR}/img3 + +# try again, importing from stdin +rbd import $RBD_CREATE_ARGS - testimg < ${TMPDIR}/img +rbd export testimg ${TMPDIR}/img2 +rbd export testimg - > ${TMPDIR}/img3 +rbd rm testimg +cmp ${TMPDIR}/img ${TMPDIR}/img2 +cmp ${TMPDIR}/img ${TMPDIR}/img3 + +rm ${TMPDIR}/img ${TMPDIR}/img2 ${TMPDIR}/img3 + +if rbd help export | grep -q export-format; then + # try with --export-format for snapshots + dd if=/bin/dd of=${TMPDIR}/img bs=1k count=10 seek=100 + rbd import $RBD_CREATE_ARGS ${TMPDIR}/img testimg + rbd snap create testimg@snap + rbd image-meta set testimg key1 value1 + IMAGEMETA_BEFORE=`rbd image-meta list testimg` + rbd export --export-format 2 testimg ${TMPDIR}/img_v2 + rbd import --export-format 2 ${TMPDIR}/img_v2 testimg_import + rbd info testimg_import + rbd info testimg_import@snap + IMAGEMETA_AFTER=`rbd image-meta list testimg_import` + [ "$IMAGEMETA_BEFORE" = "$IMAGEMETA_AFTER" ] + + # compare the contents between testimg and testimg_import + rbd export testimg_import ${TMPDIR}/img_import + compare_files_and_ondisk_sizes ${TMPDIR}/img ${TMPDIR}/img_import + + rbd export testimg@snap ${TMPDIR}/img_snap + rbd export testimg_import@snap ${TMPDIR}/img_snap_import + compare_files_and_ondisk_sizes ${TMPDIR}/img_snap ${TMPDIR}/img_snap_import + + rm ${TMPDIR}/img_v2 + rm ${TMPDIR}/img_import + 
rm ${TMPDIR}/img_snap + rm ${TMPDIR}/img_snap_import + + rbd snap rm testimg_import@snap + rbd remove testimg_import + rbd snap rm testimg@snap + rbd rm testimg + + # order + rbd import --order 20 ${TMPDIR}/img testimg + rbd export --export-format 2 testimg ${TMPDIR}/img_v2 + rbd import --export-format 2 ${TMPDIR}/img_v2 testimg_import + rbd info testimg_import|grep order|awk '{print $2}'|grep 20 + + rm ${TMPDIR}/img_v2 + + rbd remove testimg_import + rbd remove testimg + + # features + rbd import --image-feature layering ${TMPDIR}/img testimg + FEATURES_BEFORE=`rbd info testimg|grep features` + rbd export --export-format 2 testimg ${TMPDIR}/img_v2 + rbd import --export-format 2 ${TMPDIR}/img_v2 testimg_import + FEATURES_AFTER=`rbd info testimg_import|grep features` + if [ "$FEATURES_BEFORE" != "$FEATURES_AFTER" ]; then + false + fi + + rm ${TMPDIR}/img_v2 + + rbd remove testimg_import + rbd remove testimg + + # stripe + rbd import --stripe-count 1000 --stripe-unit 4096 ${TMPDIR}/img testimg + rbd export --export-format 2 testimg ${TMPDIR}/img_v2 + rbd import --export-format 2 ${TMPDIR}/img_v2 testimg_import + rbd info testimg_import|grep "stripe unit"|grep -Ei '(4 KiB|4096)' + rbd info testimg_import|grep "stripe count"|awk '{print $3}'|grep 1000 + + rm ${TMPDIR}/img_v2 + + rbd remove testimg_import + rbd remove testimg + + # snap protect + rbd import --image-format=2 ${TMPDIR}/img testimg + rbd snap create testimg@snap1 + rbd snap create testimg@snap2 + rbd snap protect testimg@snap2 + rbd export --export-format 2 testimg ${TMPDIR}/snap_protect + rbd import --export-format 2 ${TMPDIR}/snap_protect testimg_import + rbd info testimg_import@snap1 | grep 'protected: False' + rbd info testimg_import@snap2 | grep 'protected: True' + + rm ${TMPDIR}/snap_protect + + rbd snap unprotect testimg@snap2 + rbd snap unprotect testimg_import@snap2 + rbd snap purge testimg + rbd snap purge testimg_import + rbd remove testimg + rbd remove testimg_import +fi + +tiered=0 +if ceph osd dump | grep ^pool | grep "'rbd'" | grep tier; then + tiered=1 +fi + +# create specifically sparse files +# 1 1M block of sparse, 1 1M block of random +dd if=/dev/urandom bs=1M seek=1 count=1 of=${TMPDIR}/sparse1 + +# 1 1M block of random, 1 1M block of sparse +dd if=/dev/urandom bs=1M count=1 of=${TMPDIR}/sparse2; truncate ${TMPDIR}/sparse2 -s 2M + +# 1M-block images; validate resulting blocks + +# 1M sparse, 1M data +rbd rm sparse1 || true +rbd import $RBD_CREATE_ARGS --order 20 ${TMPDIR}/sparse1 +rbd ls -l | grep sparse1 | grep -Ei '(2 MiB|2048k)' +[ $tiered -eq 1 -o "$(objects sparse1)" = '1' ] + +# export, compare contents and on-disk size +rbd export sparse1 ${TMPDIR}/sparse1.out +compare_files_and_ondisk_sizes ${TMPDIR}/sparse1 ${TMPDIR}/sparse1.out +rm ${TMPDIR}/sparse1.out +rbd rm sparse1 + +# 1M data, 1M sparse +rbd rm sparse2 || true +rbd import $RBD_CREATE_ARGS --order 20 ${TMPDIR}/sparse2 +rbd ls -l | grep sparse2 | grep -Ei '(2 MiB|2048k)' +[ $tiered -eq 1 -o "$(objects sparse2)" = '0' ] +rbd export sparse2 ${TMPDIR}/sparse2.out +compare_files_and_ondisk_sizes ${TMPDIR}/sparse2 ${TMPDIR}/sparse2.out +rm ${TMPDIR}/sparse2.out +rbd rm sparse2 + +# extend sparse1 to 10 1M blocks, sparse at the end +truncate ${TMPDIR}/sparse1 -s 10M +# import from stdin just for fun, verify still sparse +rbd import $RBD_CREATE_ARGS --order 20 - sparse1 < ${TMPDIR}/sparse1 +rbd ls -l | grep sparse1 | grep -Ei '(10 MiB|10240k)' +[ $tiered -eq 1 -o "$(objects sparse1)" = '1' ] +rbd export sparse1 ${TMPDIR}/sparse1.out 
+compare_files_and_ondisk_sizes ${TMPDIR}/sparse1 ${TMPDIR}/sparse1.out
+rm ${TMPDIR}/sparse1.out
+rbd rm sparse1
+
+# extend sparse2 to 4M total with two more nonsparse megs
+dd if=/dev/urandom bs=2M count=1 of=${TMPDIR}/sparse2 oflag=append conv=notrunc
+# again from stdin
+rbd import $RBD_CREATE_ARGS --order 20 - sparse2 < ${TMPDIR}/sparse2
+rbd ls -l | grep sparse2 | grep -Ei '(4 MiB|4096k)'
+[ $tiered -eq 1 -o "$(objects sparse2)" = '0 2 3' ]
+rbd export sparse2 ${TMPDIR}/sparse2.out
+compare_files_and_ondisk_sizes ${TMPDIR}/sparse2 ${TMPDIR}/sparse2.out
+rm ${TMPDIR}/sparse2.out
+rbd rm sparse2
+
+# zeros import to a sparse image.  Note: an all-zero file currently
+# doesn't import sparsely due to the way we handle 'empty' fiemaps;
+# the image ends up zero-filled.
+
+echo "partially-sparse file imports to partially-sparse image"
+rbd import $RBD_CREATE_ARGS --order 20 ${TMPDIR}/sparse1 sparse
+[ $tiered -eq 1 -o "$(objects sparse)" = '1' ]
+rbd rm sparse
+
+echo "zeros import through stdin to sparse image"
+# stdin
+dd if=/dev/zero bs=1M count=4 | rbd import $RBD_CREATE_ARGS - sparse
+[ $tiered -eq 1 -o "$(objects sparse)" = '' ]
+rbd rm sparse
+
+echo "zeros export to sparse file"
+# need to create the image "by hand"; import won't create a zero image
+rbd create $RBD_CREATE_ARGS sparse --size 4
+prefix=$(rbd info sparse | grep block_name_prefix | awk '{print $NF;}')
+# drop in 0 object directly
+dd if=/dev/zero bs=4M count=1 | rados -p $(get_image_data_pool sparse) \
+    put ${prefix}.000000000000 -
+[ $tiered -eq 1 -o "$(objects sparse)" = '0' ]
+# 1 object full of zeros; export should still create 0-disk-usage file
+rm ${TMPDIR}/sparse || true
+rbd export sparse ${TMPDIR}/sparse
+[ $(stat ${TMPDIR}/sparse --format=%b) = '0' ]
+rbd rm sparse
+
+rm ${TMPDIR}/sparse ${TMPDIR}/sparse1 ${TMPDIR}/sparse2 ${TMPDIR}/sparse3 || true
+
+echo OK
diff --git a/qa/workunits/rbd/issue-20295.sh b/qa/workunits/rbd/issue-20295.sh
new file mode 100755
index 00000000..3d617a06
--- /dev/null
+++ b/qa/workunits/rbd/issue-20295.sh
@@ -0,0 +1,18 @@
+#!/bin/sh -ex
+
+TEST_POOL=ecpool
+TEST_IMAGE=test1
+PGS=12
+
+ceph osd pool create $TEST_POOL $PGS $PGS erasure
+ceph osd pool application enable $TEST_POOL rbd
+ceph osd pool set $TEST_POOL allow_ec_overwrites true
+rbd --data-pool $TEST_POOL create --size 1024G $TEST_IMAGE
+rbd bench \
+    --io-type write \
+    --io-size 4096 \
+    --io-pattern=rand \
+    --io-total 100M \
+    $TEST_IMAGE
+
+echo "OK"
diff --git a/qa/workunits/rbd/journal.sh b/qa/workunits/rbd/journal.sh
new file mode 100755
index 00000000..ba89e75c
--- /dev/null
+++ b/qa/workunits/rbd/journal.sh
@@ -0,0 +1,326 @@
+#!/usr/bin/env bash
+set -e
+
+. $(dirname $0)/../../standalone/ceph-helpers.sh
+
+function list_tests()
+{
+    echo "AVAILABLE TESTS"
+    for i in $TESTS; do
+        echo " $i"
+    done
+}
+
+function usage()
+{
+    echo "usage: $0 [-h|-l|-t <testname> [-t <testname>...]
[--no-cleanup]]" +} + +function expect_false() +{ + set -x + if "$@"; then return 1; else return 0; fi +} + +function save_commit_position() +{ + local journal=$1 + + rados -p rbd getomapval journal.${journal} client_ \ + $TMPDIR/${journal}.client_.omap +} + +function restore_commit_position() +{ + local journal=$1 + + rados -p rbd setomapval journal.${journal} client_ \ + < $TMPDIR/${journal}.client_.omap +} + +test_rbd_journal() +{ + local image=testrbdjournal$$ + + rbd create --image-feature exclusive-lock --image-feature journaling \ + --size 128 ${image} + local journal=$(rbd info ${image} --format=xml 2>/dev/null | + $XMLSTARLET sel -t -v "//image/journal") + test -n "${journal}" + rbd journal info ${journal} + rbd journal info --journal ${journal} + rbd journal info --image ${image} + + rbd feature disable ${image} journaling + + rbd info ${image} --format=xml 2>/dev/null | + expect_false $XMLSTARLET sel -t -v "//image/journal" + expect_false rbd journal info ${journal} + expect_false rbd journal info --image ${image} + + rbd feature enable ${image} journaling + + local journal1=$(rbd info ${image} --format=xml 2>/dev/null | + $XMLSTARLET sel -t -v "//image/journal") + test "${journal}" = "${journal1}" + + rbd journal info ${journal} + + rbd journal status ${journal} + + local count=10 + save_commit_position ${journal} + rbd bench --io-type write ${image} --io-size 4096 --io-threads 1 \ + --io-total $((4096 * count)) --io-pattern seq + rbd journal status --image ${image} | fgrep "tid=$((count - 1))" + restore_commit_position ${journal} + rbd journal status --image ${image} | fgrep "positions=[]" + local count1=$(rbd journal inspect --verbose ${journal} | + grep -c 'event_type.*AioWrite') + test "${count}" -eq "${count1}" + + rbd journal export ${journal} $TMPDIR/journal.export + local size=$(stat -c "%s" $TMPDIR/journal.export) + test "${size}" -gt 0 + + rbd export ${image} $TMPDIR/${image}.export + + local image1=${image}1 + rbd create --image-feature exclusive-lock --image-feature journaling \ + --size 128 ${image1} + journal1=$(rbd info ${image1} --format=xml 2>/dev/null | + $XMLSTARLET sel -t -v "//image/journal") + + save_commit_position ${journal1} + rbd journal import --dest ${image1} $TMPDIR/journal.export + rbd snap create ${image1}@test + restore_commit_position ${journal1} + # check that commit position is properly updated: the journal should contain + # 14 entries (2 AioFlush + 10 AioWrite + 1 SnapCreate + 1 OpFinish) and + # commit position set to tid=14 + rbd journal inspect --image ${image1} --verbose | awk ' + /AioFlush/ {a++} # match: "event_type": "AioFlush", + /AioWrite/ {w++} # match: "event_type": "AioWrite", + /SnapCreate/ {s++} # match: "event_type": "SnapCreate", + /OpFinish/ {f++} # match: "event_type": "OpFinish", + /entries inspected/ {t=$1; e=$4} # match: 14 entries inspected, 0 errors + {print} # for diagnostic + END { + if (a != 2 || w != 10 || s != 1 || f != 1 || t != 14 || e != 0) exit(1) + } + ' + + rbd export ${image1}@test $TMPDIR/${image1}.export + cmp $TMPDIR/${image}.export $TMPDIR/${image1}.export + + rbd journal reset ${journal} + + rbd journal inspect --verbose ${journal} | expect_false grep 'event_type' + + rbd snap purge ${image1} + rbd remove ${image1} + rbd remove ${image} +} + + +rbd_assert_eq() { + local image=$1 + local cmd=$2 + local param=$3 + local expected_val=$4 + + local val=$(rbd --format xml ${cmd} --image ${image} | + $XMLSTARLET sel -t -v "${param}") + test "${val}" = "${expected_val}" +} + +test_rbd_create() +{ + local 
image=testrbdcreate$$ + + rbd create --image-feature exclusive-lock --image-feature journaling \ + --journal-pool rbd \ + --journal-object-size 20M \ + --journal-splay-width 6 \ + --size 256 ${image} + + rbd_assert_eq ${image} 'journal info' '//journal/order' 25 + rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6 + rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd + + rbd remove ${image} +} + +test_rbd_copy() +{ + local src=testrbdcopys$$ + rbd create --size 256 ${src} + + local image=testrbdcopy$$ + rbd copy --image-feature exclusive-lock --image-feature journaling \ + --journal-pool rbd \ + --journal-object-size 20M \ + --journal-splay-width 6 \ + ${src} ${image} + + rbd remove ${src} + + rbd_assert_eq ${image} 'journal info' '//journal/order' 25 + rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6 + rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd + + rbd remove ${image} +} + +test_rbd_deep_copy() +{ + local src=testrbdcopys$$ + rbd create --size 256 ${src} + rbd snap create ${src}@snap1 + + local dest=testrbdcopy$$ + rbd deep copy --image-feature exclusive-lock --image-feature journaling \ + --journal-pool rbd \ + --journal-object-size 20M \ + --journal-splay-width 6 \ + ${src} ${dest} + + rbd snap purge ${src} + rbd remove ${src} + + rbd_assert_eq ${dest} 'journal info' '//journal/order' 25 + rbd_assert_eq ${dest} 'journal info' '//journal/splay_width' 6 + rbd_assert_eq ${dest} 'journal info' '//journal/object_pool' rbd + + rbd snap purge ${dest} + rbd remove ${dest} +} + +test_rbd_clone() +{ + local parent=testrbdclonep$$ + rbd create --image-feature layering --size 256 ${parent} + rbd snap create ${parent}@snap + rbd snap protect ${parent}@snap + + local image=testrbdclone$$ + rbd clone --image-feature layering --image-feature exclusive-lock --image-feature journaling \ + --journal-pool rbd \ + --journal-object-size 20M \ + --journal-splay-width 6 \ + ${parent}@snap ${image} + + rbd_assert_eq ${image} 'journal info' '//journal/order' 25 + rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6 + rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd + + rbd remove ${image} + rbd snap unprotect ${parent}@snap + rbd snap purge ${parent} + rbd remove ${parent} +} + +test_rbd_import() +{ + local src=testrbdimports$$ + rbd create --size 256 ${src} + + rbd export ${src} $TMPDIR/${src}.export + rbd remove ${src} + + local image=testrbdimport$$ + rbd import --image-feature exclusive-lock --image-feature journaling \ + --journal-pool rbd \ + --journal-object-size 20M \ + --journal-splay-width 6 \ + $TMPDIR/${src}.export ${image} + + rbd_assert_eq ${image} 'journal info' '//journal/order' 25 + rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6 + rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd + + rbd remove ${image} +} + +test_rbd_feature() +{ + local image=testrbdfeature$$ + + rbd create --image-feature exclusive-lock --size 256 ${image} + + rbd feature enable ${image} journaling \ + --journal-pool rbd \ + --journal-object-size 20M \ + --journal-splay-width 6 + + rbd_assert_eq ${image} 'journal info' '//journal/order' 25 + rbd_assert_eq ${image} 'journal info' '//journal/splay_width' 6 + rbd_assert_eq ${image} 'journal info' '//journal/object_pool' rbd + + rbd remove ${image} +} + +TESTS+=" rbd_journal" +TESTS+=" rbd_create" +TESTS+=" rbd_copy" +TESTS+=" rbd_clone" +TESTS+=" rbd_import" +TESTS+=" rbd_feature" + +# +# "main" follows +# + +tests_to_run=() + +cleanup=true + +while [[ $# 
-gt 0 ]]; do + opt=$1 + + case "$opt" in + "-l" ) + do_list=1 + ;; + "--no-cleanup" ) + cleanup=false + ;; + "-t" ) + shift + if [[ -z "$1" ]]; then + echo "missing argument to '-t'" + usage ; + exit 1 + fi + tests_to_run+=" $1" + ;; + "-h" ) + usage ; + exit 0 + ;; + esac + shift +done + +if [[ $do_list -eq 1 ]]; then + list_tests ; + exit 0 +fi + +TMPDIR=/tmp/rbd_journal$$ +mkdir $TMPDIR +if $cleanup; then + trap "rm -fr $TMPDIR" 0 +fi + +if test -z "$tests_to_run" ; then + tests_to_run="$TESTS" +fi + +for i in $tests_to_run; do + set -x + test_${i} + set +x +done + +echo OK diff --git a/qa/workunits/rbd/kernel.sh b/qa/workunits/rbd/kernel.sh new file mode 100755 index 00000000..faa5760e --- /dev/null +++ b/qa/workunits/rbd/kernel.sh @@ -0,0 +1,100 @@ +#!/usr/bin/env bash +set -ex + +CEPH_SECRET_FILE=${CEPH_SECRET_FILE:-} +CEPH_ID=${CEPH_ID:-admin} +SECRET_ARGS='' +if [ ! -z $CEPH_SECRET_FILE ]; then + SECRET_ARGS="--secret $CEPH_SECRET_FILE" +fi + +TMP_FILES="/tmp/img1 /tmp/img1.small /tmp/img1.snap1 /tmp/img1.export /tmp/img1.trunc" + +function expect_false() { + if "$@"; then return 1; else return 0; fi +} + +function get_device_dir { + local POOL=$1 + local IMAGE=$2 + local SNAP=$3 + rbd device list | tail -n +2 | egrep "\s+$POOL\s+$IMAGE\s+$SNAP\s+" | + awk '{print $1;}' +} + +function clean_up { + [ -e /dev/rbd/rbd/testimg1@snap1 ] && + sudo rbd device unmap /dev/rbd/rbd/testimg1@snap1 + if [ -e /dev/rbd/rbd/testimg1 ]; then + sudo rbd device unmap /dev/rbd/rbd/testimg1 + rbd snap purge testimg1 || true + fi + rbd ls | grep testimg1 > /dev/null && rbd rm testimg1 || true + sudo rm -f $TMP_FILES +} + +clean_up + +trap clean_up INT TERM EXIT + +# create an image +dd if=/bin/sh of=/tmp/img1 bs=1k count=1 seek=10 +dd if=/bin/dd of=/tmp/img1 bs=1k count=10 seek=100 +dd if=/bin/rm of=/tmp/img1 bs=1k count=100 seek=1000 +dd if=/bin/ls of=/tmp/img1 bs=1k seek=10000 +dd if=/bin/ln of=/tmp/img1 bs=1k seek=100000 +dd if=/dev/zero of=/tmp/img1 count=0 seek=150000 + +# import +rbd import /tmp/img1 testimg1 +sudo rbd device map testimg1 --user $CEPH_ID $SECRET_ARGS + +DEV_ID1=$(get_device_dir rbd testimg1 -) +echo "dev_id1 = $DEV_ID1" +cat /sys/bus/rbd/devices/$DEV_ID1/size +cat /sys/bus/rbd/devices/$DEV_ID1/size | grep 76800000 + +sudo dd if=/dev/rbd/rbd/testimg1 of=/tmp/img1.export +cmp /tmp/img1 /tmp/img1.export + +# snapshot +rbd snap create testimg1 --snap=snap1 +sudo rbd device map --snap=snap1 testimg1 --user $CEPH_ID $SECRET_ARGS + +DEV_ID2=$(get_device_dir rbd testimg1 snap1) +cat /sys/bus/rbd/devices/$DEV_ID2/size | grep 76800000 + +sudo dd if=/dev/rbd/rbd/testimg1@snap1 of=/tmp/img1.snap1 +cmp /tmp/img1 /tmp/img1.snap1 + +# resize +rbd resize testimg1 --size=40 --allow-shrink +cat /sys/bus/rbd/devices/$DEV_ID1/size | grep 41943040 +cat /sys/bus/rbd/devices/$DEV_ID2/size | grep 76800000 + +sudo dd if=/dev/rbd/rbd/testimg1 of=/tmp/img1.small +cp /tmp/img1 /tmp/img1.trunc +truncate -s 41943040 /tmp/img1.trunc +cmp /tmp/img1.trunc /tmp/img1.small + +# rollback expects an unlocked image +# (acquire and) release the lock as a side effect +rbd bench --io-type read --io-size 1 --io-threads 1 --io-total 1 testimg1 + +# rollback and check data again +rbd snap rollback --snap=snap1 testimg1 +cat /sys/bus/rbd/devices/$DEV_ID1/size | grep 76800000 +cat /sys/bus/rbd/devices/$DEV_ID2/size | grep 76800000 +sudo rm -f /tmp/img1.snap1 /tmp/img1.export + +sudo dd if=/dev/rbd/rbd/testimg1@snap1 of=/tmp/img1.snap1 +cmp /tmp/img1 /tmp/img1.snap1 +sudo dd if=/dev/rbd/rbd/testimg1 of=/tmp/img1.export +cmp 
/tmp/img1 /tmp/img1.export + +# zeros are returned if an image or a snapshot is removed +expect_false cmp -n 76800000 /dev/rbd/rbd/testimg1@snap1 /dev/zero +rbd snap rm --snap=snap1 testimg1 +cmp -n 76800000 /dev/rbd/rbd/testimg1@snap1 /dev/zero + +echo OK diff --git a/qa/workunits/rbd/krbd_data_pool.sh b/qa/workunits/rbd/krbd_data_pool.sh new file mode 100755 index 00000000..e8fc8348 --- /dev/null +++ b/qa/workunits/rbd/krbd_data_pool.sh @@ -0,0 +1,206 @@ +#!/usr/bin/env bash + +set -ex + +export RBD_FORCE_ALLOW_V1=1 + +function fill_image() { + local spec=$1 + + local dev + dev=$(sudo rbd map $spec) + xfs_io -c "pwrite -b $OBJECT_SIZE -S 0x78 -W 0 $IMAGE_SIZE" $dev + sudo rbd unmap $dev +} + +function create_clones() { + local spec=$1 + + rbd snap create $spec@snap + rbd snap protect $spec@snap + + local pool=${spec%/*} # pool/image is assumed + local image=${spec#*/} + local child_pool + for child_pool in $pool clonesonly; do + rbd clone $spec@snap $child_pool/$pool-$image-clone1 + rbd clone $spec@snap --data-pool repdata $child_pool/$pool-$image-clone2 + rbd clone $spec@snap --data-pool ecdata $child_pool/$pool-$image-clone3 + done +} + +function trigger_copyup() { + local spec=$1 + + local dev + dev=$(sudo rbd map $spec) + local i + { + for ((i = 0; i < $NUM_OBJECTS; i++)); do + echo pwrite -b $OBJECT_SIZE -S 0x59 $((i * OBJECT_SIZE + OBJECT_SIZE / 2)) $((OBJECT_SIZE / 2)) + done + echo fsync + echo quit + } | xfs_io $dev + sudo rbd unmap $dev +} + +function compare() { + local spec=$1 + local object=$2 + + local dev + dev=$(sudo rbd map $spec) + local i + for ((i = 0; i < $NUM_OBJECTS; i++)); do + dd if=$dev bs=$OBJECT_SIZE count=1 skip=$i | cmp $object - + done + sudo rbd unmap $dev +} + +function mkfs_and_mount() { + local spec=$1 + + local dev + dev=$(sudo rbd map $spec) + blkdiscard $dev + mkfs.ext4 -q -E nodiscard $dev + sudo mount $dev /mnt + sudo umount /mnt + sudo rbd unmap $dev +} + +function list_HEADs() { + local pool=$1 + + rados -p $pool ls | while read obj; do + if rados -p $pool stat $obj >/dev/null 2>&1; then + echo $obj + fi + done +} + +function count_data_objects() { + local spec=$1 + + local pool + pool=$(rbd info $spec | grep 'data_pool: ' | awk '{ print $NF }') + if [[ -z $pool ]]; then + pool=${spec%/*} # pool/image is assumed + fi + + local prefix + prefix=$(rbd info $spec | grep 'block_name_prefix: ' | awk '{ print $NF }') + rados -p $pool ls | grep -c $prefix +} + +function get_num_clones() { + local pool=$1 + + rados -p $pool --format=json df | + python -c 'import sys, json; print json.load(sys.stdin)["pools"][0]["num_object_clones"]' +} + +ceph osd pool create repdata 24 24 +rbd pool init repdata +ceph osd erasure-code-profile set teuthologyprofile crush-failure-domain=osd m=1 k=2 +ceph osd pool create ecdata 24 24 erasure teuthologyprofile +rbd pool init ecdata +ceph osd pool set ecdata allow_ec_overwrites true +ceph osd pool create rbdnonzero 24 24 +rbd pool init rbdnonzero +ceph osd pool create clonesonly 24 24 +rbd pool init clonesonly + +for pool in rbd rbdnonzero; do + rbd create --size 200 --image-format 1 $pool/img0 + rbd create --size 200 $pool/img1 + rbd create --size 200 --data-pool repdata $pool/img2 + rbd create --size 200 --data-pool ecdata $pool/img3 +done + +IMAGE_SIZE=$(rbd info --format=json img1 | python -c 'import sys, json; print json.load(sys.stdin)["size"]') +OBJECT_SIZE=$(rbd info --format=json img1 | python -c 'import sys, json; print json.load(sys.stdin)["object_size"]') +NUM_OBJECTS=$((IMAGE_SIZE / OBJECT_SIZE)) +[[ $((IMAGE_SIZE 
% OBJECT_SIZE)) -eq 0 ]] + +OBJECT_X=$(mktemp) # xxxx +xfs_io -c "pwrite -b $OBJECT_SIZE -S 0x78 0 $OBJECT_SIZE" $OBJECT_X + +OBJECT_XY=$(mktemp) # xxYY +xfs_io -c "pwrite -b $OBJECT_SIZE -S 0x78 0 $((OBJECT_SIZE / 2))" \ + -c "pwrite -b $OBJECT_SIZE -S 0x59 $((OBJECT_SIZE / 2)) $((OBJECT_SIZE / 2))" \ + $OBJECT_XY + +for pool in rbd rbdnonzero; do + for i in {0..3}; do + fill_image $pool/img$i + if [[ $i -ne 0 ]]; then + create_clones $pool/img$i + for child_pool in $pool clonesonly; do + for j in {1..3}; do + trigger_copyup $child_pool/$pool-img$i-clone$j + done + done + fi + done +done + +# rbd_directory, rbd_children, rbd_info + img0 header + ... +NUM_META_RBDS=$((3 + 1 + 3 * (1*2 + 3*2))) +# rbd_directory, rbd_children, rbd_info + ... +NUM_META_CLONESONLY=$((3 + 2 * 3 * (3*2))) + +[[ $(rados -p rbd ls | wc -l) -eq $((NUM_META_RBDS + 5 * NUM_OBJECTS)) ]] +[[ $(rados -p repdata ls | wc -l) -eq $((1 + 14 * NUM_OBJECTS)) ]] +[[ $(rados -p ecdata ls | wc -l) -eq $((1 + 14 * NUM_OBJECTS)) ]] +[[ $(rados -p rbdnonzero ls | wc -l) -eq $((NUM_META_RBDS + 5 * NUM_OBJECTS)) ]] +[[ $(rados -p clonesonly ls | wc -l) -eq $((NUM_META_CLONESONLY + 6 * NUM_OBJECTS)) ]] + +for pool in rbd rbdnonzero; do + for i in {0..3}; do + [[ $(count_data_objects $pool/img$i) -eq $NUM_OBJECTS ]] + if [[ $i -ne 0 ]]; then + for child_pool in $pool clonesonly; do + for j in {1..3}; do + [[ $(count_data_objects $child_pool/$pool-img$i-clone$j) -eq $NUM_OBJECTS ]] + done + done + fi + done +done + +[[ $(get_num_clones rbd) -eq 0 ]] +[[ $(get_num_clones repdata) -eq 0 ]] +[[ $(get_num_clones ecdata) -eq 0 ]] +[[ $(get_num_clones rbdnonzero) -eq 0 ]] +[[ $(get_num_clones clonesonly) -eq 0 ]] + +for pool in rbd rbdnonzero; do + for i in {0..3}; do + compare $pool/img$i $OBJECT_X + mkfs_and_mount $pool/img$i + if [[ $i -ne 0 ]]; then + for child_pool in $pool clonesonly; do + for j in {1..3}; do + compare $child_pool/$pool-img$i-clone$j $OBJECT_XY + done + done + fi + done +done + +# mkfs_and_mount should discard some objects everywhere but in clonesonly +[[ $(list_HEADs rbd | wc -l) -lt $((NUM_META_RBDS + 5 * NUM_OBJECTS)) ]] +[[ $(list_HEADs repdata | wc -l) -lt $((1 + 14 * NUM_OBJECTS)) ]] +[[ $(list_HEADs ecdata | wc -l) -lt $((1 + 14 * NUM_OBJECTS)) ]] +[[ $(list_HEADs rbdnonzero | wc -l) -lt $((NUM_META_RBDS + 5 * NUM_OBJECTS)) ]] +[[ $(list_HEADs clonesonly | wc -l) -eq $((NUM_META_CLONESONLY + 6 * NUM_OBJECTS)) ]] + +[[ $(get_num_clones rbd) -eq $NUM_OBJECTS ]] +[[ $(get_num_clones repdata) -eq $((2 * NUM_OBJECTS)) ]] +[[ $(get_num_clones ecdata) -eq $((2 * NUM_OBJECTS)) ]] +[[ $(get_num_clones rbdnonzero) -eq $NUM_OBJECTS ]] +[[ $(get_num_clones clonesonly) -eq 0 ]] + +echo OK diff --git a/qa/workunits/rbd/krbd_exclusive_option.sh b/qa/workunits/rbd/krbd_exclusive_option.sh new file mode 100755 index 00000000..d7bcbb6d --- /dev/null +++ b/qa/workunits/rbd/krbd_exclusive_option.sh @@ -0,0 +1,233 @@ +#!/usr/bin/env bash + +set -ex + +function expect_false() { + if "$@"; then return 1; else return 0; fi +} + +function assert_locked() { + local dev_id="${1#/dev/rbd}" + + local client_addr + client_addr="$(< $SYSFS_DIR/$dev_id/client_addr)" + + local client_id + client_id="$(< $SYSFS_DIR/$dev_id/client_id)" + # client4324 -> client.4324 + client_id="client.${client_id#client}" + + local watch_cookie + watch_cookie="$(rados -p rbd listwatchers rbd_header.$IMAGE_ID | + grep $client_id | cut -d ' ' -f 3 | cut -d '=' -f 2)" + [[ $(echo -n "$watch_cookie" | grep -c '^') -eq 1 ]] + + local actual + actual="$(rados -p rbd 
--format=json lock info rbd_header.$IMAGE_ID rbd_lock | + python -m json.tool)" + + local expected + expected="$(cat <