Diffstat (limited to 'qa/suites')
110 files changed, 1091 insertions, 65 deletions
diff --git a/qa/suites/fs/cephadm/renamevolume/1-rename.yaml b/qa/suites/fs/cephadm/renamevolume/1-rename.yaml
index 7f9bc8906..e1d5b9b33 100644
--- a/qa/suites/fs/cephadm/renamevolume/1-rename.yaml
+++ b/qa/suites/fs/cephadm/renamevolume/1-rename.yaml
@@ -1,7 +1,11 @@
 tasks:
 - cephadm.shell:
     host.a:
+      - ceph fs fail foo
+      - ceph fs set foo refuse_client_session true
       - ceph fs volume rename foo bar --yes-i-really-mean-it
+      - ceph fs set bar joinable true
+      - ceph fs set bar refuse_client_session false
 - fs.ready:
     timeout: 300
 - cephadm.shell:
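[editor's note] The added steps are the prerequisite dance for `ceph fs volume rename`: the volume must be failed and client sessions refused before the rename, then made joinable and reopened afterwards. A minimal manual equivalent (volume names foo/bar are the suite's examples, not fixed names):

    ceph fs fail foo                                  # take the file system offline
    ceph fs set foo refuse_client_session true        # evict and block client sessions
    ceph fs volume rename foo bar --yes-i-really-mean-it
    ceph fs set bar joinable true                     # let MDS ranks rejoin the renamed fs
    ceph fs set bar refuse_client_session false       # re-admit clients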
diff --git a/qa/suites/fs/full/tasks/mgr-osd-full.yaml b/qa/suites/fs/full/tasks/mgr-osd-full.yaml
index b4f673e39..a005f5203 100644
--- a/qa/suites/fs/full/tasks/mgr-osd-full.yaml
+++ b/qa/suites/fs/full/tasks/mgr-osd-full.yaml
@@ -12,7 +12,7 @@ overrides:
       debug mds: 20
     osd: # force bluestore since it's required for ec overwrites
       osd objectstore: bluestore
-      bluestore block size: 1073741824
+      bluestore block size: 2147483648
 tasks:
 - workunit:
     cleanup: true
diff --git a/qa/suites/fs/functional/subvol_versions/.qa b/qa/suites/fs/functional/subvol_versions/.qa
new file mode 120000
index 000000000..fea2489fd
--- /dev/null
+++ b/qa/suites/fs/functional/subvol_versions/.qa
@@ -0,0 +1 @@
+../.qa
\ No newline at end of file
diff --git a/qa/suites/fs/functional/subvol_versions/create_subvol_version_v1.yaml b/qa/suites/fs/functional/subvol_versions/create_subvol_version_v1.yaml
new file mode 120000
index 000000000..09cfdb59e
--- /dev/null
+++ b/qa/suites/fs/functional/subvol_versions/create_subvol_version_v1.yaml
@@ -0,0 +1 @@
+.qa/cephfs/overrides/subvol_versions/create_subvol_version_v1.yaml
\ No newline at end of file
diff --git a/qa/suites/fs/functional/subvol_versions/create_subvol_version_v2.yaml b/qa/suites/fs/functional/subvol_versions/create_subvol_version_v2.yaml
new file mode 120000
index 000000000..5a4de14e7
--- /dev/null
+++ b/qa/suites/fs/functional/subvol_versions/create_subvol_version_v2.yaml
@@ -0,0 +1 @@
+.qa/cephfs/overrides/subvol_versions/create_subvol_version_v2.yaml
\ No newline at end of file
diff --git a/qa/suites/fs/functional/tasks/client-recovery.yaml b/qa/suites/fs/functional/tasks/client-recovery.yaml
index e67acc3ab..7ea93a367 100644
--- a/qa/suites/fs/functional/tasks/client-recovery.yaml
+++ b/qa/suites/fs/functional/tasks/client-recovery.yaml
@@ -9,6 +9,9 @@
       - MDS_CLIENT_LATE_RELEASE
       - t responding to mclientcaps
      - file system flag refuse_client_session is set
+      - Degraded data redundancy
+      - MDS_CLIENTS_LAGGY
+      - Reduced data availability
 tasks:
 - cephfs_test_runner:
     fail_on_skip: false
diff --git a/qa/suites/fs/functional/tasks/snap-schedule.yaml b/qa/suites/fs/functional/tasks/snap-schedule.yaml
index f2e62b050..26922abed 100644
--- a/qa/suites/fs/functional/tasks/snap-schedule.yaml
+++ b/qa/suites/fs/functional/tasks/snap-schedule.yaml
@@ -6,7 +6,7 @@
         debug ms: 1
         debug finisher: 20
         debug client: 20
-    log-whitelist:
+    log-ignorelist:
       - OSD full dropping all updates
       - OSD near full
       - pausewr flag
diff --git a/qa/suites/fs/functional/tasks/snap_schedule_snapdir.yaml b/qa/suites/fs/functional/tasks/snap_schedule_snapdir.yaml
index 7bbcf000f..2a175dbf1 100644
--- a/qa/suites/fs/functional/tasks/snap_schedule_snapdir.yaml
+++ b/qa/suites/fs/functional/tasks/snap_schedule_snapdir.yaml
@@ -6,7 +6,7 @@
         debug ms: 1
         debug finisher: 20
         debug client: 20
-    log-whitelist:
+    log-ignorelist:
       - OSD full dropping all updates
       - OSD near full
       - pausewr flag
diff --git a/qa/suites/fs/mirror-ha/overrides/ignorelist_health.yaml b/qa/suites/fs/mirror-ha/overrides/ignorelist_health.yaml
new file mode 120000
index 000000000..4cb7d981d
--- /dev/null
+++ b/qa/suites/fs/mirror-ha/overrides/ignorelist_health.yaml
@@ -0,0 +1 @@
+./.qa/cephfs/overrides/ignorelist_health.yaml
\ No newline at end of file
diff --git a/qa/suites/fs/mirror-ha/overrides/whitelist_health.yaml b/qa/suites/fs/mirror-ha/overrides/whitelist_health.yaml
deleted file mode 100644
index d40fa4cb8..000000000
--- a/qa/suites/fs/mirror-ha/overrides/whitelist_health.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-overrides:
-  ceph:
-    log-ignorelist:
-      - overall HEALTH_
-      - \(FS_DEGRADED\)
-      - \(MDS_FAILED\)
-      - \(MDS_DEGRADED\)
-      - \(FS_WITH_FAILED_MDS\)
-      - \(MDS_DAMAGE\)
-      - \(MDS_ALL_DOWN\)
-      - \(MDS_UP_LESS_THAN_MAX\)
-      - \(FS_INLINE_DATA_DEPRECATED\)
-      - Reduced data availability
-      - Degraded data redundancy
diff --git a/qa/suites/fs/mirror/overrides/ignorelist_health.yaml b/qa/suites/fs/mirror/overrides/ignorelist_health.yaml
new file mode 120000
index 000000000..4cb7d981d
--- /dev/null
+++ b/qa/suites/fs/mirror/overrides/ignorelist_health.yaml
@@ -0,0 +1 @@
+./.qa/cephfs/overrides/ignorelist_health.yaml
\ No newline at end of file
diff --git a/qa/suites/fs/mirror/overrides/whitelist_health.yaml b/qa/suites/fs/mirror/overrides/whitelist_health.yaml
deleted file mode 100644
index d40fa4cb8..000000000
--- a/qa/suites/fs/mirror/overrides/whitelist_health.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-overrides:
-  ceph:
-    log-ignorelist:
-      - overall HEALTH_
-      - \(FS_DEGRADED\)
-      - \(MDS_FAILED\)
-      - \(MDS_DEGRADED\)
-      - \(FS_WITH_FAILED_MDS\)
-      - \(MDS_DAMAGE\)
-      - \(MDS_ALL_DOWN\)
-      - \(MDS_UP_LESS_THAN_MAX\)
-      - \(FS_INLINE_DATA_DEPRECATED\)
-      - Reduced data availability
-      - Degraded data redundancy
diff --git a/qa/suites/fs/nfs/overrides/ignorelist_health.yaml b/qa/suites/fs/nfs/overrides/ignorelist_health.yaml
index 8bfe4dc6f..5cb891a95 100644..120000
--- a/qa/suites/fs/nfs/overrides/ignorelist_health.yaml
+++ b/qa/suites/fs/nfs/overrides/ignorelist_health.yaml
@@ -1,13 +1 @@
-overrides:
-  ceph:
-    log-ignorelist:
-      - overall HEALTH_
-      - \(FS_DEGRADED\)
-      - \(MDS_FAILED\)
-      - \(MDS_DEGRADED\)
-      - \(FS_WITH_FAILED_MDS\)
-      - \(MDS_DAMAGE\)
-      - \(MDS_ALL_DOWN\)
-      - \(MDS_UP_LESS_THAN_MAX\)
-      - \(FS_INLINE_DATA_DEPRECATED\)
-      - \(OSD_DOWN\)
+.qa/cephfs/overrides/ignorelist_health.yaml
\ No newline at end of file
diff --git a/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/ignorelist_upgrade.yaml b/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/ignorelist_upgrade.yaml
new file mode 100644
index 000000000..713adb962
--- /dev/null
+++ b/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/ignorelist_upgrade.yaml
@@ -0,0 +1,4 @@
+overrides:
+  ceph:
+    log-ignorelist:
+      - OSD_DOWN
diff --git a/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/quincy.yaml b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/quincy.yaml
new file mode 100644
index 000000000..4a21021c0
--- /dev/null
+++ b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/quincy.yaml
@@ -0,0 +1,32 @@
+meta:
+- desc: |
+    setup ceph/quincy
+
+tasks:
+- install:
+    branch: quincy
+    exclude_packages:
+      - ceph-volume
+- print: "**** done install task..."
+- cephadm:
+    image: quay.ceph.io/ceph-ci/ceph:quincy
+    roleless: true
+    cephadm_branch: quincy
+    cephadm_git_url: https://github.com/ceph/ceph
+    conf:
+      osd:
+        #set config option for which cls modules are allowed to be loaded / used
+        osd_class_load_list: "*"
+        osd_class_default_list: "*"
+- print: "**** done end installing quincy cephadm ..."
+- cephadm.shell:
+    host.a:
+      - ceph config set mgr mgr/cephadm/use_repo_digest true --force
+- print: "**** done cephadm.shell ceph config set mgr..."
+- cephadm.shell:
+    host.a:
+      - ceph orch status
+      - ceph orch ps
+      - ceph orch ls
+      - ceph orch host ls
+      - ceph orch device ls
diff --git a/qa/suites/fs/workload/tasks/0-subvolume/no-subvolume.yaml b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/$
index e69de29bb..e69de29bb 100644
--- a/qa/suites/fs/workload/tasks/0-subvolume/no-subvolume.yaml
+++ b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/$
diff --git a/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/reef.yaml b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/reef.yaml
new file mode 100644
index 000000000..c53e8b55d
--- /dev/null
+++ b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/reef.yaml
@@ -0,0 +1,31 @@
+meta:
+- desc: |
+    setup ceph/reef
+
+tasks:
+- install:
+    branch: reef
+    exclude_packages:
+      - ceph-volume
+- print: "**** done install task..."
+- cephadm:
+    image: quay.ceph.io/ceph-ci/ceph:reef
+    roleless: true
+    compiled_cephadm_branch: reef
+    conf:
+      osd:
+        #set config option for which cls modules are allowed to be loaded / used
+        osd_class_load_list: "*"
+        osd_class_default_list: "*"
+- print: "**** done end installing reef cephadm ..."
+- cephadm.shell:
+    host.a:
+      - ceph config set mgr mgr/cephadm/use_repo_digest true --force
+- print: "**** done cephadm.shell ceph config set mgr..."
+- cephadm.shell:
+    host.a:
+      - ceph orch status
+      - ceph orch ps
+      - ceph orch ls
+      - ceph orch host ls
+      - ceph orch device ls
diff --git a/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/v18.2.0.yaml b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/v18.2.0.yaml
new file mode 100644
index 000000000..98bb210d1
--- /dev/null
+++ b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/v18.2.0.yaml
@@ -0,0 +1,31 @@
+meta:
+- desc: |
+    setup ceph/v18.2.0
+
+tasks:
+- install:
+    tag: v18.2.0
+    exclude_packages:
+      - ceph-volume
+- print: "**** done install task..."
+- cephadm:
+    image: quay.io/ceph/ceph:v18.2.0
+    roleless: true
+    compiled_cephadm_branch: reef
+    conf:
+      osd:
+        #set config option for which cls modules are allowed to be loaded / used
+        osd_class_load_list: "*"
+        osd_class_default_list: "*"
+- print: "**** done end installing v18.2.0 cephadm ..."
+- cephadm.shell:
+    host.a:
+      - ceph config set mgr mgr/cephadm/use_repo_digest true --force
+- print: "**** done cephadm.shell ceph config set mgr..."
+- cephadm.shell:
+    host.a:
+      - ceph orch status
+      - ceph orch ps
+      - ceph orch ls
+      - ceph orch host ls
+      - ceph orch device ls
diff --git a/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/v18.2.1.yaml b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/v18.2.1.yaml
new file mode 100644
index 000000000..ce45d9ea9
--- /dev/null
+++ b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/v18.2.1.yaml
@@ -0,0 +1,31 @@
+meta:
+- desc: |
+    setup ceph/v18.2.1
+
+tasks:
+- install:
+    tag: v18.2.1
+    exclude_packages:
+      - ceph-volume
+- print: "**** done install task..."
+- cephadm:
+    image: quay.io/ceph/ceph:v18.2.1
+    roleless: true
+    compiled_cephadm_branch: reef
+    conf:
+      osd:
+        #set config option for which cls modules are allowed to be loaded / used
+        osd_class_load_list: "*"
+        osd_class_default_list: "*"
+- print: "**** done end installing v18.2.1 cephadm ..."
+- cephadm.shell:
+    host.a:
+      - ceph config set mgr mgr/cephadm/use_repo_digest true --force
+- print: "**** done cephadm.shell ceph config set mgr..."
+- cephadm.shell:
+    host.a:
+      - ceph orch status
+      - ceph orch ps
+      - ceph orch ls
+      - ceph orch host ls
+      - ceph orch device ls
diff --git a/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client/fuse.yaml b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client/fuse.yaml
new file mode 100644
index 000000000..5318fd1a9
--- /dev/null
+++ b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client/fuse.yaml
@@ -0,0 +1,3 @@
+tasks:
+- ceph-fuse:
+- print: "**** done client"
diff --git a/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client.yaml b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client/kclient.yaml
index 92b9dda84..92b9dda84 100644
--- a/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client.yaml
+++ b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client/kclient.yaml
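[editor's note] The 0-from fragments above only pin the release the cluster starts from; the step that actually drives the upgrade is outside this diff. For orientation only (an assumption, not part of this change), a cephadm upgrade of this shape is typically kicked off and watched with:

    ceph orch upgrade start --image quay.io/ceph/ceph:<target-tag>   # target tag is illustrative
    ceph orch upgrade status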
diff --git a/qa/suites/fs/valgrind/mirror/overrides/ignorelist_health.yaml b/qa/suites/fs/valgrind/mirror/overrides/ignorelist_health.yaml
new file mode 120000
index 000000000..4cb7d981d
--- /dev/null
+++ b/qa/suites/fs/valgrind/mirror/overrides/ignorelist_health.yaml
@@ -0,0 +1 @@
+./.qa/cephfs/overrides/ignorelist_health.yaml
\ No newline at end of file
diff --git a/qa/suites/fs/valgrind/mirror/overrides/whitelist_health.yaml b/qa/suites/fs/valgrind/mirror/overrides/whitelist_health.yaml
deleted file mode 100644
index d40fa4cb8..000000000
--- a/qa/suites/fs/valgrind/mirror/overrides/whitelist_health.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-overrides:
-  ceph:
-    log-ignorelist:
-      - overall HEALTH_
-      - \(FS_DEGRADED\)
-      - \(MDS_FAILED\)
-      - \(MDS_DEGRADED\)
-      - \(FS_WITH_FAILED_MDS\)
-      - \(MDS_DAMAGE\)
-      - \(MDS_ALL_DOWN\)
-      - \(MDS_UP_LESS_THAN_MAX\)
-      - \(FS_INLINE_DATA_DEPRECATED\)
-      - Reduced data availability
-      - Degraded data redundancy
diff --git a/qa/suites/fs/workload/begin/3-modules.yaml b/qa/suites/fs/workload/begin/3-modules.yaml
new file mode 120000
index 000000000..1eba706a5
--- /dev/null
+++ b/qa/suites/fs/workload/begin/3-modules.yaml
@@ -0,0 +1 @@
+.qa/cephfs/begin/3-modules.yaml
\ No newline at end of file
diff --git a/qa/suites/fs/workload/ranks/1.yaml b/qa/suites/fs/workload/ranks/1.yaml
index e69de29bb..f9e95daa9 100644
--- a/qa/suites/fs/workload/ranks/1.yaml
+++ b/qa/suites/fs/workload/ranks/1.yaml
@@ -0,0 +1,4 @@
+overrides:
+  ceph:
+    cephfs:
+      max_mds: 1
diff --git a/qa/suites/fs/workload/ranks/multi/balancer/automatic.yaml b/qa/suites/fs/workload/ranks/multi/balancer/automatic.yaml
new file mode 100644
index 000000000..020eaa4bf
--- /dev/null
+++ b/qa/suites/fs/workload/ranks/multi/balancer/automatic.yaml
@@ -0,0 +1,4 @@
+tasks:
+- exec:
+    mon.a:
+      - ceph fs set cephfs balance_automate true
diff --git a/qa/suites/fs/workload/ranks/multi/balancer/distributed.yaml.disabled b/qa/suites/fs/workload/ranks/multi/balancer/distributed.yaml.disabled
new file mode 100644
index 000000000..be06d5186
--- /dev/null
+++ b/qa/suites/fs/workload/ranks/multi/balancer/distributed.yaml.disabled
@@ -0,0 +1,6 @@
+# distributed pins would be interesting if we had workloads on multiple clients. We do not yet. So it's disabled.
+tasks:
+- exec:
+    mon.a:
+      - ceph fs set cephfs balance_automate false
+      - ceph fs subvolumegroup pin cephfs qa distributed 1
diff --git a/qa/suites/fs/workload/ranks/multi/balancer/random.yaml b/qa/suites/fs/workload/ranks/multi/balancer/random.yaml
new file mode 100644
index 000000000..977e83fc2
--- /dev/null
+++ b/qa/suites/fs/workload/ranks/multi/balancer/random.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    conf:
+      mds:
+        mds_export_ephemeral_random_max: 0.10
+tasks:
+- exec:
+    mon.a:
+      - ceph fs set cephfs balance_automate false
+      - ceph fs subvolumegroup pin cephfs qa random 0.10
diff --git a/qa/suites/fs/workload/tasks/3-snaps/yes.yaml b/qa/suites/fs/workload/tasks/3-snaps/yes.yaml
index 598f7e215..69f53768d 100644
--- a/qa/suites/fs/workload/tasks/3-snaps/yes.yaml
+++ b/qa/suites/fs/workload/tasks/3-snaps/yes.yaml
@@ -1,3 +1,10 @@
+mgrmodules:
+  sequential:
+  - exec:
+      mon.a:
+        - ceph mgr module enable snap_schedule
+        - ceph config set mgr mgr/snap_schedule/allow_m_granularity true
+        - ceph config set mgr mgr/snap_schedule/dump_on_update true
 overrides:
   ceph:
     conf:
@@ -12,11 +19,8 @@
 tasks:
 - exec:
     mon.a:
-      - ceph mgr module enable snap_schedule
-      - ceph config set mgr mgr/snap_schedule/allow_m_granularity true
-      - ceph config set mgr mgr/snap_schedule/dump_on_update true
-      - ceph fs snap-schedule add --fs=cephfs --path=/ --snap_schedule=1M
-      - ceph fs snap-schedule retention add --fs=cephfs --path=/ --retention-spec-or-period=6M3h
+      - ceph fs snap-schedule add --fs=cephfs --path=/ --snap_schedule=1m
+      - ceph fs snap-schedule retention add --fs=cephfs --path=/ --retention-spec-or-period=6m3h
       - ceph fs snap-schedule status --fs=cephfs --path=/
       - ceph fs snap-schedule list --fs=cephfs --path=/ --recursive=true
       - date +%s > START_TIME
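[editor's note] The 1M -> 1m edit above is a correctness fix, not cosmetics: the snap_schedule mgr module distinguishes months (uppercase M) from minutes (lowercase m), so with allow_m_granularity enabled a minute-level test schedule must use the lowercase spec. The commands as they now run:

    ceph fs snap-schedule add --fs=cephfs --path=/ --snap_schedule=1m                       # snapshot every minute
    ceph fs snap-schedule retention add --fs=cephfs --path=/ --retention-spec-or-period=6m3h # keep 6 minutely + 3 hourly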
diff --git a/qa/suites/rbd/nbd/% b/qa/suites/krbd/mirror/%
index e69de29bb..e69de29bb 100644
--- a/qa/suites/rbd/nbd/%
+++ b/qa/suites/krbd/mirror/%
diff --git a/qa/suites/rbd/nbd/.qa b/qa/suites/krbd/mirror/.qa
index a602a0353..a602a0353 120000
--- a/qa/suites/rbd/nbd/.qa
+++ b/qa/suites/krbd/mirror/.qa
diff --git a/qa/suites/krbd/mirror/bluestore-bitmap.yaml b/qa/suites/krbd/mirror/bluestore-bitmap.yaml
new file mode 120000
index 000000000..a59cf5175
--- /dev/null
+++ b/qa/suites/krbd/mirror/bluestore-bitmap.yaml
@@ -0,0 +1 @@
+.qa/objectstore/bluestore-bitmap.yaml
\ No newline at end of file
diff --git a/qa/suites/rbd/nbd/cluster/.qa b/qa/suites/krbd/mirror/clusters/.qa
index a602a0353..a602a0353 120000
--- a/qa/suites/rbd/nbd/cluster/.qa
+++ b/qa/suites/krbd/mirror/clusters/.qa
diff --git a/qa/suites/krbd/mirror/clusters/2-node.yaml b/qa/suites/krbd/mirror/clusters/2-node.yaml
new file mode 100644
index 000000000..e5036ea72
--- /dev/null
+++ b/qa/suites/krbd/mirror/clusters/2-node.yaml
@@ -0,0 +1,17 @@
+meta:
+- desc: 2 ceph clusters with 1 mon, 1 mgr and 3 osd each
+roles:
+- - cluster1.mon.a
+  - cluster1.mgr.x
+  - cluster1.osd.0
+  - cluster1.osd.1
+  - cluster1.osd.2
+- - cluster2.mon.a
+  - cluster2.mgr.x
+  - cluster2.osd.0
+  - cluster2.osd.1
+  - cluster2.osd.2
+  - cluster1.client.mirror
+  - cluster1.client.mirror.0
+  - cluster2.client.mirror
+  - cluster2.client.mirror.0
diff --git a/qa/suites/krbd/mirror/conf.yaml b/qa/suites/krbd/mirror/conf.yaml
new file mode 100644
index 000000000..eb6d72a80
--- /dev/null
+++ b/qa/suites/krbd/mirror/conf.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        mon warn on pool no app: false
+        ms die on skipped message: false
diff --git a/qa/suites/rbd/nbd/workloads/.qa b/qa/suites/krbd/mirror/install/.qa
index a602a0353..a602a0353 120000
--- a/qa/suites/rbd/nbd/workloads/.qa
+++ b/qa/suites/krbd/mirror/install/.qa
diff --git a/qa/suites/krbd/mirror/install/ceph.yaml b/qa/suites/krbd/mirror/install/ceph.yaml
new file mode 100644
index 000000000..08bb1faa6
--- /dev/null
+++ b/qa/suites/krbd/mirror/install/ceph.yaml
@@ -0,0 +1,14 @@
+tasks:
+- install:
+    extra_packages:
+      - rbd-mirror
+- ceph:
+    cluster: cluster1
+- ceph:
+    cluster: cluster2
+- rbd-mirror:
+    client: cluster1.client.mirror.0
+    thrash: False
+- rbd-mirror:
+    client: cluster2.client.mirror.0
+    thrash: False
diff --git a/qa/suites/krbd/mirror/ms_mode$/.qa b/qa/suites/krbd/mirror/ms_mode$/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/krbd/mirror/ms_mode$/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/krbd/mirror/ms_mode$/crc-rxbounce.yaml b/qa/suites/krbd/mirror/ms_mode$/crc-rxbounce.yaml
new file mode 100644
index 000000000..4d27d0113
--- /dev/null
+++ b/qa/suites/krbd/mirror/ms_mode$/crc-rxbounce.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd default map options: ms_mode=crc,rxbounce
diff --git a/qa/suites/krbd/mirror/ms_mode$/crc.yaml b/qa/suites/krbd/mirror/ms_mode$/crc.yaml
new file mode 100644
index 000000000..3b072578f
--- /dev/null
+++ b/qa/suites/krbd/mirror/ms_mode$/crc.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd default map options: ms_mode=crc
diff --git a/qa/suites/krbd/mirror/ms_mode$/legacy-rxbounce.yaml b/qa/suites/krbd/mirror/ms_mode$/legacy-rxbounce.yaml
new file mode 100644
index 000000000..244e45cbc
--- /dev/null
+++ b/qa/suites/krbd/mirror/ms_mode$/legacy-rxbounce.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd default map options: ms_mode=legacy,rxbounce
diff --git a/qa/suites/krbd/mirror/ms_mode$/legacy.yaml b/qa/suites/krbd/mirror/ms_mode$/legacy.yaml
new file mode 100644
index 000000000..0048dcb0c
--- /dev/null
+++ b/qa/suites/krbd/mirror/ms_mode$/legacy.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd default map options: ms_mode=legacy
diff --git a/qa/suites/krbd/mirror/ms_mode$/secure.yaml b/qa/suites/krbd/mirror/ms_mode$/secure.yaml
new file mode 100644
index 000000000..a735db18d
--- /dev/null
+++ b/qa/suites/krbd/mirror/ms_mode$/secure.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      client:
+        rbd default map options: ms_mode=secure
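[editor's note] The ms_mode$ fragments fan the suite out across the kernel client's messenger modes; `rbd default map options` just supplies what `rbd device map` would otherwise take on the command line. For example:

    rbd device map -o ms_mode=secure mypool/myimage          # msgr2 with on-the-wire encryption
    rbd device map -o ms_mode=crc,rxbounce mypool/myimage    # msgr2 crc mode, bounce buffer on receive

(mypool/myimage is illustrative; legacy selects the msgr1 protocol.)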
diff --git a/qa/suites/krbd/mirror/tasks/.qa b/qa/suites/krbd/mirror/tasks/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/krbd/mirror/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/krbd/mirror/tasks/compare-mirror-image-alternate-primary.yaml b/qa/suites/krbd/mirror/tasks/compare-mirror-image-alternate-primary.yaml
new file mode 100644
index 000000000..42ee5a274
--- /dev/null
+++ b/qa/suites/krbd/mirror/tasks/compare-mirror-image-alternate-primary.yaml
@@ -0,0 +1,14 @@
+overrides:
+  install:
+    ceph:
+      extra_system_packages:
+        - pv
+tasks:
+- workunit:
+    clients:
+      cluster1.client.mirror:
+        - rbd/compare_mirror_image_alternate_primary.sh
+    env:
+      RBD_DEVICE_TYPE: 'krbd'
+      RBD_MIRROR_USE_RBD_MIRROR: '1'
+    timeout: 3h
diff --git a/qa/suites/krbd/mirror/tasks/compare-mirror-images.yaml b/qa/suites/krbd/mirror/tasks/compare-mirror-images.yaml
new file mode 100644
index 000000000..30d147de9
--- /dev/null
+++ b/qa/suites/krbd/mirror/tasks/compare-mirror-images.yaml
@@ -0,0 +1,14 @@
+overrides:
+  install:
+    ceph:
+      extra_system_packages:
+        - pv
+tasks:
+- workunit:
+    clients:
+      cluster1.client.mirror:
+        - rbd/compare_mirror_images.sh
+    env:
+      RBD_DEVICE_TYPE: 'krbd'
+      RBD_MIRROR_USE_RBD_MIRROR: '1'
+    timeout: 3h
diff --git a/qa/suites/netsplit/ceph.yaml b/qa/suites/netsplit/ceph.yaml
index ddf54b3a3..7bdb78c9e 100644
--- a/qa/suites/netsplit/ceph.yaml
+++ b/qa/suites/netsplit/ceph.yaml
@@ -11,7 +11,7 @@ overrides:
       mon osdmap full prune interval: 2
       mon osdmap full prune txsize: 2
       # thrashing monitors may make mgr have trouble w/ its keepalive
-    log-whitelist:
+    log-ignorelist:
      - overall HEALTH_
      - \(MGR_DOWN\)
      - \(MON_DOWN\)
diff --git a/qa/suites/rbd/nbd/cluster/+ b/qa/suites/orch/cephadm/no-agent-workunits/%
index e69de29bb..e69de29bb 100644
--- a/qa/suites/rbd/nbd/cluster/+
+++ b/qa/suites/orch/cephadm/no-agent-workunits/%
diff --git a/qa/suites/orch/cephadm/no-agent-workunits/.qa b/qa/suites/orch/cephadm/no-agent-workunits/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/orch/cephadm/no-agent-workunits/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/orch/cephadm/no-agent-workunits/0-distro b/qa/suites/orch/cephadm/no-agent-workunits/0-distro
new file mode 120000
index 000000000..4b341719d
--- /dev/null
+++ b/qa/suites/orch/cephadm/no-agent-workunits/0-distro
@@ -0,0 +1 @@
+.qa/distros/container-hosts
\ No newline at end of file
diff --git a/qa/suites/orch/cephadm/no-agent-workunits/mon_election b/qa/suites/orch/cephadm/no-agent-workunits/mon_election
new file mode 120000
index 000000000..3f331e621
--- /dev/null
+++ b/qa/suites/orch/cephadm/no-agent-workunits/mon_election
@@ -0,0 +1 @@
+.qa/mon_election
\ No newline at end of file
diff --git a/qa/suites/orch/cephadm/workunits/task/test_adoption.yaml b/qa/suites/orch/cephadm/no-agent-workunits/task/test_adoption.yaml
index e04fc1eea..e04fc1eea 100644
--- a/qa/suites/orch/cephadm/workunits/task/test_adoption.yaml
+++ b/qa/suites/orch/cephadm/no-agent-workunits/task/test_adoption.yaml
diff --git a/qa/suites/orch/cephadm/no-agent-workunits/task/test_cephadm_timeout.yaml b/qa/suites/orch/cephadm/no-agent-workunits/task/test_cephadm_timeout.yaml
new file mode 100644
index 000000000..24b53d029
--- /dev/null
+++ b/qa/suites/orch/cephadm/no-agent-workunits/task/test_cephadm_timeout.yaml
@@ -0,0 +1,13 @@
+roles:
+- - host.a
+  - mon.a
+  - mgr.a
+  - osd.0
+  - client.0
+tasks:
+- install:
+- cephadm:
+- workunit:
+    clients:
+      client.0:
+        - cephadm/test_cephadm_timeout.py
\ No newline at end of file
diff --git a/qa/suites/orch/cephadm/workunits/task/test_orch_cli.yaml b/qa/suites/orch/cephadm/no-agent-workunits/task/test_orch_cli.yaml
index ec65fb116..ec65fb116 100644
--- a/qa/suites/orch/cephadm/workunits/task/test_orch_cli.yaml
+++ b/qa/suites/orch/cephadm/no-agent-workunits/task/test_orch_cli.yaml
diff --git a/qa/suites/orch/cephadm/workunits/task/test_orch_cli_mon.yaml b/qa/suites/orch/cephadm/no-agent-workunits/task/test_orch_cli_mon.yaml
index 2a33dc839..2a33dc839 100644
--- a/qa/suites/orch/cephadm/workunits/task/test_orch_cli_mon.yaml
+++ b/qa/suites/orch/cephadm/no-agent-workunits/task/test_orch_cli_mon.yaml
diff --git a/qa/suites/orch/cephadm/workunits/task/test_extra_daemon_features.yaml b/qa/suites/orch/cephadm/workunits/task/test_extra_daemon_features.yaml
new file mode 100644
index 000000000..b5e0ec98f
--- /dev/null
+++ b/qa/suites/orch/cephadm/workunits/task/test_extra_daemon_features.yaml
@@ -0,0 +1,74 @@
+roles:
+- - host.a
+  - mon.a
+  - mgr.a
+  - osd.0
+- - host.b
+  - mon.b
+  - mgr.b
+  - osd.1
+tasks:
+- install:
+- cephadm:
+- exec:
+    all-hosts:
+      - mkdir /etc/cephadm_testing
+- cephadm.apply:
+    specs:
+      - service_type: mon
+        placement:
+          host_pattern: '*'
+        extra_container_args:
+          - "--cpus=2"
+        extra_entrypoint_args:
+          - "--debug_ms 10"
+      - service_type: container
+        service_id: foo
+        placement:
+          host_pattern: '*'
+        spec:
+          image: "quay.io/fedora/fedora:latest"
+          entrypoint: "bash"
+        extra_container_args:
+          - "-v"
+          - "/etc/cephadm_testing:/root/cephadm_testing"
+        extra_entrypoint_args:
+          - "/root/write_thing_to_file.sh"
+          - "-c"
+          - "testing_custom_containers"
+          - "-o"
+          - "/root/cephadm_testing/testing.txt"
+        custom_configs:
+          - mount_path: "/root/write_thing_to_file.sh"
+            content: |
+              while getopts "o:c:" opt; do
+                case ${opt} in
+                  o )
+                    OUT_FILE=${OPTARG}
+                    ;;
+                  c )
+                    CONTENT=${OPTARG}
+                esac
+              done
+              echo $CONTENT > $OUT_FILE
+              sleep infinity
+- cephadm.wait_for_service:
+    service: mon
+- cephadm.wait_for_service:
+    service: container.foo
+- exec:
+    host.a:
+      - |
+        set -ex
+        FSID=$(/home/ubuntu/cephtest/cephadm shell -- ceph fsid)
+        sleep 60
+        # check extra container and entrypoint args written to mon unit run file
+        grep "\-\-cpus=2" /var/lib/ceph/$FSID/mon.*/unit.run
+        grep "\-\-debug_ms 10" /var/lib/ceph/$FSID/mon.*/unit.run
+        # check that custom container properly wrote content to file.
+        # This requires the custom config, extra container args, and
+        # entrypoint args to all be working in order for this to have
+        # been written. The container entrypoint was set up with custom_configs,
+        # the content and where to write to with the entrypoint args, and the mounting
+        # of the /etc/cephadm_testing dir with extra container args
+        grep "testing_custom_containers" /etc/cephadm_testing/testing.txt
diff --git a/qa/suites/orch/cephadm/workunits/task/test_host_drain.yaml b/qa/suites/orch/cephadm/workunits/task/test_host_drain.yaml
new file mode 100644
index 000000000..c195bc052
--- /dev/null
+++ b/qa/suites/orch/cephadm/workunits/task/test_host_drain.yaml
@@ -0,0 +1,72 @@
+roles:
+- - host.a
+  - mon.a
+  - mgr.a
+  - osd.0
+  - osd.1
+- - host.b
+  - mon.b
+  - mgr.b
+  - osd.2
+  - osd.3
+- - host.c
+  - mon.c
+  - osd.4
+  - osd.5
+tasks:
+- install:
+- cephadm:
+- cephadm.shell:
+    host.a:
+      - |
+        set -ex
+        HOSTNAMES=$(ceph orch host ls --format json | jq -r '.[] | .hostname')
+        for host in $HOSTNAMES; do
+          # find the hostname for "host.c" which will have no mgr
+          HAS_MGRS=$(ceph orch ps --hostname ${host} --format json | jq 'any(.daemon_type == "mgr")')
+          if [ "$HAS_MGRS" == "false" ]; then
+            HOST_C="${host}"
+          fi
+        done
+        # One last thing to worry about before draining the host
+        # is that the teuthology test tends to put the explicit
+        # hostnames in the placement for the mon service.
+        # We want to make sure we can drain without providing
+        # --force and there is a check for the host being removed
+        # being listed explicitly in the placements. Therefore,
+        # we should remove it from the mon placement.
+        ceph orch ls mon --export > mon.yaml
+        sed /"$HOST_C"/d mon.yaml > mon_adjusted.yaml
+        ceph orch apply -i mon_adjusted.yaml
+        # now drain that host
+        ceph orch host drain $HOST_C --zap-osd-devices
+        # wait for drain to complete
+        HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)
+        while [ "$HOST_C_DAEMONS" != "No daemons reported" ]; do
+          sleep 15
+          HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)
+        done
+        # we want to check the ability to remove the host from
+        # the CRUSH map, so we should first verify the host is in
+        # the CRUSH map.
+        ceph osd getcrushmap -o compiled-crushmap
+        crushtool -d compiled-crushmap -o crushmap.txt
+        CRUSH_MAP=$(cat crushmap.txt)
+        if ! grep -q "$HOST_C" <<< "$CRUSH_MAP"; then
+          printf "Expected to see $HOST_C in CRUSH map. Saw:\n\n$CRUSH_MAP"
+          exit 1
+        fi
+        # If the drain was successful, we should be able to remove the
+        # host without force with no issues. If there are still daemons
+        # we will get a response telling us to drain the host and a
+        # non-zero return code
+        ceph orch host rm $HOST_C --rm-crush-entry
+        # verify we've successfully removed the host from the CRUSH map
+        sleep 30
+        ceph osd getcrushmap -o compiled-crushmap
+        crushtool -d compiled-crushmap -o crushmap.txt
+        CRUSH_MAP=$(cat crushmap.txt)
+        if grep -q "$HOST_C" <<< "$CRUSH_MAP"; then
+          printf "Saw $HOST_C in CRUSH map after it should have been removed.\n\n$CRUSH_MAP"
+          exit 1
+        fi
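[editor's note] The drain-wait loop above string-matches the plain-text output of `ceph orch ps` ("No daemons reported"). A sketch of an equivalent check against the JSON output, which is less sensitive to wording changes (assumes jq, which the script already uses):

    while [ "$(ceph orch ps --hostname "$HOST_C" --format json | jq 'length')" -gt 0 ]; do
      sleep 15   # keep polling until no daemons remain on the drained host
    done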
diff --git a/qa/suites/rados/singleton/all/ec-inconsistent-hinfo.yaml b/qa/suites/rados/singleton/all/ec-inconsistent-hinfo.yaml
index 31724f9e8..84abb702c 100644
--- a/qa/suites/rados/singleton/all/ec-inconsistent-hinfo.yaml
+++ b/qa/suites/rados/singleton/all/ec-inconsistent-hinfo.yaml
@@ -30,6 +30,7 @@
       - slow request
       - unfound
       - \(POOL_APP_NOT_ENABLED\)
+      - enough copies available
     conf:
       osd:
         osd min pg log entries: 5
diff --git a/qa/suites/rados/singleton/all/mon-config.yaml b/qa/suites/rados/singleton/all/mon-config.yaml
index ab1eb81b0..5e36a34a6 100644
--- a/qa/suites/rados/singleton/all/mon-config.yaml
+++ b/qa/suites/rados/singleton/all/mon-config.yaml
@@ -6,7 +6,7 @@
   - osd.0
   - osd.1
   - osd.2
-  - client.0
+  - client.rgw
 openstack:
   - volumes: # attached to each instance
       count: 3
@@ -18,6 +18,7 @@
       - sudo ceph config set mgr mgr_pool false --force
     log-ignorelist:
       - \(POOL_APP_NOT_ENABLED\)
+- rgw: [client.rgw]
 - workunit:
     clients:
       all:
diff --git a/qa/suites/rbd/device/% b/qa/suites/rbd/device/%
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/rbd/device/%
diff --git a/qa/suites/rbd/device/.qa b/qa/suites/rbd/device/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/rbd/device/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rbd/nbd/base b/qa/suites/rbd/device/base
index fd10a859d..fd10a859d 120000
--- a/qa/suites/rbd/nbd/base
+++ b/qa/suites/rbd/device/base
diff --git a/qa/suites/rbd/device/cluster/+ b/qa/suites/rbd/device/cluster/+
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/rbd/device/cluster/+
diff --git a/qa/suites/rbd/device/cluster/.qa b/qa/suites/rbd/device/cluster/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/rbd/device/cluster/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rbd/nbd/cluster/fixed-3.yaml b/qa/suites/rbd/device/cluster/fixed-3.yaml
index 182589152..182589152 100644
--- a/qa/suites/rbd/nbd/cluster/fixed-3.yaml
+++ b/qa/suites/rbd/device/cluster/fixed-3.yaml
diff --git a/qa/suites/rbd/nbd/cluster/openstack.yaml b/qa/suites/rbd/device/cluster/openstack.yaml
index 48becbb83..48becbb83 120000
--- a/qa/suites/rbd/nbd/cluster/openstack.yaml
+++ b/qa/suites/rbd/device/cluster/openstack.yaml
diff --git a/qa/suites/rbd/nbd/conf b/qa/suites/rbd/device/conf
index 4bc0fe86c..4bc0fe86c 120000
--- a/qa/suites/rbd/nbd/conf
+++ b/qa/suites/rbd/device/conf
diff --git a/qa/suites/rbd/nbd/msgr-failures b/qa/suites/rbd/device/msgr-failures
index 03689aa44..03689aa44 120000
--- a/qa/suites/rbd/nbd/msgr-failures
+++ b/qa/suites/rbd/device/msgr-failures
diff --git a/qa/suites/rbd/nbd/objectstore b/qa/suites/rbd/device/objectstore
index c40bd3261..c40bd3261 120000
--- a/qa/suites/rbd/nbd/objectstore
+++ b/qa/suites/rbd/device/objectstore
diff --git a/qa/suites/rbd/nbd/supported-random-distro$ b/qa/suites/rbd/device/supported-random-distro$
index 0862b4457..0862b4457 120000
--- a/qa/suites/rbd/nbd/supported-random-distro$
+++ b/qa/suites/rbd/device/supported-random-distro$
diff --git a/qa/suites/rbd/nbd/thrashers b/qa/suites/rbd/device/thrashers
index f461dadc3..f461dadc3 120000
--- a/qa/suites/rbd/nbd/thrashers
+++ b/qa/suites/rbd/device/thrashers
diff --git a/qa/suites/rbd/nbd/thrashosds-health.yaml b/qa/suites/rbd/device/thrashosds-health.yaml
index 9124eb1aa..9124eb1aa 120000
--- a/qa/suites/rbd/nbd/thrashosds-health.yaml
+++ b/qa/suites/rbd/device/thrashosds-health.yaml
diff --git a/qa/suites/rbd/device/workloads/.qa b/qa/suites/rbd/device/workloads/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/rbd/device/workloads/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rbd/device/workloads/diff-continuous-krbd.yaml b/qa/suites/rbd/device/workloads/diff-continuous-krbd.yaml
new file mode 100644
index 000000000..5907718d5
--- /dev/null
+++ b/qa/suites/rbd/device/workloads/diff-continuous-krbd.yaml
@@ -0,0 +1,12 @@
+overrides:
+  install:
+    ceph:
+      extra_system_packages:
+        - pv
+tasks:
+- workunit:
+    clients:
+      all:
+        - rbd/diff_continuous.sh
+    env:
+      RBD_DEVICE_TYPE: "krbd"
diff --git a/qa/suites/rbd/nbd/workloads/rbd_nbd_diff_continuous.yaml b/qa/suites/rbd/device/workloads/diff-continuous-nbd.yaml
index e0a7ebe33..e0a7ebe33 100644
--- a/qa/suites/rbd/nbd/workloads/rbd_nbd_diff_continuous.yaml
+++ b/qa/suites/rbd/device/workloads/diff-continuous-nbd.yaml
diff --git a/qa/suites/rbd/nbd/workloads/rbd_fsx_nbd.yaml b/qa/suites/rbd/device/workloads/rbd_fsx_nbd.yaml
index b5737671f..b5737671f 100644
--- a/qa/suites/rbd/nbd/workloads/rbd_fsx_nbd.yaml
+++ b/qa/suites/rbd/device/workloads/rbd_fsx_nbd.yaml
diff --git a/qa/suites/rbd/nbd/workloads/rbd_nbd.yaml b/qa/suites/rbd/device/workloads/rbd_nbd.yaml
index ededea024..ededea024 100644
--- a/qa/suites/rbd/nbd/workloads/rbd_nbd.yaml
+++ b/qa/suites/rbd/device/workloads/rbd_nbd.yaml
diff --git a/qa/suites/rbd/mirror/workloads/compare-mirror-image-alternate-primary-krbd.yaml b/qa/suites/rbd/mirror/workloads/compare-mirror-image-alternate-primary-krbd.yaml
new file mode 100644
index 000000000..771400d01
--- /dev/null
+++ b/qa/suites/rbd/mirror/workloads/compare-mirror-image-alternate-primary-krbd.yaml
@@ -0,0 +1,13 @@
+overrides:
+  install:
+    ceph:
+      extra_system_packages:
+        - pv
+tasks:
+- workunit:
+    clients:
+      cluster1.client.mirror:
+        - rbd/compare_mirror_image_alternate_primary.sh
+    env:
+      RBD_DEVICE_TYPE: 'krbd'
+    timeout: 3h
diff --git a/qa/suites/rbd/mirror/workloads/compare-mirror-image-alternate-primary-nbd.yaml b/qa/suites/rbd/mirror/workloads/compare-mirror-image-alternate-primary-nbd.yaml
new file mode 100644
index 000000000..e87d0e8ce
--- /dev/null
+++ b/qa/suites/rbd/mirror/workloads/compare-mirror-image-alternate-primary-nbd.yaml
@@ -0,0 +1,15 @@
+overrides:
+  install:
+    ceph:
+      extra_packages:
+        - rbd-nbd
+      extra_system_packages:
+        - pv
+tasks:
+- workunit:
+    clients:
+      cluster1.client.mirror:
+        - rbd/compare_mirror_image_alternate_primary.sh
+    env:
+      RBD_DEVICE_TYPE: 'nbd'
+    timeout: 3h
diff --git a/qa/suites/rbd/mirror/workloads/compare-mirror-images-krbd.yaml b/qa/suites/rbd/mirror/workloads/compare-mirror-images-krbd.yaml
new file mode 100644
index 000000000..fc161987f
--- /dev/null
+++ b/qa/suites/rbd/mirror/workloads/compare-mirror-images-krbd.yaml
@@ -0,0 +1,13 @@
+overrides:
+  install:
+    ceph:
+      extra_system_packages:
+        - pv
+tasks:
+- workunit:
+    clients:
+      cluster1.client.mirror:
+        - rbd/compare_mirror_images.sh
+    env:
+      RBD_DEVICE_TYPE: 'krbd'
+    timeout: 3h
diff --git a/qa/suites/rbd/mirror/workloads/compare-mirror-images-nbd.yaml b/qa/suites/rbd/mirror/workloads/compare-mirror-images-nbd.yaml
new file mode 100644
index 000000000..ed02ed257
--- /dev/null
+++ b/qa/suites/rbd/mirror/workloads/compare-mirror-images-nbd.yaml
@@ -0,0 +1,15 @@
+overrides:
+  install:
+    ceph:
+      extra_packages:
+        - rbd-nbd
+      extra_system_packages:
+        - pv
+tasks:
+- workunit:
+    clients:
+      cluster1.client.mirror:
+        - rbd/compare_mirror_images.sh
+    env:
+      RBD_DEVICE_TYPE: 'nbd'
+    timeout: 3h
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/% b/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/%
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/%
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/point-to-point-upgrade.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/point-to-point-upgrade.yaml
new file mode 100644
index 000000000..443b89fcf
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/point-to-point-upgrade.yaml
@@ -0,0 +1,173 @@
+meta:
+- desc: |
+    Run ceph on two nodes, using one of them as a client,
+    with a separate client-only node.
+    Use xfs beneath the osds.
+    install ceph/reef v18.2.1 and the v18.2.x point versions
+    run workload and upgrade-sequence in parallel
+    (every point release should be tested)
+    run workload and upgrade-sequence in parallel
+    install ceph/reef latest version
+    run workload and upgrade-sequence in parallel
+    Overall upgrade path is - reef-latest.point-1 => reef-latest.point => reef-latest
+overrides:
+  ceph:
+    log-ignorelist:
+      - reached quota
+      - scrub
+      - osd_map_max_advance
+      - wrongly marked
+      - FS_DEGRADED
+      - POOL_APP_NOT_ENABLED
+      - CACHE_POOL_NO_HIT_SET
+      - POOL_FULL
+      - SMALLER_PG
+      - pool\(s\) full
+      - OSD_DOWN
+      - missing hit_sets
+      - CACHE_POOL_NEAR_FULL
+      - PG_AVAILABILITY
+      - PG_DEGRADED
+      - application not enabled
+      - cache pools at or near target size
+      - filesystem is degraded
+      - OBJECT_MISPLACED
+    ### ref: https://tracker.ceph.com/issues/40251
+    #removed see ^ - failed to encode map
+
+    fs: xfs
+
+    conf:
+      global:
+        mon_warn_on_pool_no_app: false
+        mon_mds_skip_sanity: true
+      mon:
+        mon debug unsafe allow tier with nonempty snaps: true
+      osd:
+        osd map max advance: 1000
+        osd_class_default_list: "*"
+        osd_class_load_list: "*"
+      client:
+        rgw_crypt_require_ssl: false
+        rgw crypt s3 kms backend: testing
+        rgw crypt s3 kms encryption keys: testkey-1=YmluCmJvb3N0CmJvb3N0LWJ1aWxkCmNlcGguY29uZgo= testkey-2=aWIKTWFrZWZpbGUKbWFuCm91dApzcmMKVGVzdGluZwo=
+roles:
+- - mon.a
+  - mds.a
+  - osd.0
+  - osd.1
+  - osd.2
+  - mgr.x
+- - mon.b
+  - mon.c
+  - osd.3
+  - osd.4
+  - osd.5
+  - client.0
+- - client.1
+openstack:
+- volumes: # attached to each instance
+    count: 3
+    size: 30 # GB
+tasks:
+- print: "**** done reef about to install v18.2.0 "
+- install:
+    tag: v18.2.0
+    # line below can be removed its from jewel test
+    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev', 'librgw2']
+- print: "**** done v18.2.0 install"
+- ceph:
+    fs: xfs
+    add_osds_to_crush: true
+- print: "**** done ceph xfs"
+- sequential:
+  - workload
+- print: "**** done workload v18.2.0"
+
+
+####### upgrade to v18.2.1
+- install.upgrade:
+    #exclude_packages: ['ceph-mgr','libcephfs2','libcephfs-devel','libcephfs-dev']
+    mon.a:
+      tag: v18.2.1
+    mon.b:
+      tag: v18.2.1
+- parallel:
+  - workload_reef
+  - upgrade-sequence_reef
+- print: "**** done parallel reef v18.2.1"
+
+#### upgrade to latest reef
+- install.upgrade:
+    mon.a:
+    mon.b:
+- parallel:
+  - workload_reef
+  - upgrade-sequence_reef
+- print: "**** done parallel reef branch"
+
+#######################
+workload:
+  sequential:
+  - workunit:
+      clients:
+        client.0:
+          - suites/blogbench.sh
+
+workload_reef:
+  full_sequential:
+  - workunit:
+      branch: reef
+      # tag: v18.2.1
+      clients:
+        client.1:
+          - rados/test.sh
+          - cls
+      env:
+        CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_snapshot'
+  - print: "**** done rados/test.sh & cls workload_reef"
+  - sequential:
+    - rgw: [client.0]
+    - print: "**** done rgw workload_reef"
+    - rbd_fsx:
+        clients: [client.0]
+        size: 134217728
+    - print: "**** done rbd_fsx workload_reef"
+
+upgrade-sequence_reef:
+  sequential:
+  - print: "**** done branch: reef install.upgrade"
+  - ceph.restart: [mds.a]
+  - sleep:
+      duration: 60
+  - ceph.restart: [osd.0]
+  - sleep:
+      duration: 30
+  - ceph.restart: [osd.1]
+  - sleep:
+      duration: 30
+  - ceph.restart: [osd.2]
+  - sleep:
+      duration: 30
+  - ceph.restart: [osd.3]
+  - sleep:
+      duration: 30
+  - ceph.restart: [osd.4]
+  - sleep:
+      duration: 30
+  - ceph.restart: [osd.5]
+  - sleep:
+      duration: 60
+  - ceph.restart: [mgr.x]
+  - sleep:
+      duration: 60
+  - ceph.restart: [mon.a]
+  - sleep:
+      duration: 60
+  - ceph.restart: [mon.b]
+  - sleep:
+      duration: 60
+  - ceph.restart: [mon.c]
+  - sleep:
+      duration: 60
+  - print: "**** done ceph.restart all reef branch mds/osd/mon"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/supported-all-distro/centos_8.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/supported-all-distro/centos_8.yaml
new file mode 120000
index 000000000..bb4a6aaf3
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/supported-all-distro/centos_8.yaml
@@ -0,0 +1 @@
+../../../../../distros/supported-all-distro/centos_8.yaml
\ No newline at end of file
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/supported-all-distro/ubuntu_latest.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/supported-all-distro/ubuntu_latest.yaml
new file mode 100644
index 000000000..f20398230
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-parallel/supported-all-distro/ubuntu_latest.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "20.04"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/% b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/%
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/%
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/+ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/+
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/+
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/openstack.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/openstack.yaml
new file mode 100644
index 000000000..5caffc353
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/openstack.yaml
@@ -0,0 +1,6 @@
+openstack:
+  - machine:
+      disk: 100 # GB
+  - volumes: # attached to each instance
+      count: 4
+      size: 30 # GB
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/start.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/start.yaml
new file mode 100644
index 000000000..1271edd8b
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/0-cluster/start.yaml
@@ -0,0 +1,33 @@
+meta:
+- desc: |
+    Run ceph on two nodes,
+    with a separate client-only node.
+    Use xfs beneath the osds.
+overrides:
+  ceph:
+    fs: xfs
+    log-ignorelist:
+      - overall HEALTH_
+      - \(MON_DOWN\)
+      - \(MGR_DOWN\)
+    ### ref: https://tracker.ceph.com/issues/40251
+    #removed see ^ - failed to encode map
+    conf:
+      global:
+        enable experimental unrecoverable data corrupting features: "*"
+      mon:
+        mon warn on osd down out interval zero: false
+roles:
+- - mon.a
+  - mon.b
+  - mon.c
+  - mgr.x
+  - osd.0
+  - osd.1
+  - osd.2
+  - osd.3
+- - osd.4
+  - osd.5
+  - osd.6
+  - osd.7
+- - client.0
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/1-ceph-install/reef.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/1-ceph-install/reef.yaml
new file mode 100644
index 000000000..0c7db6ae4
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/1-ceph-install/reef.yaml
@@ -0,0 +1,21 @@
+meta:
+- desc: |
+    install ceph/reef v18.2.0
+    Overall upgrade path is - reef-latest.point -1 => reef-latest
+tasks:
+- install:
+    tag: v18.2.0
+    exclude_packages: ['librados3']
+    extra_packages: ['librados2']
+- print: "**** done install reef v18.2.0"
+- ceph:
+- exec:
+    osd.0:
+      - ceph osd require-osd-release reef
+      - ceph osd set-require-min-compat-client reef
+- print: "**** done ceph"
+overrides:
+  ceph:
+    conf:
+      mon:
+        mon warn on osd down out interval zero: false
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/1.1.short_pg_log.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/1.1.short_pg_log.yaml
new file mode 100644
index 000000000..20cc101de
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/1.1.short_pg_log.yaml
@@ -0,0 +1,6 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        osd_min_pg_log_entries: 1
+        osd_max_pg_log_entries: 2
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/2-partial-upgrade/firsthalf.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/2-partial-upgrade/firsthalf.yaml
new file mode 100644
index 000000000..02ba5c1bb
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/2-partial-upgrade/firsthalf.yaml
@@ -0,0 +1,13 @@
+meta:
+- desc: |
+    install upgrade ceph/-x on one node only
+    1st half
+    restart : osd.0,1,2,3
+tasks:
+- install.upgrade:
+    osd.0:
+- print: "**** done install.upgrade osd.0"
+- ceph.restart:
+    daemons: [mon.a,mon.b,mon.c,mgr.x,osd.0,osd.1,osd.2,osd.3]
+    mon-health-to-clog: false
+- print: "**** done ceph.restart 1st half"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/3-thrash/default.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/3-thrash/default.yaml
new file mode 100644
index 000000000..c739d8fea
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/3-thrash/default.yaml
@@ -0,0 +1,27 @@
+meta:
+- desc: |
+    randomly kill and revive osd
+    small chance to increase the number of pgs
+overrides:
+  ceph:
+    log-ignorelist:
+      - but it is still running
+      - wrongly marked me down
+      - objects unfound and apparently lost
+      - log bound mismatch
+      ### ref: https://tracker.ceph.com/issues/40251
+      - failed to encode map
+tasks:
+- parallel:
+  - stress-tasks
+stress-tasks:
+- thrashosds:
+    timeout: 1200
+    chance_pgnum_grow: 1
+    chance_pgpnum_fix: 1
+    chance_thrash_cluster_full: 0
+    chance_thrash_pg_upmap: 0
+    chance_thrash_pg_upmap_items: 0
+    disable_objectstore_tool_tests: true
+    chance_force_recovery: 0
+- print: "**** done thrashosds 3-thrash"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/+ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/+
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/+
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/fsx.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/fsx.yaml
new file mode 100644
index 000000000..fd4081f23
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/fsx.yaml
@@ -0,0 +1,8 @@
+meta:
+- desc: |
+    run basic fsx tests for rbd
+stress-tasks:
+- rbd_fsx:
+    clients: [client.0]
+    size: 134217728
+- print: "**** done rbd_fsx 4-workload"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/radosbench.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/radosbench.yaml
new file mode 100644
index 000000000..c545936c0
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/radosbench.yaml
@@ -0,0 +1,52 @@
+meta:
+- desc: |
+    run randomized correctness test for rados operations
+    generate write load with rados bench
+stress-tasks:
+- full_sequential:
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+  - radosbench:
+      clients: [client.0]
+      time: 90
+- print: "**** done radosbench 4-workload"
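[editor's note] For scale: under full_sequential the fifteen radosbench jobs above run back to back, i.e. 15 x 90 s = 1350 s, roughly 22.5 minutes of sustained write load while the 3-thrash stage is killing and reviving OSDs in parallel.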
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd-cls.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd-cls.yaml
new file mode 100644
index 000000000..c0445533d
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd-cls.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+    run basic cls tests for rbd
+stress-tasks:
+- workunit:
+    branch: reef
+    clients:
+      client.0:
+        - cls/test_cls_rbd.sh
+    env:
+      CLS_RBD_GTEST_FILTER: '*:-TestClsRbd.mirror_snapshot'
+- print: "**** done cls/test_cls_rbd.sh 4-workload"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd-import-export.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd-import-export.yaml
new file mode 100644
index 000000000..a4bea35a4
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd-import-export.yaml
@@ -0,0 +1,12 @@
+meta:
+- desc: |
+    run basic import/export cli tests for rbd
+stress-tasks:
+- workunit:
+    branch: reef
+    clients:
+      client.0:
+        - rbd/import_export.sh
+    env:
+      RBD_CREATE_ARGS: --new-format
+- print: "**** done rbd/import_export.sh 4-workload"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd_api.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd_api.yaml
new file mode 100644
index 000000000..025616655
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/rbd_api.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+    librbd C and C++ api tests
+overrides:
+  ceph:
+    log-ignorelist:
+      - overall HEALTH_
+      - \(CACHE_POOL_NO_HIT_SET\)
+      - \(POOL_APP_NOT_ENABLED\)
+      - is full \(reached quota
+      - \(POOL_FULL\)
+stress-tasks:
+- workunit:
+    branch: reef
+    clients:
+      client.0:
+        - rbd/test_librbd.sh
+- print: "**** done rbd/test_librbd.sh 4-workload"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/readwrite.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/readwrite.yaml
new file mode 100644
index 000000000..456868998
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/readwrite.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+    randomized correctness test for rados operations on a replicated pool,
+    using only reads, writes, and deletes
+stress-tasks:
+- full_sequential:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 500
+      write_append_excl: false
+      op_weights:
+        read: 45
+        write: 45
+        delete: 10
+- print: "**** done rados/readwrite 4-workload"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/snaps-few-objects.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/snaps-few-objects.yaml
new file mode 100644
index 000000000..ae232d867
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/4-workload/snaps-few-objects.yaml
@@ -0,0 +1,18 @@
+meta:
+- desc: |
+    randomized correctness test for rados operations on a replicated pool with snapshot operations
+stress-tasks:
+- full_sequential:
+  - rados:
+      clients: [client.0]
+      ops: 4000
+      objects: 50
+      write_append_excl: false
+      op_weights:
+        read: 100
+        write: 100
+        delete: 50
+        snap_create: 50
+        snap_remove: 50
+        rollback: 50
+- print: "**** done rados/snaps-few-objects 4-workload"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/5-finish-upgrade.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/5-finish-upgrade.yaml
new file mode 100644
index 000000000..803737c72
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/5-finish-upgrade.yaml
@@ -0,0 +1,8 @@
+tasks:
+- install.upgrade:
+    osd.4:
+    client.0:
+- ceph.restart:
+    daemons: [osd.4, osd.5, osd.6, osd.7]
+    wait-for-healthy: false
+    wait-for-osds-up: true
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/+ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/+
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/+
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/rbd-python.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/rbd-python.yaml
new file mode 100644
index 000000000..78e68dbdb
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/rbd-python.yaml
@@ -0,0 +1,10 @@
+meta:
+- desc: |
+    librbd python api tests
+tasks:
+- workunit:
+    branch: reef
+    clients:
+      client.0:
+        - rbd/test_librbd_python.sh
+- print: "**** done rbd/test_librbd_python.sh 7-workload"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/snaps-many-objects.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/snaps-many-objects.yaml
new file mode 100644
index 000000000..805bf97c3
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/6-final-workload/snaps-many-objects.yaml
@@ -0,0 +1,16 @@
+meta:
+- desc: |
+    randomized correctness test for rados operations on a replicated pool with snapshot operations
+tasks:
+- rados:
+    clients: [client.0]
+    ops: 4000
+    objects: 500
+    write_append_excl: false
+    op_weights:
+      read: 100
+      write: 100
+      delete: 50
+      snap_create: 50
+      snap_remove: 50
+      rollback: 50
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-bitmap.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-bitmap.yaml
new file mode 100644
index 000000000..b18e04bee
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-bitmap.yaml
@@ -0,0 +1,43 @@
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        bluestore allocator: bitmap
+        # lower the full ratios since we can fill up a 100gb osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+# this doesn't work with failures bc the log writes are not atomic across the two backends
+#        bluestore bluefs env mirror: true
+        bdev enable discard: true
+        bdev async discard: true
+  ceph-deploy:
+    fs: xfs
+    bluestore: yes
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        # lower the full ratios since we can fill up a 100gb osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+        bdev enable discard: true
+        bdev async discard: true
+
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-comp.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-comp.yaml
new file mode 100644
index 000000000..b408032fd
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-comp.yaml
@@ -0,0 +1,23 @@
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore compression mode: aggressive
+        bluestore fsck on mount: true
+        # lower the full ratios since we can fill up a 100gb osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+
+# this doesn't work with failures bc the log writes are not atomic across the two backends
+#       bluestore bluefs env mirror: true
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-stupid.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-stupid.yaml
new file mode 100644
index 000000000..ca811f131
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/objectstore/bluestore-stupid.yaml
@@ -0,0 +1,43 @@
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        bluestore allocator: stupid
+        # lower the full ratios since we can fill up a 100gb osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+# this doesn't work with failures bc the log writes are not atomic across the two backends
+#        bluestore bluefs env mirror: true
+        bdev enable discard: true
+        bdev async discard: true
+  ceph-deploy:
+    fs: xfs
+    bluestore: yes
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        # lower the full ratios since we can fill up a 100gb osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+        bdev enable discard: true
+        bdev async discard: true
+
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/supported-all-distro/ubuntu_latest.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/supported-all-distro/ubuntu_latest.yaml
new file mode 100644
index 000000000..f20398230
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/supported-all-distro/ubuntu_latest.yaml
@@ -0,0 +1,2 @@
+os_type: ubuntu
+os_version: "20.04"
diff --git a/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/thrashosds-health.yaml b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/thrashosds-health.yaml
new file mode 100644
index 000000000..9903fa578
--- /dev/null
+++ b/qa/suites/upgrade/reef-p2p/reef-p2p-stress-split/thrashosds-health.yaml
@@ -0,0 +1,15 @@
+overrides:
+  ceph:
+    log-ignorelist:
+      - overall HEALTH_
+      - \(OSDMAP_FLAGS\)
+      - \(OSD_
+      - \(PG_
+      - \(POOL_
+      - \(CACHE_POOL_
+      - \(SMALLER_PGP_NUM\)
+      - \(OBJECT_
+      - \(SLOW_OPS\)
+      - \(REQUEST_SLOW\)
+      - \(TOO_FEW_PGS\)
+      - slow request