Diffstat (limited to 'qa/suites/kcephfs/recovery/tasks')
l---------  qa/suites/kcephfs/recovery/tasks/.qa                  |  1
-rw-r--r--  qa/suites/kcephfs/recovery/tasks/auto-repair.yaml     | 13
-rw-r--r--  qa/suites/kcephfs/recovery/tasks/backtrace.yaml       |  5
-rw-r--r--  qa/suites/kcephfs/recovery/tasks/client-limits.yaml   | 20
-rw-r--r--  qa/suites/kcephfs/recovery/tasks/client-recovery.yaml | 15
-rw-r--r--  qa/suites/kcephfs/recovery/tasks/damage.yaml          | 27
-rw-r--r--  qa/suites/kcephfs/recovery/tasks/data-scan.yaml       | 19
-rw-r--r--  qa/suites/kcephfs/recovery/tasks/failover.yaml        | 12
-rw-r--r--  qa/suites/kcephfs/recovery/tasks/forward-scrub.yaml   | 14
-rw-r--r--  qa/suites/kcephfs/recovery/tasks/journal-repair.yaml  | 14
-rw-r--r--  qa/suites/kcephfs/recovery/tasks/mds-flush.yaml       |  5
-rw-r--r--  qa/suites/kcephfs/recovery/tasks/mds-full.yaml        | 29
-rw-r--r--  qa/suites/kcephfs/recovery/tasks/pool-perm.yaml       |  5
-rw-r--r--  qa/suites/kcephfs/recovery/tasks/sessionmap.yaml      | 10
-rw-r--r--  qa/suites/kcephfs/recovery/tasks/strays.yaml          |  5
-rw-r--r--  qa/suites/kcephfs/recovery/tasks/volume-client.yaml   |  9
16 files changed, 203 insertions, 0 deletions
diff --git a/qa/suites/kcephfs/recovery/tasks/.qa b/qa/suites/kcephfs/recovery/tasks/.qa
new file mode 120000
index 00000000..a602a035
--- /dev/null
+++ b/qa/suites/kcephfs/recovery/tasks/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/kcephfs/recovery/tasks/auto-repair.yaml b/qa/suites/kcephfs/recovery/tasks/auto-repair.yaml
new file mode 100644
index 00000000..90d0e7bc
--- /dev/null
+++ b/qa/suites/kcephfs/recovery/tasks/auto-repair.yaml
@@ -0,0 +1,13 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - force file system read-only
+      - bad backtrace
+      - MDS in read-only mode
+      - \(MDS_READ_ONLY\)
+
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_auto_repair
diff --git a/qa/suites/kcephfs/recovery/tasks/backtrace.yaml b/qa/suites/kcephfs/recovery/tasks/backtrace.yaml
new file mode 100644
index 00000000..d740a5f6
--- /dev/null
+++ b/qa/suites/kcephfs/recovery/tasks/backtrace.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_backtrace
diff --git a/qa/suites/kcephfs/recovery/tasks/client-limits.yaml b/qa/suites/kcephfs/recovery/tasks/client-limits.yaml
new file mode 100644
index 00000000..f816cee9
--- /dev/null
+++ b/qa/suites/kcephfs/recovery/tasks/client-limits.yaml
@@ -0,0 +1,20 @@
+
+overrides:
+  ceph:
+    log-whitelist:
+      - responding to mclientcaps\(revoke\)
+      - not advance its oldest_client_tid
+      - failing to advance its oldest client/flush tid
+      - Too many inodes in cache
+      - failing to respond to cache pressure
+      - slow requests are blocked
+      - failing to respond to capability release
+      - MDS cache is too large
+      - \(MDS_CLIENT_OLDEST_TID\)
+      - \(MDS_CACHE_OVERSIZED\)
+
+tasks:
+  - cephfs_test_runner:
+      fail_on_skip: false
+      modules:
+        - tasks.cephfs.test_client_limits
diff --git a/qa/suites/kcephfs/recovery/tasks/client-recovery.yaml b/qa/suites/kcephfs/recovery/tasks/client-recovery.yaml
new file mode 100644
index 00000000..725a259d
--- /dev/null
+++ b/qa/suites/kcephfs/recovery/tasks/client-recovery.yaml
@@ -0,0 +1,15 @@
+
+# The task interferes with the network, so we need
+# to permit OSDs to complain about that.
+overrides:
+  ceph:
+    log-whitelist:
+      - but it is still running
+      - slow request
+      - evicting unresponsive client
+
+tasks:
+  - cephfs_test_runner:
+      fail_on_skip: false
+      modules:
+        - tasks.cephfs.test_client_recovery
diff --git a/qa/suites/kcephfs/recovery/tasks/damage.yaml b/qa/suites/kcephfs/recovery/tasks/damage.yaml
new file mode 100644
index 00000000..9ae738f0
--- /dev/null
+++ b/qa/suites/kcephfs/recovery/tasks/damage.yaml
@@ -0,0 +1,27 @@
+
+overrides:
+  ceph:
+    log-whitelist:
+      - bad backtrace
+      - object missing on disk
+      - error reading table object
+      - error reading sessionmap
+      - Error loading MDS rank
+      - missing journal object
+      - Error recovering journal
+      - error decoding table object
+      - failed to read JournalPointer
+      - Corrupt directory entry
+      - Corrupt fnode header
+      - corrupt sessionmap header
+      - Corrupt dentry
+      - Scrub error on inode
+      - Metadata damage detected
+      - MDS_READ_ONLY
+      - force file system read-only
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_damage
+
diff --git a/qa/suites/kcephfs/recovery/tasks/data-scan.yaml b/qa/suites/kcephfs/recovery/tasks/data-scan.yaml
new file mode 100644
index 00000000..8a05e22a
--- /dev/null
+++ b/qa/suites/kcephfs/recovery/tasks/data-scan.yaml
@@ -0,0 +1,19 @@
+
+overrides:
+  ceph:
+    log-whitelist:
+      - bad backtrace
+      - object missing on disk
+      - error reading table object
+      - error reading sessionmap
+      - unmatched fragstat
+      - was unreadable, recreating it now
+      - Scrub error on inode
+      - Metadata damage detected
+      - inconsistent rstat on inode
+      - Error recovering journal
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_data_scan
diff --git a/qa/suites/kcephfs/recovery/tasks/failover.yaml b/qa/suites/kcephfs/recovery/tasks/failover.yaml
new file mode 100644
index 00000000..ab7b4d37
--- /dev/null
+++ b/qa/suites/kcephfs/recovery/tasks/failover.yaml
@@ -0,0 +1,12 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - not responding, replacing
+      - \(MDS_INSUFFICIENT_STANDBY\)
+      - \(MDS_ALL_DOWN\)
+      - \(MDS_UP_LESS_THAN_MAX\)
+tasks:
+  - cephfs_test_runner:
+      fail_on_skip: false
+      modules:
+        - tasks.cephfs.test_failover
diff --git a/qa/suites/kcephfs/recovery/tasks/forward-scrub.yaml b/qa/suites/kcephfs/recovery/tasks/forward-scrub.yaml
new file mode 100644
index 00000000..b92cf105
--- /dev/null
+++ b/qa/suites/kcephfs/recovery/tasks/forward-scrub.yaml
@@ -0,0 +1,14 @@
+
+overrides:
+  ceph:
+    log-whitelist:
+      - inode wrongly marked free
+      - bad backtrace on inode
+      - inode table repaired for inode
+      - Scrub error on inode
+      - Metadata damage detected
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_forward_scrub
diff --git a/qa/suites/kcephfs/recovery/tasks/journal-repair.yaml b/qa/suites/kcephfs/recovery/tasks/journal-repair.yaml
new file mode 100644
index 00000000..66f819d0
--- /dev/null
+++ b/qa/suites/kcephfs/recovery/tasks/journal-repair.yaml
@@ -0,0 +1,14 @@
+
+overrides:
+  ceph:
+    log-whitelist:
+      - bad backtrace on directory inode
+      - error reading table object
+      - Metadata damage detected
+      - slow requests are blocked
+      - Behind on trimming
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_journal_repair
diff --git a/qa/suites/kcephfs/recovery/tasks/mds-flush.yaml b/qa/suites/kcephfs/recovery/tasks/mds-flush.yaml
new file mode 100644
index 00000000..d59a8ad5
--- /dev/null
+++ b/qa/suites/kcephfs/recovery/tasks/mds-flush.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_flush
diff --git a/qa/suites/kcephfs/recovery/tasks/mds-full.yaml b/qa/suites/kcephfs/recovery/tasks/mds-full.yaml
new file mode 100644
index 00000000..e9744e71
--- /dev/null
+++ b/qa/suites/kcephfs/recovery/tasks/mds-full.yaml
@@ -0,0 +1,29 @@
+
+overrides:
+  ceph:
+    cephfs_ec_profile:
+      - disabled
+    log-whitelist:
+      - OSD full dropping all updates
+      - OSD near full
+      - pausewr flag
+      - failsafe engaged, dropping updates
+      - failsafe disengaged, no longer dropping
+      - is full \(reached quota
+      - POOL_FULL
+      - POOL_BACKFILLFULL
+    conf:
+      mon:
+        mon osd nearfull ratio: 0.6
+        mon osd backfillfull ratio: 0.6
+        mon osd full ratio: 0.7
+      osd:
+        osd mon report interval: 5
+        osd objectstore: memstore
+        osd failsafe full ratio: 1.0
+        memstore device bytes: 200000000
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_full
diff --git a/qa/suites/kcephfs/recovery/tasks/pool-perm.yaml b/qa/suites/kcephfs/recovery/tasks/pool-perm.yaml
new file mode 100644
index 00000000..f220626d
--- /dev/null
+++ b/qa/suites/kcephfs/recovery/tasks/pool-perm.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_pool_perm
diff --git a/qa/suites/kcephfs/recovery/tasks/sessionmap.yaml b/qa/suites/kcephfs/recovery/tasks/sessionmap.yaml
new file mode 100644
index 00000000..88ae6019
--- /dev/null
+++ b/qa/suites/kcephfs/recovery/tasks/sessionmap.yaml
@@ -0,0 +1,10 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - client session with non-allowable root
+
+tasks:
+  - cephfs_test_runner:
+      fail_on_skip: false
+      modules:
+        - tasks.cephfs.test_sessionmap
diff --git a/qa/suites/kcephfs/recovery/tasks/strays.yaml b/qa/suites/kcephfs/recovery/tasks/strays.yaml
new file mode 100644
index 00000000..2809fc14
--- /dev/null
+++ b/qa/suites/kcephfs/recovery/tasks/strays.yaml
@@ -0,0 +1,5 @@
+
+tasks:
+  - cephfs_test_runner:
+      modules:
+        - tasks.cephfs.test_strays
diff --git a/qa/suites/kcephfs/recovery/tasks/volume-client.yaml b/qa/suites/kcephfs/recovery/tasks/volume-client.yaml
new file mode 100644
index 00000000..9ecaaf4f
--- /dev/null
+++ b/qa/suites/kcephfs/recovery/tasks/volume-client.yaml
@@ -0,0 +1,9 @@
+overrides:
+  ceph:
+    log-whitelist:
+      - MON_DOWN
+tasks:
+  - cephfs_test_runner:
+      fail_on_skip: false
+      modules:
+        - tasks.cephfs.test_volume_client
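
For context on how these fragments are consumed: teuthology deep-merges each tasks/ file with the other facets of the kcephfs/recovery suite (cluster roles, kernel-client mount, and so on) into a single job description, so the overrides.ceph.log-whitelist entries land next to the cephfs_test_runner task. A minimal sketch of such a merged job follows; the roles, install, ceph, and kclient portions are assumptions for illustration only and are not part of this diff.

    # Illustrative merged job (sketch only): roles and mount tasks below are
    # assumed; only the cephfs_test_runner/overrides parts come from the
    # fragments added in this change.
    roles:
      - [mon.a, mgr.x, mds.a, osd.0, osd.1, client.0]
    tasks:
      - install:          # install ceph packages on the test nodes
      - ceph:             # bring up the cluster
      - kclient:          # mount CephFS with the kernel client
      - cephfs_test_runner:
          fail_on_skip: false
          modules:
            - tasks.cephfs.test_sessionmap
    overrides:
      ceph:
        log-whitelist:
          - client session with non-allowable root

Keeping one test module per tasks/ fragment lets the suite matrix every recovery test against every cluster and mount combination defined elsewhere in kcephfs/recovery.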