author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-23 16:45:17 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-23 16:45:17 +0000
commit    b98f2fb9922af9b7a8ec418716d79ee2a4af5b77 (patch)
tree      a0f4f617c881a28eb0d52754b15b0a082bb545e1 /qa/suites/fs
parent    Adding debian version 18.2.2-1. (diff)
Merging upstream version 18.2.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'qa/suites/fs')
-rw-r--r--  qa/suites/fs/cephadm/renamevolume/1-rename.yaml | 4
-rw-r--r--  qa/suites/fs/full/tasks/mgr-osd-full.yaml | 2
l---------  qa/suites/fs/functional/subvol_versions/.qa | 1
l---------  qa/suites/fs/functional/subvol_versions/create_subvol_version_v1.yaml | 1
l---------  qa/suites/fs/functional/subvol_versions/create_subvol_version_v2.yaml | 1
-rw-r--r--  qa/suites/fs/functional/tasks/client-recovery.yaml | 3
-rw-r--r--  qa/suites/fs/functional/tasks/snap-schedule.yaml | 2
-rw-r--r--  qa/suites/fs/functional/tasks/snap_schedule_snapdir.yaml | 2
l---------  qa/suites/fs/mirror-ha/overrides/ignorelist_health.yaml | 1
-rw-r--r--  qa/suites/fs/mirror-ha/overrides/whitelist_health.yaml | 14
l---------  qa/suites/fs/mirror/overrides/ignorelist_health.yaml | 1
-rw-r--r--  qa/suites/fs/mirror/overrides/whitelist_health.yaml | 14
l--------- [-rw-r--r--]  qa/suites/fs/nfs/overrides/ignorelist_health.yaml | 14
-rw-r--r--  qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/ignorelist_upgrade.yaml | 4
-rw-r--r--  qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/quincy.yaml | 32
-rw-r--r--  qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/$ (renamed from qa/suites/fs/workload/tasks/0-subvolume/no-subvolume.yaml) | 0
-rw-r--r--  qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/reef.yaml | 31
-rw-r--r--  qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/v18.2.0.yaml | 31
-rw-r--r--  qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/v18.2.1.yaml | 31
-rw-r--r--  qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client/fuse.yaml | 3
-rw-r--r--  qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client/kclient.yaml (renamed from qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client.yaml) | 0
l---------  qa/suites/fs/valgrind/mirror/overrides/ignorelist_health.yaml | 1
-rw-r--r--  qa/suites/fs/valgrind/mirror/overrides/whitelist_health.yaml | 14
l---------  qa/suites/fs/workload/begin/3-modules.yaml | 1
-rw-r--r--  qa/suites/fs/workload/ranks/1.yaml | 4
-rw-r--r--  qa/suites/fs/workload/ranks/multi/balancer/automatic.yaml | 4
-rw-r--r--  qa/suites/fs/workload/ranks/multi/balancer/distributed.yaml.disabled | 6
-rw-r--r--  qa/suites/fs/workload/ranks/multi/balancer/random.yaml | 10
-rw-r--r--  qa/suites/fs/workload/tasks/3-snaps/yes.yaml | 14
29 files changed, 183 insertions, 63 deletions
diff --git a/qa/suites/fs/cephadm/renamevolume/1-rename.yaml b/qa/suites/fs/cephadm/renamevolume/1-rename.yaml
index 7f9bc8906..e1d5b9b33 100644
--- a/qa/suites/fs/cephadm/renamevolume/1-rename.yaml
+++ b/qa/suites/fs/cephadm/renamevolume/1-rename.yaml
@@ -1,7 +1,11 @@
tasks:
- cephadm.shell:
host.a:
+ - ceph fs fail foo
+ - ceph fs set foo refuse_client_session true
- ceph fs volume rename foo bar --yes-i-really-mean-it
+ - ceph fs set bar joinable true
+ - ceph fs set bar refuse_client_session false
- fs.ready:
timeout: 300
- cephadm.shell:
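Note: the extra steps suggest that `ceph fs volume rename` now refuses to act on a file system that is still joinable or accepting client sessions, so the test fails the volume and blocks clients first, then re-enables both after the rename. The same sequence against a plain cluster (volume names are the test's own):

    ceph fs fail foo                             # take the fs offline
    ceph fs set foo refuse_client_session true   # reject client sessions
    ceph fs volume rename foo bar --yes-i-really-mean-it
    ceph fs set bar joinable true                # allow MDS ranks to join again
    ceph fs set bar refuse_client_session false  # accept clients again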
diff --git a/qa/suites/fs/full/tasks/mgr-osd-full.yaml b/qa/suites/fs/full/tasks/mgr-osd-full.yaml
index b4f673e39..a005f5203 100644
--- a/qa/suites/fs/full/tasks/mgr-osd-full.yaml
+++ b/qa/suites/fs/full/tasks/mgr-osd-full.yaml
@@ -12,7 +12,7 @@ overrides:
debug mds: 20
osd: # force bluestore since it's required for ec overwrites
osd objectstore: bluestore
- bluestore block size: 1073741824
+ bluestore block size: 2147483648
tasks:
- workunit:
cleanup: true
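Note: the bluestore backing size for this full-OSD test doubles from 1073741824 bytes (1 GiB) to 2147483648 bytes (2 GiB), giving the workload more headroom before the OSD full thresholds trip.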
diff --git a/qa/suites/fs/functional/subvol_versions/.qa b/qa/suites/fs/functional/subvol_versions/.qa
new file mode 120000
index 000000000..fea2489fd
--- /dev/null
+++ b/qa/suites/fs/functional/subvol_versions/.qa
@@ -0,0 +1 @@
+../.qa
\ No newline at end of file
diff --git a/qa/suites/fs/functional/subvol_versions/create_subvol_version_v1.yaml b/qa/suites/fs/functional/subvol_versions/create_subvol_version_v1.yaml
new file mode 120000
index 000000000..09cfdb59e
--- /dev/null
+++ b/qa/suites/fs/functional/subvol_versions/create_subvol_version_v1.yaml
@@ -0,0 +1 @@
+.qa/cephfs/overrides/subvol_versions/create_subvol_version_v1.yaml
\ No newline at end of file
diff --git a/qa/suites/fs/functional/subvol_versions/create_subvol_version_v2.yaml b/qa/suites/fs/functional/subvol_versions/create_subvol_version_v2.yaml
new file mode 120000
index 000000000..5a4de14e7
--- /dev/null
+++ b/qa/suites/fs/functional/subvol_versions/create_subvol_version_v2.yaml
@@ -0,0 +1 @@
+.qa/cephfs/overrides/subvol_versions/create_subvol_version_v2.yaml
\ No newline at end of file
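Note: mode 120000 marks these new entries as symlinks; git stores the link target as the blob content, which is why each shows as a one-line file without a trailing newline. The .qa links let the suite resolve shared subvolume-version overrides from qa/cephfs instead of duplicating them.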
diff --git a/qa/suites/fs/functional/tasks/client-recovery.yaml b/qa/suites/fs/functional/tasks/client-recovery.yaml
index e67acc3ab..7ea93a367 100644
--- a/qa/suites/fs/functional/tasks/client-recovery.yaml
+++ b/qa/suites/fs/functional/tasks/client-recovery.yaml
@@ -9,6 +9,9 @@ overrides:
- MDS_CLIENT_LATE_RELEASE
- t responding to mclientcaps
- file system flag refuse_client_session is set
+ - Degraded data redundancy
+ - MDS_CLIENTS_LAGGY
+ - Reduced data availability
tasks:
- cephfs_test_runner:
fail_on_skip: false
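Note: the three new ignorelist entries cover warnings this test provokes on purpose: client eviction legitimately raises MDS_CLIENTS_LAGGY, and the induced disruption surfaces "Degraded data redundancy" and "Reduced data availability" while PGs recover, none of which should fail the run.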
diff --git a/qa/suites/fs/functional/tasks/snap-schedule.yaml b/qa/suites/fs/functional/tasks/snap-schedule.yaml
index f2e62b050..26922abed 100644
--- a/qa/suites/fs/functional/tasks/snap-schedule.yaml
+++ b/qa/suites/fs/functional/tasks/snap-schedule.yaml
@@ -6,7 +6,7 @@ overrides:
debug ms: 1
debug finisher: 20
debug client: 20
- log-whitelist:
+ log-ignorelist:
- OSD full dropping all updates
- OSD near full
- pausewr flag
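Note: log-whitelist is the deprecated spelling of this teuthology option; this change and the snap_schedule_snapdir one below migrate the last two fragments in this suite to log-ignorelist, matching the ignorelist naming used throughout the rest of the commit.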
diff --git a/qa/suites/fs/functional/tasks/snap_schedule_snapdir.yaml b/qa/suites/fs/functional/tasks/snap_schedule_snapdir.yaml
index 7bbcf000f..2a175dbf1 100644
--- a/qa/suites/fs/functional/tasks/snap_schedule_snapdir.yaml
+++ b/qa/suites/fs/functional/tasks/snap_schedule_snapdir.yaml
@@ -6,7 +6,7 @@ overrides:
debug ms: 1
debug finisher: 20
debug client: 20
- log-whitelist:
+ log-ignorelist:
- OSD full dropping all updates
- OSD near full
- pausewr flag
diff --git a/qa/suites/fs/mirror-ha/overrides/ignorelist_health.yaml b/qa/suites/fs/mirror-ha/overrides/ignorelist_health.yaml
new file mode 120000
index 000000000..4cb7d981d
--- /dev/null
+++ b/qa/suites/fs/mirror-ha/overrides/ignorelist_health.yaml
@@ -0,0 +1 @@
+./.qa/cephfs/overrides/ignorelist_health.yaml
\ No newline at end of file
diff --git a/qa/suites/fs/mirror-ha/overrides/whitelist_health.yaml b/qa/suites/fs/mirror-ha/overrides/whitelist_health.yaml
deleted file mode 100644
index d40fa4cb8..000000000
--- a/qa/suites/fs/mirror-ha/overrides/whitelist_health.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-overrides:
- ceph:
- log-ignorelist:
- - overall HEALTH_
- - \(FS_DEGRADED\)
- - \(MDS_FAILED\)
- - \(MDS_DEGRADED\)
- - \(FS_WITH_FAILED_MDS\)
- - \(MDS_DAMAGE\)
- - \(MDS_ALL_DOWN\)
- - \(MDS_UP_LESS_THAN_MAX\)
- - \(FS_INLINE_DATA_DEPRECATED\)
- - Reduced data availability
- - Degraded data redundancy
diff --git a/qa/suites/fs/mirror/overrides/ignorelist_health.yaml b/qa/suites/fs/mirror/overrides/ignorelist_health.yaml
new file mode 120000
index 000000000..4cb7d981d
--- /dev/null
+++ b/qa/suites/fs/mirror/overrides/ignorelist_health.yaml
@@ -0,0 +1 @@
+./.qa/cephfs/overrides/ignorelist_health.yaml
\ No newline at end of file
diff --git a/qa/suites/fs/mirror/overrides/whitelist_health.yaml b/qa/suites/fs/mirror/overrides/whitelist_health.yaml
deleted file mode 100644
index d40fa4cb8..000000000
--- a/qa/suites/fs/mirror/overrides/whitelist_health.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-overrides:
- ceph:
- log-ignorelist:
- - overall HEALTH_
- - \(FS_DEGRADED\)
- - \(MDS_FAILED\)
- - \(MDS_DEGRADED\)
- - \(FS_WITH_FAILED_MDS\)
- - \(MDS_DAMAGE\)
- - \(MDS_ALL_DOWN\)
- - \(MDS_UP_LESS_THAN_MAX\)
- - \(FS_INLINE_DATA_DEPRECATED\)
- - Reduced data availability
- - Degraded data redundancy
diff --git a/qa/suites/fs/nfs/overrides/ignorelist_health.yaml b/qa/suites/fs/nfs/overrides/ignorelist_health.yaml
index 8bfe4dc6f..5cb891a95 100644..120000
--- a/qa/suites/fs/nfs/overrides/ignorelist_health.yaml
+++ b/qa/suites/fs/nfs/overrides/ignorelist_health.yaml
@@ -1,13 +1 @@
-overrides:
- ceph:
- log-ignorelist:
- - overall HEALTH_
- - \(FS_DEGRADED\)
- - \(MDS_FAILED\)
- - \(MDS_DEGRADED\)
- - \(FS_WITH_FAILED_MDS\)
- - \(MDS_DAMAGE\)
- - \(MDS_ALL_DOWN\)
- - \(MDS_UP_LESS_THAN_MAX\)
- - \(FS_INLINE_DATA_DEPRECATED\)
- - \(OSD_DOWN\)
+.qa/cephfs/overrides/ignorelist_health.yaml
\ No newline at end of file
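Note: the mode pair 100644..120000 records a type change: the standalone ignorelist file becomes a symlink to the shared .qa/cephfs/overrides/ignorelist_health.yaml fragment, the same consolidation applied to the mirror, mirror-ha, and valgrind suites in this commit.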
diff --git a/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/ignorelist_upgrade.yaml b/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/ignorelist_upgrade.yaml
new file mode 100644
index 000000000..713adb962
--- /dev/null
+++ b/qa/suites/fs/upgrade/mds_upgrade_sequence/overrides/ignorelist_upgrade.yaml
@@ -0,0 +1,4 @@
+overrides:
+ ceph:
+ log-ignorelist:
+ - OSD_DOWN
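Note: OSD_DOWN is expected during a rolling upgrade, since each OSD is restarted into the new version in turn; ignoring it here keeps the upgrade suite from failing on a transient, anticipated warning.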
diff --git a/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/quincy.yaml b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/quincy.yaml
new file mode 100644
index 000000000..4a21021c0
--- /dev/null
+++ b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/quincy.yaml
@@ -0,0 +1,32 @@
+meta:
+- desc: |
+ setup ceph/quincy
+
+tasks:
+- install:
+ branch: quincy
+ exclude_packages:
+ - ceph-volume
+- print: "**** done install task..."
+- cephadm:
+ image: quay.ceph.io/ceph-ci/ceph:quincy
+ roleless: true
+ cephadm_branch: quincy
+ cephadm_git_url: https://github.com/ceph/ceph
+ conf:
+ osd:
+ #set config option for which cls modules are allowed to be loaded / used
+ osd_class_load_list: "*"
+ osd_class_default_list: "*"
+- print: "**** done end installing quincy cephadm ..."
+- cephadm.shell:
+ host.a:
+ - ceph config set mgr mgr/cephadm/use_repo_digest true --force
+- print: "**** done cephadm.shell ceph config set mgr..."
+- cephadm.shell:
+ host.a:
+ - ceph orch status
+ - ceph orch ps
+ - ceph orch ls
+ - ceph orch host ls
+ - ceph orch device ls
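Note: this fragment bootstraps the cluster on quincy so the suite exercises the MDS upgrade path from two releases back. Setting osd_class_load_list and osd_class_default_list to "*" allows all RADOS object classes to load; ceph-volume is excluded from the package install, presumably because the containerized deployment ships its own copy.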
diff --git a/qa/suites/fs/workload/tasks/0-subvolume/no-subvolume.yaml b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/$
index e69de29bb..e69de29bb 100644
--- a/qa/suites/fs/workload/tasks/0-subvolume/no-subvolume.yaml
+++ b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/$
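Note: an empty file named "$" is a teuthology convention: it makes the scheduler pick exactly one fragment from the directory at random rather than building the full cross product, so each run starts from only one of reef.yaml, v18.2.0.yaml, or v18.2.1.yaml. The already-empty no-subvolume.yaml was simply renamed to serve as the marker.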
diff --git a/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/reef.yaml b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/reef.yaml
new file mode 100644
index 000000000..c53e8b55d
--- /dev/null
+++ b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/reef.yaml
@@ -0,0 +1,31 @@
+meta:
+- desc: |
+ setup ceph/reef
+
+tasks:
+- install:
+ branch: reef
+ exclude_packages:
+ - ceph-volume
+- print: "**** done install task..."
+- cephadm:
+ image: quay.ceph.io/ceph-ci/ceph:reef
+ roleless: true
+ compiled_cephadm_branch: reef
+ conf:
+ osd:
+ #set config option for which cls modules are allowed to be loaded / used
+ osd_class_load_list: "*"
+ osd_class_default_list: "*"
+- print: "**** done end installing reef cephadm ..."
+- cephadm.shell:
+ host.a:
+ - ceph config set mgr mgr/cephadm/use_repo_digest true --force
+- print: "**** done cephadm.shell ceph config set mgr..."
+- cephadm.shell:
+ host.a:
+ - ceph orch status
+ - ceph orch ps
+ - ceph orch ls
+ - ceph orch host ls
+ - ceph orch device ls
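Note: unlike the quincy fragment, this one uses compiled_cephadm_branch rather than cephadm_branch/cephadm_git_url, which appears to reflect cephadm shipping as a compiled (zipapp) binary from reef onward.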
diff --git a/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/v18.2.0.yaml b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/v18.2.0.yaml
new file mode 100644
index 000000000..98bb210d1
--- /dev/null
+++ b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/v18.2.0.yaml
@@ -0,0 +1,31 @@
+meta:
+- desc: |
+ setup ceph/v18.2.0
+
+tasks:
+- install:
+ tag: v18.2.0
+ exclude_packages:
+ - ceph-volume
+- print: "**** done install task..."
+- cephadm:
+ image: quay.io/ceph/ceph:v18.2.0
+ roleless: true
+ compiled_cephadm_branch: reef
+ conf:
+ osd:
+ #set config option for which cls modules are allowed to be loaded / used
+ osd_class_load_list: "*"
+ osd_class_default_list: "*"
+- print: "**** done end installing v18.2.0 cephadm ..."
+- cephadm.shell:
+ host.a:
+ - ceph config set mgr mgr/cephadm/use_repo_digest true --force
+- print: "**** done cephadm.shell ceph config set mgr..."
+- cephadm.shell:
+ host.a:
+ - ceph orch status
+ - ceph orch ps
+ - ceph orch ls
+ - ceph orch host ls
+ - ceph orch device ls
diff --git a/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/v18.2.1.yaml b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/v18.2.1.yaml
new file mode 100644
index 000000000..ce45d9ea9
--- /dev/null
+++ b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/0-from/reef/v18.2.1.yaml
@@ -0,0 +1,31 @@
+meta:
+- desc: |
+ setup ceph/v18.2.1
+
+tasks:
+- install:
+ tag: v18.2.1
+ exclude_packages:
+ - ceph-volume
+- print: "**** done install task..."
+- cephadm:
+ image: quay.io/ceph/ceph:v18.2.1
+ roleless: true
+ compiled_cephadm_branch: reef
+ conf:
+ osd:
+ #set config option for which cls modules are allowed to be loaded / used
+ osd_class_load_list: "*"
+ osd_class_default_list: "*"
+- print: "**** done end installing v18.2.1 cephadm ..."
+- cephadm.shell:
+ host.a:
+ - ceph config set mgr mgr/cephadm/use_repo_digest true --force
+- print: "**** done cephadm.shell ceph config set mgr..."
+- cephadm.shell:
+ host.a:
+ - ceph orch status
+ - ceph orch ps
+ - ceph orch ls
+ - ceph orch host ls
+ - ceph orch device ls
diff --git a/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client/fuse.yaml b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client/fuse.yaml
new file mode 100644
index 000000000..5318fd1a9
--- /dev/null
+++ b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client/fuse.yaml
@@ -0,0 +1,3 @@
+tasks:
+- ceph-fuse:
+- print: "**** done client"
diff --git a/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client.yaml b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client/kclient.yaml
index 92b9dda84..92b9dda84 100644
--- a/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client.yaml
+++ b/qa/suites/fs/upgrade/mds_upgrade_sequence/tasks/2-client/kclient.yaml
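Note: the single 2-client.yaml step becomes a 2-client/ directory holding fuse.yaml and kclient.yaml, so the upgrade sequence is now matrixed over both the FUSE and kernel CephFS clients.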
diff --git a/qa/suites/fs/valgrind/mirror/overrides/ignorelist_health.yaml b/qa/suites/fs/valgrind/mirror/overrides/ignorelist_health.yaml
new file mode 120000
index 000000000..4cb7d981d
--- /dev/null
+++ b/qa/suites/fs/valgrind/mirror/overrides/ignorelist_health.yaml
@@ -0,0 +1 @@
+./.qa/cephfs/overrides/ignorelist_health.yaml
\ No newline at end of file
diff --git a/qa/suites/fs/valgrind/mirror/overrides/whitelist_health.yaml b/qa/suites/fs/valgrind/mirror/overrides/whitelist_health.yaml
deleted file mode 100644
index d40fa4cb8..000000000
--- a/qa/suites/fs/valgrind/mirror/overrides/whitelist_health.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-overrides:
- ceph:
- log-ignorelist:
- - overall HEALTH_
- - \(FS_DEGRADED\)
- - \(MDS_FAILED\)
- - \(MDS_DEGRADED\)
- - \(FS_WITH_FAILED_MDS\)
- - \(MDS_DAMAGE\)
- - \(MDS_ALL_DOWN\)
- - \(MDS_UP_LESS_THAN_MAX\)
- - \(FS_INLINE_DATA_DEPRECATED\)
- - Reduced data availability
- - Degraded data redundancy
diff --git a/qa/suites/fs/workload/begin/3-modules.yaml b/qa/suites/fs/workload/begin/3-modules.yaml
new file mode 120000
index 000000000..1eba706a5
--- /dev/null
+++ b/qa/suites/fs/workload/begin/3-modules.yaml
@@ -0,0 +1 @@
+.qa/cephfs/begin/3-modules.yaml
\ No newline at end of file
diff --git a/qa/suites/fs/workload/ranks/1.yaml b/qa/suites/fs/workload/ranks/1.yaml
index e69de29bb..f9e95daa9 100644
--- a/qa/suites/fs/workload/ranks/1.yaml
+++ b/qa/suites/fs/workload/ranks/1.yaml
@@ -0,0 +1,4 @@
+overrides:
+ ceph:
+ cephfs:
+ max_mds: 1
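Note: the previously empty 1.yaml now pins max_mds: 1 explicitly, so the single-rank leg of the matrix no longer leans on the cluster default and contrasts cleanly with the multi-rank balancer fragments added below.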
diff --git a/qa/suites/fs/workload/ranks/multi/balancer/automatic.yaml b/qa/suites/fs/workload/ranks/multi/balancer/automatic.yaml
new file mode 100644
index 000000000..020eaa4bf
--- /dev/null
+++ b/qa/suites/fs/workload/ranks/multi/balancer/automatic.yaml
@@ -0,0 +1,4 @@
+tasks:
+- exec:
+ mon.a:
+ - ceph fs set cephfs balance_automate true
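Note: balance_automate true enables the automatic MDS subtree balancer for the multi-rank workload; the random fragment below (and the disabled distributed one) instead switch it off and rely on ephemeral pinning.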
diff --git a/qa/suites/fs/workload/ranks/multi/balancer/distributed.yaml.disabled b/qa/suites/fs/workload/ranks/multi/balancer/distributed.yaml.disabled
new file mode 100644
index 000000000..be06d5186
--- /dev/null
+++ b/qa/suites/fs/workload/ranks/multi/balancer/distributed.yaml.disabled
@@ -0,0 +1,6 @@
+# distributed pins would be interesting if we had workloads on multiple clients. We do not yet. So it's disabled.
+tasks:
+- exec:
+ mon.a:
+ - ceph fs set cephfs balance_automate false
+ - ceph fs subvolumegroup pin cephfs qa distributed 1
diff --git a/qa/suites/fs/workload/ranks/multi/balancer/random.yaml b/qa/suites/fs/workload/ranks/multi/balancer/random.yaml
new file mode 100644
index 000000000..977e83fc2
--- /dev/null
+++ b/qa/suites/fs/workload/ranks/multi/balancer/random.yaml
@@ -0,0 +1,10 @@
+overrides:
+ ceph:
+ conf:
+ mds:
+ mds_export_ephemeral_random_max: 0.10
+tasks:
+- exec:
+ mon.a:
+ - ceph fs set cephfs balance_automate false
+ - ceph fs subvolumegroup pin cephfs qa random 0.10
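Note: with automatic balancing off, "subvolumegroup pin ... random 0.10" ephemerally pins roughly 10% of directory subtrees to random ranks, and raising mds_export_ephemeral_random_max to 0.10 lifts the cap (0.01 by default) so that a pin probability this high is accepted.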
diff --git a/qa/suites/fs/workload/tasks/3-snaps/yes.yaml b/qa/suites/fs/workload/tasks/3-snaps/yes.yaml
index 598f7e215..69f53768d 100644
--- a/qa/suites/fs/workload/tasks/3-snaps/yes.yaml
+++ b/qa/suites/fs/workload/tasks/3-snaps/yes.yaml
@@ -1,3 +1,10 @@
+mgrmodules:
+ sequential:
+ - exec:
+ mon.a:
+ - ceph mgr module enable snap_schedule
+ - ceph config set mgr mgr/snap_schedule/allow_m_granularity true
+ - ceph config set mgr mgr/snap_schedule/dump_on_update true
overrides:
ceph:
conf:
@@ -12,11 +19,8 @@ overrides:
tasks:
- exec:
mon.a:
- - ceph mgr module enable snap_schedule
- - ceph config set mgr mgr/snap_schedule/allow_m_granularity true
- - ceph config set mgr mgr/snap_schedule/dump_on_update true
- - ceph fs snap-schedule add --fs=cephfs --path=/ --snap_schedule=1M
- - ceph fs snap-schedule retention add --fs=cephfs --path=/ --retention-spec-or-period=6M3h
+ - ceph fs snap-schedule add --fs=cephfs --path=/ --snap_schedule=1m
+ - ceph fs snap-schedule retention add --fs=cephfs --path=/ --retention-spec-or-period=6m3h
- ceph fs snap-schedule status --fs=cephfs --path=/
- ceph fs snap-schedule list --fs=cephfs --path=/ --recursive=true
- date +%s > START_TIME
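Note: two changes here. Enabling the snap_schedule mgr module moves out of the task list into a reusable mgrmodules sequential block, evidently consumed via the new begin/3-modules.yaml link so modules come up earlier in the job. Separately, the schedule and retention specs switch from 1M/6M3h to 1m/6m3h: the snap-schedule spec is now case-sensitive, with lowercase m for minutes and uppercase M for months, so the old spelling would have meant monthly rather than minutely snapshots. The new spelling in use (fs name and path are the test's own):

    ceph fs snap-schedule add --fs=cephfs --path=/ --snap_schedule=1m
    ceph fs snap-schedule status --fs=cephfs --path=/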