author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
commit     e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree       64f88b554b444a49f656b6c656111a145cbbaa28 /qa/suites/rados/perf
parent     Initial commit. (diff)
download   ceph-e6918187568dbd01842d8d1d2c808ce16a894239.tar.xz
           ceph-e6918187568dbd01842d8d1d2c808ce16a894239.zip
Adding upstream version 18.2.2. (tag: upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'qa/suites/rados/perf')
32 files changed, 571 insertions(+), 0 deletions(-)
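A note on layout (context, not part of the diff): in Ceph's teuthology QA tree, the empty file named '%' marks a suite directory for convolution, so the scheduler generates one job per combination of one fragment from each subdirectory (objectstore, scheduler, settings, workloads, plus the mon_election facet) merged with the top-level fragments, and the '.qa' symlinks let relative references resolve back to the shared qa/ root. A rough sketch of one composed job, assuming the usual deep-merge of fragments (abridged and illustrative only):

overrides:
  ceph:
    conf:
      global:
        osd client message cap: 5000    # from ceph.yaml
      osd:
        osd objectstore: bluestore      # from objectstore/bluestore-bitmap.yaml
        bluestore allocator: bitmap
        osd op queue: wpq               # from scheduler/wpq_default_shards.yaml
roles:
- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
tasks:
- install:
- ceph:
    fs: xfs
- ssh_keys:
- cbt:                                  # from one workloads/ fragment
    benchmarks:
      radosbench:
        op_size: [4096]
        time: 60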
diff --git a/qa/suites/rados/perf/% b/qa/suites/rados/perf/%
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/rados/perf/%
diff --git a/qa/suites/rados/perf/.qa b/qa/suites/rados/perf/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/rados/perf/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/perf/ceph.yaml b/qa/suites/rados/perf/ceph.yaml
new file mode 100644
index 000000000..ca229dd46
--- /dev/null
+++ b/qa/suites/rados/perf/ceph.yaml
@@ -0,0 +1,19 @@
+overrides:
+  ceph:
+    conf:
+      global:
+        osd client message cap: 5000
+roles:
+- [mon.a, mgr.x, osd.0, osd.1, osd.2, client.0]
+tasks:
+- install:
+- ceph:
+    fs: xfs
+    wait-for-scrub: false
+    log-ignorelist:
+      - \(PG_
+      - \(OSD_
+      - \(OBJECT_
+      - overall HEALTH
+      - \(POOL_APP_NOT_ENABLED\)
+- ssh_keys:
diff --git a/qa/suites/rados/perf/mon_election b/qa/suites/rados/perf/mon_election
new file mode 120000
index 000000000..3f331e621
--- /dev/null
+++ b/qa/suites/rados/perf/mon_election
@@ -0,0 +1 @@
+.qa/mon_election
\ No newline at end of file
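For orientation (commentary, not part of the commit): ceph.yaml is the base fragment every generated job shares. It deploys a single node carrying one monitor, one manager, three OSDs, and one client, raises 'osd client message cap' to 5000 so benchmark clients can keep more requests in flight, and ignorelists the health warnings a small, quickly-filled cluster is expected to raise. The mon_election symlink pulls in the shared facet that varies the monitor election strategy; sketched below from the usual qa/mon_election layout (exact contents assumed, not shown in this diff):

# classic.yaml (assumed contents)
overrides:
  ceph:
    conf:
      global:
        mon election default strategy: 1
# connectivity.yaml (assumed) is the same shape with strategy 3.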
diff --git a/qa/suites/rados/perf/objectstore/.qa b/qa/suites/rados/perf/objectstore/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/rados/perf/objectstore/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/perf/objectstore/bluestore-basic-min-osd-mem-target.yaml b/qa/suites/rados/perf/objectstore/bluestore-basic-min-osd-mem-target.yaml
new file mode 100644
index 000000000..32f596da1
--- /dev/null
+++ b/qa/suites/rados/perf/objectstore/bluestore-basic-min-osd-mem-target.yaml
@@ -0,0 +1,25 @@
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd objectstore: bluestore
+        osd memory target: 2147483648 # min recommended is 2_G
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        # lower the full ratios since we can fill up a 100gb osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+# this doesn't work with failures bc the log writes are not atomic across the two backends
+#        bluestore bluefs env mirror: true
+        bdev enable discard: true
+        bdev async discard: true
+
diff --git a/qa/suites/rados/perf/objectstore/bluestore-bitmap.yaml b/qa/suites/rados/perf/objectstore/bluestore-bitmap.yaml
new file mode 100644
index 000000000..b18e04bee
--- /dev/null
+++ b/qa/suites/rados/perf/objectstore/bluestore-bitmap.yaml
@@ -0,0 +1,43 @@
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        bluestore allocator: bitmap
+        # lower the full ratios since we can fill up a 100gb osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+# this doesn't work with failures bc the log writes are not atomic across the two backends
+#        bluestore bluefs env mirror: true
+        bdev enable discard: true
+        bdev async discard: true
+  ceph-deploy:
+    fs: xfs
+    bluestore: yes
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        # lower the full ratios since we can fill up a 100gb osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+        bdev enable discard: true
+        bdev async discard: true
+
diff --git a/qa/suites/rados/perf/objectstore/bluestore-comp.yaml b/qa/suites/rados/perf/objectstore/bluestore-comp.yaml
new file mode 100644
index 000000000..b408032fd
--- /dev/null
+++ b/qa/suites/rados/perf/objectstore/bluestore-comp.yaml
@@ -0,0 +1,23 @@
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore compression mode: aggressive
+        bluestore fsck on mount: true
+        # lower the full ratios since we can fill up a 100gb osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+
+# this doesn't work with failures bc the log writes are not atomic across the two backends
+#        bluestore bluefs env mirror: true
diff --git a/qa/suites/rados/perf/objectstore/bluestore-low-osd-mem-target.yaml b/qa/suites/rados/perf/objectstore/bluestore-low-osd-mem-target.yaml
new file mode 100644
index 000000000..b2a49790b
--- /dev/null
+++ b/qa/suites/rados/perf/objectstore/bluestore-low-osd-mem-target.yaml
@@ -0,0 +1,25 @@
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd objectstore: bluestore
+        osd memory target: 1610612736 # reduced to 1.5_G
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        # lower the full ratios since we can fill up a 100gb osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+# this doesn't work with failures bc the log writes are not atomic across the two backends
+#        bluestore bluefs env mirror: true
+        bdev enable discard: true
+        bdev async discard: true
+
diff --git a/qa/suites/rados/perf/objectstore/bluestore-stupid.yaml b/qa/suites/rados/perf/objectstore/bluestore-stupid.yaml
new file mode 100644
index 000000000..ca811f131
--- /dev/null
+++ b/qa/suites/rados/perf/objectstore/bluestore-stupid.yaml
@@ -0,0 +1,43 @@
+overrides:
+  thrashosds:
+    bdev_inject_crash: 2
+    bdev_inject_crash_probability: .5
+  ceph:
+    fs: xfs
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        bluestore allocator: stupid
+        # lower the full ratios since we can fill up a 100gb osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+# this doesn't work with failures bc the log writes are not atomic across the two backends
+#        bluestore bluefs env mirror: true
+        bdev enable discard: true
+        bdev async discard: true
+  ceph-deploy:
+    fs: xfs
+    bluestore: yes
+    conf:
+      osd:
+        osd objectstore: bluestore
+        bluestore block size: 96636764160
+        debug bluestore: 20
+        debug bluefs: 20
+        debug rocksdb: 10
+        bluestore fsck on mount: true
+        # lower the full ratios since we can fill up a 100gb osd so quickly
+        mon osd full ratio: .9
+        mon osd backfillfull_ratio: .85
+        mon osd nearfull ratio: .8
+        osd failsafe full ratio: .95
+        bdev enable discard: true
+        bdev async discard: true
+
diff --git a/qa/suites/rados/perf/openstack.yaml b/qa/suites/rados/perf/openstack.yaml
new file mode 100644
index 000000000..f4d1349b4
--- /dev/null
+++ b/qa/suites/rados/perf/openstack.yaml
@@ -0,0 +1,4 @@
+openstack:
+  - volumes: # attached to each instance
+      count: 3
+      size: 30 # GB
diff --git a/qa/suites/rados/perf/scheduler/.qa b/qa/suites/rados/perf/scheduler/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/rados/perf/scheduler/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
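Two details of the objectstore facet worth spelling out (commentary, not a suite fragment): 'bluestore block size: 96636764160' is 90 GiB, which is why the comments speak of filling up 'a 100gb osd so quickly' and why the full ratios are lowered, and the two memory-target fragments differ from the plain variants only in pinning 'osd memory target':

osd_memory_target_variants:
  # 2 GiB = 2 * 1024^3 = 2147483648 bytes: the documented minimum recommendation.
  bluestore-basic-min-osd-mem-target: 2147483648
  # 1.5 GiB = 1.5 * 1024^3 = 1610612736 bytes: deliberately below that minimum,
  # presumably to exercise the OSD cache autotuner under memory pressure.
  bluestore-low-osd-mem-target: 1610612736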
diff --git a/qa/suites/rados/perf/scheduler/dmclock_1Shard_16Threads.yaml b/qa/suites/rados/perf/scheduler/dmclock_1Shard_16Threads.yaml
new file mode 100644
index 000000000..10388ad72
--- /dev/null
+++ b/qa/suites/rados/perf/scheduler/dmclock_1Shard_16Threads.yaml
@@ -0,0 +1,7 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd op num shards: 1
+        osd op num threads per shard: 16
+        osd op queue: mclock_scheduler
diff --git a/qa/suites/rados/perf/scheduler/dmclock_default_shards.yaml b/qa/suites/rados/perf/scheduler/dmclock_default_shards.yaml
new file mode 100644
index 000000000..57a0ed912
--- /dev/null
+++ b/qa/suites/rados/perf/scheduler/dmclock_default_shards.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd op queue: mclock_scheduler
diff --git a/qa/suites/rados/perf/scheduler/wpq_default_shards.yaml b/qa/suites/rados/perf/scheduler/wpq_default_shards.yaml
new file mode 100644
index 000000000..25d358f27
--- /dev/null
+++ b/qa/suites/rados/perf/scheduler/wpq_default_shards.yaml
@@ -0,0 +1,5 @@
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd op queue: wpq
diff --git a/qa/suites/rados/perf/settings/.qa b/qa/suites/rados/perf/settings/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/rados/perf/settings/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
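The scheduler facet pits mclock_scheduler against wpq at their default sharding, plus one variant that forces a single op shard with sixteen worker threads so mclock arbitrates all I/O through one queue. A further variant would drop in as just another fragment; for example, a hypothetical two-shard mclock configuration (illustrative only, not in this commit):

overrides:
  ceph:
    conf:
      osd:
        osd op num shards: 2
        osd op num threads per shard: 8
        osd op queue: mclock_scheduler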
diff --git a/qa/suites/rados/perf/settings/optimized.yaml b/qa/suites/rados/perf/settings/optimized.yaml
new file mode 100644
index 000000000..dc4dcbb96
--- /dev/null
+++ b/qa/suites/rados/perf/settings/optimized.yaml
@@ -0,0 +1,74 @@
+overrides:
+  ceph:
+    conf:
+      mon:
+        debug mon: "0/0"
+        debug ms: "0/0"
+        debug paxos: "0/0"
+      osd:
+        debug filestore: "0/0"
+        debug journal: "0/0"
+        debug ms: "0/0"
+        debug osd: "0/0"
+      global:
+        auth client required: none
+        auth cluster required: none
+        auth service required: none
+        auth supported: none
+
+        debug lockdep: "0/0"
+        debug context: "0/0"
+        debug crush: "0/0"
+        debug mds: "0/0"
+        debug mds balancer: "0/0"
+        debug mds locker: "0/0"
+        debug mds log: "0/0"
+        debug mds log expire: "0/0"
+        debug mds migrator: "0/0"
+        debug buffer: "0/0"
+        debug timer: "0/0"
+        debug filer: "0/0"
+        debug striper: "0/0"
+        debug objecter: "0/0"
+        debug rados: "0/0"
+        debug rbd: "0/0"
+        debug rbd mirror: "0/0"
+        debug rbd replay: "0/0"
+        debug journaler: "0/0"
+        debug objectcacher: "0/0"
+        debug client: "0/0"
+        debug osd: "0/0"
+        debug optracker: "0/0"
+        debug objclass: "0/0"
+        debug filestore: "0/0"
+        debug journal: "0/0"
+        debug ms: "0/0"
+        debug mon: "0/0"
+        debug monc: "0/0"
+        debug paxos: "0/0"
+        debug tp: "0/0"
+        debug auth: "0/0"
+        debug crypto: "0/0"
+        debug finisher: "0/0"
+        debug heartbeatmap: "0/0"
+        debug perfcounter: "0/0"
+        debug rgw: "0/0"
+        debug rgw sync: "0/0"
+        debug civetweb: "0/0"
+        debug javaclient: "0/0"
+        debug asok: "0/0"
+        debug throttle: "0/0"
+        debug refs: "0/0"
+        debug compressor: "0/0"
+        debug bluestore: "0/0"
+        debug bluefs: "0/0"
+        debug bdev: "0/0"
+        debug kstore: "0/0"
+        debug rocksdb: "0/0"
+        debug leveldb: "0/0"
+        debug memdb: "0/0"
+        debug fuse: "0/0"
+        debug mgr: "0/0"
+        debug mgrc: "0/0"
+        debug dpdk: "0/0"
+        debug eventtrace: "0/0"
diff --git a/qa/suites/rados/perf/ubuntu_latest.yaml b/qa/suites/rados/perf/ubuntu_latest.yaml
new file mode 120000
index 000000000..3a09f9abb
--- /dev/null
+++ b/qa/suites/rados/perf/ubuntu_latest.yaml
@@ -0,0 +1 @@
+.qa/distros/supported/ubuntu_latest.yaml
\ No newline at end of file
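Ceph debug levels have the form 'log/memory': the first number controls what is written to the log file, the second what is kept in the in-memory ring buffer for crash dumps, so "0/0" silences a subsystem completely. optimized.yaml therefore strips both logging and cephx authentication out of the measured path. A minimal excerpt of the pattern (restated from the file above):

overrides:
  ceph:
    conf:
      global:
        # "0/0" = no file logging, no in-memory gather buffer.
        # Quoted by convention so the value is unambiguously a string.
        debug osd: "0/0"
        auth supported: none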
diff --git a/qa/suites/rados/perf/workloads/.qa b/qa/suites/rados/perf/workloads/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rados/perf/workloads/fio_4K_rand_read.yaml b/qa/suites/rados/perf/workloads/fio_4K_rand_read.yaml
new file mode 100644
index 000000000..d5ef33ab3
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/fio_4K_rand_read.yaml
@@ -0,0 +1,24 @@
+tasks:
+- cbt:
+    benchmarks:
+      librbdfio:
+        op_size: [4096]
+        time: 60
+        mode: ['randread']
+        norandommap: True
+        vol_size: 4096
+        procs_per_volume: [1]
+        volumes_per_client: [2]
+        iodepth: [32]
+        osd_ra: [4096]
+        pool_profile: 'rbd'
+        log_avg_msec: 100
+    cluster:
+      user: 'ubuntu'
+      osds_per_node: 3
+      iterations: 1
+      pool_profiles:
+        rbd:
+          pg_size: 128
+          pgp_size: 128
+          replication: 3
diff --git a/qa/suites/rados/perf/workloads/fio_4K_rand_rw.yaml b/qa/suites/rados/perf/workloads/fio_4K_rand_rw.yaml
new file mode 100644
index 000000000..14d43f143
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/fio_4K_rand_rw.yaml
@@ -0,0 +1,24 @@
+tasks:
+- cbt:
+    benchmarks:
+      librbdfio:
+        op_size: [4096]
+        time: 60
+        mode: ['randrw']
+        norandommap: True
+        vol_size: 4096
+        procs_per_volume: [1]
+        volumes_per_client: [2]
+        iodepth: [32]
+        osd_ra: [4096]
+        pool_profile: 'rbd'
+        log_avg_msec: 100
+    cluster:
+      user: 'ubuntu'
+      osds_per_node: 3
+      iterations: 1
+      pool_profiles:
+        rbd:
+          pg_size: 128
+          pgp_size: 128
+          replication: 3
diff --git a/qa/suites/rados/perf/workloads/fio_4M_rand_read.yaml b/qa/suites/rados/perf/workloads/fio_4M_rand_read.yaml
new file mode 100644
index 000000000..b07432243
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/fio_4M_rand_read.yaml
@@ -0,0 +1,24 @@
+tasks:
+- cbt:
+    benchmarks:
+      librbdfio:
+        op_size: [4194304]
+        time: 60
+        mode: ['randread']
+        norandommap: True
+        vol_size: 4096
+        procs_per_volume: [1]
+        volumes_per_client: [2]
+        iodepth: [32]
+        osd_ra: [4096]
+        pool_profile: 'rbd'
+        log_avg_msec: 100
+    cluster:
+      user: 'ubuntu'
+      osds_per_node: 3
+      iterations: 1
+      pool_profiles:
+        rbd:
+          pg_size: 128
+          pgp_size: 128
+          replication: 3
diff --git a/qa/suites/rados/perf/workloads/fio_4M_rand_rw.yaml b/qa/suites/rados/perf/workloads/fio_4M_rand_rw.yaml
new file mode 100644
index 000000000..5fd6e2877
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/fio_4M_rand_rw.yaml
@@ -0,0 +1,24 @@
+tasks:
+- cbt:
+    benchmarks:
+      librbdfio:
+        op_size: [4194304]
+        time: 60
+        mode: ['randrw']
+        norandommap: True
+        vol_size: 4096
+        procs_per_volume: [1]
+        volumes_per_client: [2]
+        iodepth: [32]
+        osd_ra: [4096]
+        pool_profile: 'rbd'
+        log_avg_msec: 100
+    cluster:
+      user: 'ubuntu'
+      osds_per_node: 3
+      iterations: 1
+      pool_profiles:
+        rbd:
+          pg_size: 128
+          pgp_size: 128
+          replication: 3
diff --git a/qa/suites/rados/perf/workloads/fio_4M_rand_write.yaml b/qa/suites/rados/perf/workloads/fio_4M_rand_write.yaml
new file mode 100644
index 000000000..2d9d83611
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/fio_4M_rand_write.yaml
@@ -0,0 +1,24 @@
+tasks:
+- cbt:
+    benchmarks:
+      librbdfio:
+        op_size: [4194304]
+        time: 60
+        mode: ['randwrite']
+        norandommap: True
+        vol_size: 4096
+        procs_per_volume: [1]
+        volumes_per_client: [2]
+        iodepth: [32]
+        osd_ra: [4096]
+        pool_profile: 'rbd'
+        log_avg_msec: 100
+    cluster:
+      user: 'ubuntu'
+      osds_per_node: 3
+      iterations: 1
+      pool_profiles:
+        rbd:
+          pg_size: 128
+          pgp_size: 128
+          replication: 3
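All five librbdfio fragments above share one template — fio driving two RBD volumes per client (vol_size 4096, presumably MB) at iodepth 32 for 60 seconds against a 3x-replicated, 128-PG pool — and vary only operation size and access mode. In summary (commentary, not a suite fragment; op_size is in bytes, so 4096 = 4 KiB and 4194304 = 4 MiB):

librbdfio_variants:
  fio_4K_rand_read:  {op_size: 4096,    mode: randread}
  fio_4K_rand_rw:    {op_size: 4096,    mode: randrw}
  fio_4M_rand_read:  {op_size: 4194304, mode: randread}
  fio_4M_rand_rw:    {op_size: 4194304, mode: randrw}
  fio_4M_rand_write: {op_size: 4194304, mode: randwrite}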
diff --git a/qa/suites/rados/perf/workloads/radosbench_4K_rand_read.yaml b/qa/suites/rados/perf/workloads/radosbench_4K_rand_read.yaml
new file mode 100644
index 000000000..f1de9b41b
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/radosbench_4K_rand_read.yaml
@@ -0,0 +1,24 @@
+tasks:
+- cbt:
+    benchmarks:
+      radosbench:
+        concurrent_ops: 4
+        concurrent_procs: 2
+        op_size: [4096]
+        pool_monitoring_list:
+          - collectl
+        pool_profile: 'replicated'
+        run_monitoring_list:
+          - collectl
+        time: 60
+        write_only: false
+        readmode: 'rand'
+    cluster:
+      user: 'ubuntu'
+      osds_per_node: 3
+      iterations: 1
+      pool_profiles:
+        replicated:
+          pg_size: 256
+          pgp_size: 256
+          replication: 'replicated'
diff --git a/qa/suites/rados/perf/workloads/radosbench_4K_seq_read.yaml b/qa/suites/rados/perf/workloads/radosbench_4K_seq_read.yaml
new file mode 100644
index 000000000..8fb204a2f
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/radosbench_4K_seq_read.yaml
@@ -0,0 +1,23 @@
+tasks:
+- cbt:
+    benchmarks:
+      radosbench:
+        concurrent_ops: 4
+        concurrent_procs: 2
+        op_size: [4096]
+        pool_monitoring_list:
+          - collectl
+        pool_profile: 'replicated'
+        run_monitoring_list:
+          - collectl
+        time: 60
+        write_only: false
+    cluster:
+      user: 'ubuntu'
+      osds_per_node: 3
+      iterations: 1
+      pool_profiles:
+        replicated:
+          pg_size: 256
+          pgp_size: 256
+          replication: 'replicated'
diff --git a/qa/suites/rados/perf/workloads/radosbench_4M_rand_read.yaml b/qa/suites/rados/perf/workloads/radosbench_4M_rand_read.yaml
new file mode 100644
index 000000000..cc1c74489
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/radosbench_4M_rand_read.yaml
@@ -0,0 +1,24 @@
+tasks:
+- cbt:
+    benchmarks:
+      radosbench:
+        concurrent_ops: 4
+        concurrent_procs: 2
+        op_size: [4194304]
+        pool_monitoring_list:
+          - collectl
+        pool_profile: 'replicated'
+        run_monitoring_list:
+          - collectl
+        time: 60
+        write_only: false
+        readmode: 'rand'
+    cluster:
+      user: 'ubuntu'
+      osds_per_node: 3
+      iterations: 1
+      pool_profiles:
+        replicated:
+          pg_size: 256
+          pgp_size: 256
+          replication: 'replicated'
diff --git a/qa/suites/rados/perf/workloads/radosbench_4M_seq_read.yaml b/qa/suites/rados/perf/workloads/radosbench_4M_seq_read.yaml
new file mode 100644
index 000000000..3ab55cf51
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/radosbench_4M_seq_read.yaml
@@ -0,0 +1,23 @@
+tasks:
+- cbt:
+    benchmarks:
+      radosbench:
+        concurrent_ops: 4
+        concurrent_procs: 2
+        op_size: [4194304]
+        pool_monitoring_list:
+          - collectl
+        pool_profile: 'replicated'
+        run_monitoring_list:
+          - collectl
+        time: 60
+        write_only: false
+    cluster:
+      user: 'ubuntu'
+      osds_per_node: 3
+      iterations: 1
+      pool_profiles:
+        replicated:
+          pg_size: 256
+          pgp_size: 256
+          replication: 'replicated'
diff --git a/qa/suites/rados/perf/workloads/radosbench_4M_write.yaml b/qa/suites/rados/perf/workloads/radosbench_4M_write.yaml
new file mode 100644
index 000000000..f6a5d715c
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/radosbench_4M_write.yaml
@@ -0,0 +1,23 @@
+tasks:
+- cbt:
+    benchmarks:
+      radosbench:
+        concurrent_ops: 4
+        concurrent_procs: 2
+        op_size: [4194304]
+        pool_monitoring_list:
+          - collectl
+        pool_profile: 'replicated'
+        run_monitoring_list:
+          - collectl
+        time: 60
+        write_only: true
+    cluster:
+      user: 'ubuntu'
+      osds_per_node: 3
+      iterations: 1
+      pool_profiles:
+        replicated:
+          pg_size: 256
+          pgp_size: 256
+          replication: 'replicated'
diff --git a/qa/suites/rados/perf/workloads/radosbench_omap_write.yaml b/qa/suites/rados/perf/workloads/radosbench_omap_write.yaml
new file mode 100644
index 000000000..5df4674d9
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/radosbench_omap_write.yaml
@@ -0,0 +1,7 @@
+tasks:
+- radosbench:
+    clients: [client.0]
+    write-omap: True
+    objectsize: 4096
+    size: 4096
+    time: 300
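The radosbench fragments follow the same cbt pattern — four concurrent ops across two processes for 60 seconds against a 256-PG replicated pool — varying object size, direction, and read pattern; radosbench_omap_write is the odd one out, bypassing cbt to drive omap writes through the plain teuthology radosbench task. In summary (commentary, not a suite fragment):

radosbench_variants:
  radosbench_4K_rand_read: {op_size: 4096,    write_only: false, readmode: rand}
  radosbench_4K_seq_read:  {op_size: 4096,    write_only: false}  # sequential reads, presumably the default readmode
  radosbench_4M_rand_read: {op_size: 4194304, write_only: false, readmode: rand}
  radosbench_4M_seq_read:  {op_size: 4194304, write_only: false}
  radosbench_4M_write:     {op_size: 4194304, write_only: true}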
diff --git a/qa/suites/rados/perf/workloads/sample_fio.yaml b/qa/suites/rados/perf/workloads/sample_fio.yaml
new file mode 100644
index 000000000..98411392d
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/sample_fio.yaml
@@ -0,0 +1,24 @@
+tasks:
+- cbt:
+    benchmarks:
+      librbdfio:
+        op_size: [4096]
+        time: 60
+        mode: ['randwrite']
+        norandommap: True
+        vol_size: 4096
+        procs_per_volume: [1]
+        volumes_per_client: [2]
+        iodepth: [32]
+        osd_ra: [4096]
+        pool_profile: 'rbd'
+        log_avg_msec: 100
+    cluster:
+      user: 'ubuntu'
+      osds_per_node: 3
+      iterations: 1
+      pool_profiles:
+        rbd:
+          pg_size: 128
+          pgp_size: 128
+          replication: 3
diff --git a/qa/suites/rados/perf/workloads/sample_radosbench.yaml b/qa/suites/rados/perf/workloads/sample_radosbench.yaml
new file mode 100644
index 000000000..e3dc47ae6
--- /dev/null
+++ b/qa/suites/rados/perf/workloads/sample_radosbench.yaml
@@ -0,0 +1,23 @@
+tasks:
+- cbt:
+    benchmarks:
+      radosbench:
+        concurrent_ops: 4
+        concurrent_procs: 2
+        op_size: [4096]
+        pool_monitoring_list:
+          - collectl
+        pool_profile: 'replicated'
+        run_monitoring_list:
+          - collectl
+        time: 60
+        write_only: true
+    cluster:
+      user: 'ubuntu'
+      osds_per_node: 3
+      iterations: 1
+      pool_profiles:
+        replicated:
+          pg_size: 256
+          pgp_size: 256
+          replication: 'replicated'