From e6918187568dbd01842d8d1d2c808ce16a894239 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 21 Apr 2024 13:54:28 +0200
Subject: Adding upstream version 18.2.2.

Signed-off-by: Daniel Baumann
---
 doc/rados/configuration/pool-pg.conf | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)
 create mode 100644 doc/rados/configuration/pool-pg.conf

diff --git a/doc/rados/configuration/pool-pg.conf b/doc/rados/configuration/pool-pg.conf
new file mode 100644
index 000000000..6765d37df
--- /dev/null
+++ b/doc/rados/configuration/pool-pg.conf
@@ -0,0 +1,21 @@
+[global]
+
+    # By default, Ceph makes three replicas of RADOS objects. If you want
+    # to maintain four copies of an object instead of the default value (a
+    # primary copy and three replica copies), reset the default value as
+    # shown in 'osd_pool_default_size'. If you want to allow Ceph to accept
+    # an I/O operation to a degraded PG, set 'osd_pool_default_min_size' to
+    # a number less than the 'osd_pool_default_size' value.
+
+    osd_pool_default_size = 3     # Write an object three times.
+    osd_pool_default_min_size = 2 # Accept an I/O operation to a PG that has two copies of an object.
+
+    # Note: by default, PG autoscaling is enabled and this value is used
+    # only in specific circumstances. It is, however, still recommended to
+    # set it. Ensure you have a realistic number of placement groups: we
+    # recommend approximately 100 per OSD. E.g., total number of OSDs
+    # multiplied by 100, divided by the number of replicas (i.e.,
+    # 'osd_pool_default_size'). So for 10 OSDs and 'osd_pool_default_size'
+    # = 4, we'd recommend approximately (100 * 10) / 4 = 250.
+    # Always use the nearest power of two: here, 256.
+    osd_pool_default_pg_num = 256
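For reference, the pg_num rule of thumb described in the comment above can be expressed as a short calculation: roughly 100 placement groups per OSD, divided by the replica count, then rounded to the nearest power of two. The recommended_pg_num helper below is hypothetical, not part of Ceph or its tooling; this is a minimal Python sketch of the arithmetic only.

def recommended_pg_num(num_osds, pool_size, target_pgs_per_osd=100):
    """Suggested 'osd_pool_default_pg_num' per the rule of thumb:
    (target_pgs_per_osd * num_osds) / pool_size, rounded to the
    nearest power of two. Hypothetical helper, not a Ceph API."""
    raw = (target_pgs_per_osd * num_osds) / pool_size
    lower = 1 << (max(1, int(raw)).bit_length() - 1)  # largest power of two <= raw
    upper = lower * 2                                 # next power of two above it
    return lower if (raw - lower) < (upper - raw) else upper

# The worked example from the file: 10 OSDs, size 4 -> 250, which
# rounds to the nearest power of two, 256.
print(recommended_pg_num(10, 4))  # 256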