author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-21 11:54:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-21 11:54:28 +0000
commit    e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree      64f88b554b444a49f656b6c656111a145cbbaa28 /src/common/options
parent    Initial commit. (diff)
Adding upstream version 18.2.2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/common/options')
-rw-r--r--  src/common/options/CMakeLists.txt                  112
-rw-r--r--  src/common/options/build_options.cc                 53
-rw-r--r--  src/common/options/build_options.h                   8
-rw-r--r--  src/common/options/ceph-exporter.yaml.in            54
-rw-r--r--  src/common/options/cephfs-mirror.yaml.in            94
-rw-r--r--  src/common/options/crimson.yaml.in                 119
-rw-r--r--  src/common/options/global.yaml.in                 6396
-rw-r--r--  src/common/options/immutable-object-cache.yaml.in   98
-rw-r--r--  src/common/options/legacy_config_opts.h             11
-rw-r--r--  src/common/options/mds-client.yaml.in              580
-rw-r--r--  src/common/options/mds.yaml.in                    1536
-rw-r--r--  src/common/options/mgr.yaml.in                     362
-rw-r--r--  src/common/options/mon.yaml.in                    1340
-rw-r--r--  src/common/options/osd.yaml.in                    1415
-rw-r--r--  src/common/options/rbd-mirror.yaml.in              210
-rw-r--r--  src/common/options/rbd.yaml.in                     881
-rw-r--r--  src/common/options/rgw.yaml.in                    3770
-rwxr-xr-x  src/common/options/validate-options.py              49
-rwxr-xr-x  src/common/options/y2c.py                          366
19 files changed, 17454 insertions(+), 0 deletions(-)
diff --git a/src/common/options/CMakeLists.txt b/src/common/options/CMakeLists.txt
new file mode 100644
index 000000000..f12a5513a
--- /dev/null
+++ b/src/common/options/CMakeLists.txt
@@ -0,0 +1,112 @@
+set(common_options_srcs build_options.cc)
+set(legacy_options_headers)
+set(options_yamls)
+
+# to mimic the behavior of file(CONFIGURE ...)
+file(GENERATE OUTPUT configure_file.cmake
+ CONTENT "configure_file(\${input_file} \${output_file} @ONLY)")
+function(file_configure input_file output_file)
+ set(cmake_defs
+ -D input_file=${input_file}
+ -D output_file=${output_file})
+ file(STRINGS ${input_file} subvars REGEX "@[^@]+@")
+ foreach(line ${subvars})
+ string(REGEX REPLACE ".*@([^@]+)@.*" "\\1"
+ var "${line}")
+ set(value ${${var}})
+ list(APPEND cmake_defs -D ${var}=${value})
+ endforeach()
+ add_custom_command(OUTPUT ${output_file}
+ COMMAND ${CMAKE_COMMAND} ${cmake_defs} -P configure_file.cmake
+ DEPENDS ${input_file}
+ VERBATIM)
+endfunction()
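+# A minimal usage sketch (editor's note; 'example.yaml.in' and the 'foo'
+# variable are hypothetical):
+#   set(foo "bar")
+#   file_configure(${CMAKE_CURRENT_SOURCE_DIR}/example.yaml.in
+#                  ${CMAKE_CURRENT_BINARY_DIR}/example.yaml)
+# Unlike configure_file(), which substitutes @foo@ when CMake configures the
+# build, the substitution here runs at build time through the generated
+# configure_file.cmake script, and the output is regenerated whenever the
+# input template changes.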
+
+function(add_options name)
+ set(yaml_in_file ${CMAKE_CURRENT_SOURCE_DIR}/${name}.yaml.in)
+ set(yaml_file ${CMAKE_CURRENT_BINARY_DIR}/${name}.yaml)
+ file_configure("${yaml_in_file}"
+ "${yaml_file}" @ONLY)
+ list(APPEND options_yamls ${yaml_file})
+ set(options_yamls ${options_yamls} PARENT_SCOPE)
+ set(cc_file "${name}_options.cc")
+ set(h_file "${PROJECT_BINARY_DIR}/include/${name}_legacy_options.h")
+ add_custom_command(PRE_BUILD
+ OUTPUT ${cc_file} ${h_file}
+ COMMAND ${Python3_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/y2c.py
+ --input ${yaml_file}
+ --output ${cc_file}
+ --legacy ${h_file}
+ --name ${name}
+ DEPENDS ${yaml_file})
+ list(APPEND common_options_srcs ${cc_file})
+ set(common_options_srcs ${common_options_srcs} PARENT_SCOPE)
+ list(APPEND legacy_options_headers ${h_file})
+ set(legacy_options_headers ${legacy_options_headers} PARENT_SCOPE)
+endfunction()
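+# Each add_options(<name>) call below yields three build-time artifacts:
+# <name>.yaml (the template with @var@ placeholders expanded),
+# <name>_options.cc (generated by y2c.py and appended to common_options_srcs),
+# and include/<name>_legacy_options.h (collected into legacy_options_headers).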
+
+set(osd_erasure_code_plugins "jerasure" "lrc")
+if(WITH_EC_ISA_PLUGIN)
+ list(APPEND osd_erasure_code_plugins "isa")
+endif()
+string(REPLACE ";" " " osd_erasure_code_plugins "${osd_erasure_code_plugins}")
+
+set(keyring_paths
+ "/etc/ceph/$cluster.$name.keyring"
+ "/etc/ceph/$cluster.keyring"
+ "/etc/ceph/keyring"
+ "/etc/ceph/keyring.bin")
+if(FREEBSD)
+ list(APPEND keyring_paths
+ "/usr/local/etc/ceph/$cluster.$name.keyring"
+ "/usr/local/etc/ceph/$cluster.keyring"
+ "/usr/local/etc/ceph/keyring"
+ "/usr/local/etc/ceph/keyring.bin")
+endif()
+string(REPLACE ";" "," keyring_paths "${keyring_paths}")
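+# The comma-joined list built above is substituted for @keyring_paths@ when
+# global.yaml.in is configured (it becomes the default of the 'keyring' option).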
+
+set(ms_bind_retry_count 3)
+set(ms_bind_retry_delay 5)
+if(FREEBSD)
+ # FreeBSD does not use SO_REUSEADDR, so allow a bit more time by default
+ set(ms_bind_retry_count 6)
+ set(ms_bind_retry_delay 6)
+endif()
+
+set(mgr_disabled_modules "")
+if(WITH_MGR)
+ # https://tracker.ceph.com/issues/45147
+ if(Python3_VERSION VERSION_EQUAL 3.8)
+ set(mgr_disabled_modules "diskprediction_local")
+ message(STATUS "mgr module disabled for ${Python3_VERSION}: ${mgr_disabled_modules}")
+ endif()
+endif()
+
+add_options(global)
+add_options(cephfs-mirror)
+add_options(crimson)
+add_options(mgr)
+add_options(mds)
+add_options(mds-client)
+add_options(mon)
+add_options(osd)
+add_options(rbd)
+add_options(rbd-mirror)
+add_options(immutable-object-cache)
+add_options(ceph-exporter)
+
+# if set to an empty string, the system default luarocks package location (if it exists) will be used
+set(rgw_luarocks_location "")
+if(WITH_RADOSGW_LUA_PACKAGES)
+ set(rgw_luarocks_location "/tmp/luarocks")
+endif()
+add_options(rgw)
+
+add_library(common-options-objs OBJECT
+ ${common_options_srcs})
+add_custom_target(legacy-option-headers
+ DEPENDS ${legacy_options_headers})
+
+include(AddCephTest)
+add_ceph_test(validate-options
+ ${Python3_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/validate-options.py ${options_yamls})
diff --git a/src/common/options/build_options.cc b/src/common/options/build_options.cc
new file mode 100644
index 000000000..867fc2efd
--- /dev/null
+++ b/src/common/options/build_options.cc
@@ -0,0 +1,53 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#include "build_options.h"
+
+#include <algorithm>
+#include <cstring>
+
+std::vector<Option> get_global_options();
+std::vector<Option> get_mgr_options();
+std::vector<Option> get_mon_options();
+std::vector<Option> get_crimson_options();
+std::vector<Option> get_osd_options();
+std::vector<Option> get_rgw_options();
+std::vector<Option> get_rbd_options();
+std::vector<Option> get_rbd_mirror_options();
+std::vector<Option> get_immutable_object_cache_options();
+std::vector<Option> get_mds_options();
+std::vector<Option> get_mds_client_options();
+std::vector<Option> get_cephfs_mirror_options();
+std::vector<Option> get_ceph_exporter_options();
+
+std::vector<Option> build_options()
+{
+ std::vector<Option> result = get_global_options();
+
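+ // 'ingest' moves each option into the result, first tagging it with the
+ // given service name unless that service is already listed on the option.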
+ auto ingest = [&result](std::vector<Option>&& options, const char* svc) {
+ for (auto &o : options) {
+ if (std::none_of(o.services.begin(), o.services.end(),
+ [svc](const char* known_svc) {
+ return std::strcmp(known_svc, svc) == 0;
+ })) {
+ o.add_service(svc);
+ }
+ result.push_back(std::move(o));
+ }
+ };
+
+ ingest(get_crimson_options(), "osd");
+ ingest(get_mgr_options(), "mgr");
+ ingest(get_mon_options(), "mon");
+ ingest(get_osd_options(), "osd");
+ ingest(get_rgw_options(), "rgw");
+ ingest(get_rbd_options(), "rbd");
+ ingest(get_rbd_mirror_options(), "rbd-mirror");
+ ingest(get_immutable_object_cache_options(), "immutable-object-cache");
+ ingest(get_mds_options(), "mds");
+ ingest(get_mds_client_options(), "mds_client");
+ ingest(get_cephfs_mirror_options(), "cephfs-mirror");
+ ingest(get_ceph_exporter_options(), "ceph-exporter");
+
+ return result;
+}
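+// A minimal consumer sketch (editor's note; assumes Option exposes a 'name'
+// field, as declared in common/options.h):
+//   for (const Option& opt : build_options()) {
+//     std::cout << opt.name << '\n';
+//   }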
diff --git a/src/common/options/build_options.h b/src/common/options/build_options.h
new file mode 100644
index 000000000..6689e5e72
--- /dev/null
+++ b/src/common/options/build_options.h
@@ -0,0 +1,8 @@
+// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
+// vim: ts=8 sw=2 smarttab
+
+#pragma once
+#include <vector>
+#include "common/options.h"
+
+std::vector<Option> build_options();
diff --git a/src/common/options/ceph-exporter.yaml.in b/src/common/options/ceph-exporter.yaml.in
new file mode 100644
index 000000000..798a185e9
--- /dev/null
+++ b/src/common/options/ceph-exporter.yaml.in
@@ -0,0 +1,54 @@
+# -*- mode: YAML -*-
+---
+
+options:
+- name: exporter_sock_dir
+ type: str
+ level: advanced
+ desc: The path to the directory containing the ceph daemons' socket files
+ default: /var/run/ceph/
+ services:
+ - ceph-exporter
+ flags:
+ - runtime
+- name: exporter_addr
+ type: str
+ level: advanced
+ desc: Host IP address where the exporter is deployed
+ default: 0.0.0.0
+ services:
+ - ceph-exporter
+- name: exporter_http_port
+ type: int
+ level: advanced
+ desc: Port to deploy exporter on. Default is 9926
+ default: 9926
+ services:
+ - ceph-exporter
+- name: exporter_prio_limit
+ type: int
+ level: advanced
+ desc: Only perf counters with priority greater than or equal to exporter_prio_limit are fetched
+ default: 5
+ services:
+ - ceph-exporter
+ flags:
+ - runtime
+- name: exporter_stats_period
+ type: int
+ level: advanced
+ desc: Time to wait before sending requests again to exporter server (seconds)
+ default: 5
+ services:
+ - ceph-exporter
+ flags:
+ - runtime
+- name: exporter_sort_metrics
+ type: bool
+ level: advanced
+ desc: If true, the metrics will be sorted and grouped.
+ default: true
+ services:
+ - ceph-exporter
+ flags:
+ - runtime
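+# Editor's note: these defaults can be overridden at runtime with the
+# standard config CLI, e.g.:
+#   ceph config set global exporter_prio_limit 0
+# or per daemon in ceph.conf.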
diff --git a/src/common/options/cephfs-mirror.yaml.in b/src/common/options/cephfs-mirror.yaml.in
new file mode 100644
index 000000000..78f86dfb1
--- /dev/null
+++ b/src/common/options/cephfs-mirror.yaml.in
@@ -0,0 +1,94 @@
+# -*- mode: YAML -*-
+---
+
+options:
+- name: cephfs_mirror_max_concurrent_directory_syncs
+ type: uint
+ level: advanced
+ desc: maximum number of concurrent snapshot synchronization threads
+ long_desc: maximum number of directory snapshots that can be synchronized concurrently
+ by cephfs-mirror daemon. Controls the number of synchronization threads.
+ default: 3
+ services:
+ - cephfs-mirror
+ min: 1
+- name: cephfs_mirror_action_update_interval
+ type: secs
+ level: advanced
+ desc: interval for driving asynchronous mirror actions
+ long_desc: Interval in seconds to process pending mirror update actions.
+ default: 2
+ services:
+ - cephfs-mirror
+ min: 1
+- name: cephfs_mirror_restart_mirror_on_blocklist_interval
+ type: secs
+ level: advanced
+ desc: interval to restart blocklisted instances
+ long_desc: Interval in seconds to restart blocklisted mirror instances. Setting
+ to zero (0) disables restarting blocklisted instances.
+ default: 30
+ services:
+ - cephfs-mirror
+ min: 0
+- name: cephfs_mirror_max_snapshot_sync_per_cycle
+ type: uint
+ level: advanced
+ desc: number of snapshots to mirror in one cycle
+ long_desc: maximum number of snapshots to mirror when a directory is picked up for
+ mirroring by worker threads.
+ default: 3
+ services:
+ - cephfs-mirror
+ min: 1
+- name: cephfs_mirror_directory_scan_interval
+ type: uint
+ level: advanced
+ desc: interval to scan directories to mirror snapshots
+ long_desc: interval in seconds to scan configured directories for snapshot mirroring.
+ default: 10
+ services:
+ - cephfs-mirror
+ min: 1
+- name: cephfs_mirror_max_consecutive_failures_per_directory
+ type: uint
+ level: advanced
+ desc: consecutive failed directory synchronization attempts before marking a directory
+ as "failed"
+ long_desc: number of consecutive snapshot synchronization failures to mark a directory
+ as "failed". Failed directories are retried for synchronization less frequently.
+ default: 10
+ services:
+ - cephfs-mirror
+ min: 0
+- name: cephfs_mirror_retry_failed_directories_interval
+ type: uint
+ level: advanced
+ desc: failed directory retry interval for synchronization
+ long_desc: interval in seconds to retry synchronization for failed directories.
+ default: 60
+ services:
+ - cephfs-mirror
+ min: 1
+- name: cephfs_mirror_restart_mirror_on_failure_interval
+ type: secs
+ level: advanced
+ desc: interval to restart failed mirror instances
+ long_desc: Interval in seconds to restart failed mirror instances. Setting to zero
+ (0) disables restarting failed mirror instances.
+ default: 20
+ services:
+ - cephfs-mirror
+ min: 0
+- name: cephfs_mirror_mount_timeout
+ type: secs
+ level: advanced
+ desc: timeout for mounting primary/secondary ceph file system
+ long_desc: Timeout in seconds for mounting primary or secondary (remote) ceph file system
+ by the cephfs-mirror daemon. Setting this to a higher value could result in the mirror
+ daemon getting stalled when mounting a file system if the cluster is not reachable. This
+ option is used to override the usual client_mount_timeout.
+ default: 10
+ services:
+ - cephfs-mirror
+ min: 0 \ No newline at end of file
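+# Editor's note: sync concurrency can be raised at runtime, e.g.:
+#   ceph config set global cephfs_mirror_max_concurrent_directory_syncs 5
+# Since the option is not flagged 'runtime', the cephfs-mirror daemon may
+# need a restart to pick the new value up.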
diff --git a/src/common/options/crimson.yaml.in b/src/common/options/crimson.yaml.in
new file mode 100644
index 000000000..1007998fa
--- /dev/null
+++ b/src/common/options/crimson.yaml.in
@@ -0,0 +1,119 @@
+# -*- mode: YAML -*-
+---
+
+options:
+- name: crimson_osd_obc_lru_size
+ type: uint
+ level: advanced
+ desc: Number of obcs to cache
+ default: 10
+- name: crimson_osd_scheduler_concurrency
+ type: uint
+ level: advanced
+ desc: The maximum number of concurrent IO operations, 0 for unlimited
+ default: 0
+- name: crimson_alien_op_num_threads
+ type: uint
+ level: advanced
+ desc: The number of threads for serving alienized ObjectStore
+ default: 6
+ flags:
+ - startup
+- name: crimson_seastar_smp
+ type: uint
+ level: advanced
+ desc: Number of seastar reactor threads to use for the osd
+ default: 1
+ flags:
+ - startup
+- name: crimson_alien_thread_cpu_cores
+ type: str
+ level: advanced
+ desc: CPU cores on which alienstore threads will run in cpuset(7) format
+- name: seastore_segment_size
+ type: size
+ desc: Segment size to use for SegmentManager
+ level: advanced
+ default: 64_M
+- name: seastore_device_size
+ type: size
+ desc: Total size to use for SegmentManager block file if created
+ level: dev
+ default: 50_G
+- name: seastore_block_create
+ type: bool
+ level: dev
+ desc: Create SegmentManager file if it doesn't exist
+ default: true
+ see_also:
+ - seastore_device_size
+- name: seastore_journal_batch_capacity
+ type: uint
+ level: dev
+ desc: The maximum number of records in a journal batch
+ default: 16
+- name: seastore_journal_batch_flush_size
+ type: size
+ level: dev
+ desc: The size threshold to force flush a journal batch
+ default: 16_M
+- name: seastore_journal_iodepth_limit
+ type: uint
+ level: dev
+ desc: The io depth limit to submit journal records
+ default: 5
+- name: seastore_journal_batch_preferred_fullness
+ type: float
+ level: dev
+ desc: The record fullness threshold to flush a journal batch
+ default: 0.95
+- name: seastore_default_max_object_size
+ type: uint
+ level: dev
+ desc: default logical address space reservation for seastore objects' data
+ default: 16777216
+- name: seastore_default_object_metadata_reservation
+ type: uint
+ level: dev
+ desc: default logical address space reservation for seastore objects' metadata
+ default: 16777216
+- name: seastore_cache_lru_size
+ type: size
+ level: advanced
+ desc: Size in bytes of extents to keep in cache.
+ default: 64_M
+- name: seastore_obj_data_write_amplification
+ type: float
+ level: advanced
+ desc: split extent if ratio of total extent size to write size exceeds this value
+ default: 1.25
+- name: seastore_max_concurrent_transactions
+ type: uint
+ level: advanced
+ desc: maximum concurrent transactions that seastore allows
+ default: 8
+- name: seastore_main_device_type
+ type: str
+ level: dev
+ desc: The main device type seastore uses (SSD or RANDOM_BLOCK_SSD)
+ default: SSD
+- name: seastore_cbjournal_size
+ type: size
+ level: dev
+ desc: Total size to use for CircularBoundedJournal if created; only valid when seastore_main_device_type is RANDOM_BLOCK_SSD
+ default: 5_G
+- name: seastore_multiple_tiers_stop_evict_ratio
+ type: float
+ level: advanced
+ desc: Stop evicting cold data to the cold tier when the used ratio of the main tier falls below this value.
+ default: 0.5
+- name: seastore_multiple_tiers_default_evict_ratio
+ type: float
+ level: advanced
+ desc: Begin evicting cold data to the cold tier when the used ratio of the main tier reaches this value.
+ default: 0.6
+- name: seastore_multiple_tiers_fast_evict_ratio
+ type: float
+ level: advanced
+ desc: Begin fast eviction when the used ratio of the main tier reaches this value.
+ default: 0.7
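+# Editor's note: 'size'-typed options above accept suffixed values (the
+# K/M/G style of the 64_M defaults), so a cache bump could look like:
+#   ceph config set osd seastore_cache_lru_size 128M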
diff --git a/src/common/options/global.yaml.in b/src/common/options/global.yaml.in
new file mode 100644
index 000000000..fa426a115
--- /dev/null
+++ b/src/common/options/global.yaml.in
@@ -0,0 +1,6396 @@
+# -*- mode: YAML -*-
+---
+
+options:
+- name: host
+ type: str
+ level: basic
+ desc: local hostname
+ long_desc: if blank, ceph assumes the short hostname (hostname -s)
+ tags:
+ - network
+ services:
+ - common
+ flags:
+ - no_mon_update
+ with_legacy: true
+- name: fsid
+ type: uuid
+ level: basic
+ desc: cluster fsid (uuid)
+ fmt_desc: The cluster ID. One per cluster.
+ May be generated by a deployment tool if not specified.
+ note: Do not set this value if you use a deployment tool that does
+ it for you.
+ tags:
+ - service
+ services:
+ - common
+ flags:
+ - no_mon_update
+ - startup
+- name: public_addr
+ type: addr
+ level: basic
+ desc: public-facing address to bind to
+ fmt_desc: The IP address for the public (front-side) network.
+ Set for each daemon.
+ services:
+ - mon
+ - mds
+ - osd
+ - mgr
+ flags:
+ - startup
+ with_legacy: true
+- name: public_addrv
+ type: addrvec
+ level: basic
+ desc: public-facing address to bind to
+ services:
+ - mon
+ - mds
+ - osd
+ - mgr
+ flags:
+ - startup
+ with_legacy: true
+- name: public_bind_addr
+ type: addr
+ level: advanced
+ services:
+ - mon
+ flags:
+ - startup
+ fmt_desc: In some dynamic deployments the Ceph MON daemon might bind
+ to an IP address locally that is different from the ``public_addr``
+ advertised to other peers in the network. The environment must ensure
+ that routing rules are set correctly. If ``public_bind_addr`` is set
+ the Ceph Monitor daemon will bind to it locally and use ``public_addr``
+ in the monmaps to advertise its address to peers. This behavior is limited
+ to the Monitor daemon.
+ with_legacy: true
+- name: cluster_addr
+ type: addr
+ level: basic
+ desc: cluster-facing address to bind to
+ fmt_desc: The IP address for the cluster (back-side) network.
+ Set for each daemon.
+ tags:
+ - network
+ services:
+ - osd
+ flags:
+ - startup
+ with_legacy: true
+- name: public_network
+ type: str
+ level: advanced
+ desc: Network(s) from which to choose a public address to bind to
+ fmt_desc: The IP address and netmask of the public (front-side) network
+ (e.g., ``192.168.0.0/24``). Set in ``[global]``. You may specify
+ comma-separated subnets. The format of it looks like
+ ``{ip-address}/{netmask} [, {ip-address}/{netmask}]``
+ tags:
+ - network
+ services:
+ - mon
+ - mds
+ - osd
+ - mgr
+ flags:
+ - startup
+ with_legacy: true
+- name: public_network_interface
+ type: str
+ level: advanced
+ desc: Interface name(s) from which to choose an address from a public_network to
+ bind to; public_network must also be specified.
+ tags:
+ - network
+ services:
+ - mon
+ - mds
+ - osd
+ - mgr
+ see_also:
+ - public_network
+ flags:
+ - startup
+- name: cluster_network
+ type: str
+ level: advanced
+ desc: Network(s) from which to choose a cluster address to bind to
+ fmt_desc: The IP address and netmask of the cluster (back-side) network
+ (e.g., ``10.0.0.0/24``). Set in ``[global]``. You may specify
+ comma-separated subnets. The format of it looks like
+ ``{ip-address}/{netmask} [, {ip-address}/{netmask}]``
+ tags:
+ - network
+ services:
+ - osd
+ flags:
+ - startup
+ with_legacy: true
+- name: cluster_network_interface
+ type: str
+ level: advanced
+ desc: Interface name(s) from which to choose an address from a cluster_network to
+ bind to; cluster_network must also be specified.
+ tags:
+ - network
+ services:
+ - mon
+ - mds
+ - osd
+ - mgr
+ see_also:
+ - cluster_network
+ flags:
+ - startup
+- name: monmap
+ type: str
+ level: advanced
+ desc: path to MonMap file
+ long_desc: This option is normally used during mkfs, but can also be used to identify
+ which monitors to connect to.
+ services:
+ - mon
+ flags:
+ - no_mon_update
+ - create
+- name: mon_host
+ type: str
+ level: basic
+ desc: list of hosts or addresses to search for a monitor
+ long_desc: This is a list of IP addresses or hostnames that are separated by commas, whitespace, or semicolons. Hostnames are resolved via DNS. All A and AAAA records are included in the search list.
+ services:
+ - common
+ flags:
+ - no_mon_update
+ - startup
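+# Editor's note: accepted mon_host forms in ceph.conf include, e.g.:
+#   mon_host = 10.0.0.1,10.0.0.2,10.0.0.3
+#   mon_host = mon1.example.com mon2.example.com
+# and explicit v1/v2 address vectors such as
+#   mon_host = [v2:10.0.0.1:3300,v1:10.0.0.1:6789]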
+- name: mon_host_override
+ type: str
+ level: advanced
+ desc: monitor(s) to use overriding the MonMap
+ fmt_desc: This is the list of monitors that the Ceph process **initially** contacts when first establishing communication with the Ceph cluster. This overrides the known monitor list that is derived from MonMap updates sent to older Ceph instances (like librados cluster handles). This option is expected to be useful primarily for debugging.
+ services:
+ - common
+ flags:
+ - no_mon_update
+ - startup
+- name: mon_dns_srv_name
+ type: str
+ level: advanced
+ desc: name of DNS SRV record to check for monitor addresses
+ fmt_desc: the service name used when querying the DNS for the monitor hosts/addresses
+ default: ceph-mon
+ tags:
+ - network
+ services:
+ - common
+ see_also:
+ - mon_host
+ flags:
+ - startup
+- name: container_image
+ type: str
+ level: basic
+ desc: container image (used by cephadm orchestrator)
+ default: docker.io/ceph/daemon-base:latest-master-devel
+ flags:
+ - startup
+- name: no_config_file
+ type: bool
+ level: advanced
+ desc: signal that we don't require a config file to be present
+ long_desc: When specified, we won't be looking for a configuration file, and will
+ instead expect that whatever options or values are required for us to work will
+ be passed as arguments.
+ default: false
+ tags:
+ - config
+ services:
+ - common
+ flags:
+ - no_mon_update
+ - startup
+- name: lockdep
+ type: bool
+ level: dev
+ desc: enable lockdep lock dependency analyzer
+ default: false
+ services:
+ - common
+ flags:
+ - no_mon_update
+ - startup
+ with_legacy: true
+- name: lockdep_force_backtrace
+ type: bool
+ level: dev
+ desc: always gather current backtrace at every lock
+ default: false
+ services:
+ - common
+ see_also:
+ - lockdep
+ flags:
+ - startup
+ with_legacy: true
+- name: run_dir
+ type: str
+ level: advanced
+ desc: path for the 'run' directory for storing pid and socket files
+ default: /var/run/ceph
+ services:
+ - common
+ see_also:
+ - admin_socket
+ flags:
+ - startup
+ with_legacy: true
+- name: admin_socket
+ type: str
+ level: advanced
+ desc: path for the runtime control socket file, used by the 'ceph daemon' command
+ fmt_desc: The socket for executing administrative commands on a daemon,
+ irrespective of whether Ceph Monitors have established a quorum.
+ daemon_default: $run_dir/$cluster-$name.asok
+ services:
+ - common
+ flags:
+ - startup
+ # default changed by common_preinit()
+ with_legacy: true
+- name: admin_socket_mode
+ type: str
+ level: advanced
+ desc: file mode to set for the admin socket file, e.g, '0755'
+ services:
+ - common
+ see_also:
+ - admin_socket
+ flags:
+ - startup
+ with_legacy: true
+- name: daemonize
+ type: bool
+ level: advanced
+ desc: whether to daemonize (background) after startup
+ default: false
+ daemon_default: true
+ tags:
+ - service
+ services:
+ - mon
+ - mgr
+ - osd
+ - mds
+ see_also:
+ - pid_file
+ - chdir
+ flags:
+ - no_mon_update
+ - startup
+ # default changed by common_preinit()
+ with_legacy: true
+- name: setuser
+ type: str
+ level: advanced
+ desc: uid or user name to switch to on startup
+ long_desc: This is normally specified by the systemd unit file.
+ tags:
+ - service
+ services:
+ - mon
+ - mgr
+ - osd
+ - mds
+ see_also:
+ - setgroup
+ flags:
+ - startup
+ with_legacy: true
+- name: setgroup
+ type: str
+ level: advanced
+ desc: gid or group name to switch to on startup
+ long_desc: This is normally specified by the systemd unit file.
+ tags:
+ - service
+ services:
+ - mon
+ - mgr
+ - osd
+ - mds
+ see_also:
+ - setuser
+ flags:
+ - startup
+ with_legacy: true
+- name: setuser_match_path
+ type: str
+ level: advanced
+ desc: if set, setuser/setgroup is conditional on this path matching ownership
+ long_desc: If setuser or setgroup are specified, and this option is non-empty, then
+ the uid/gid of the daemon will only be changed if the file or directory specified
+ by this option has a matching uid and/or gid. This exists primarily to allow
+ switching to user ceph for OSDs to be conditional on whether the osd data contents
+ have also been chowned after an upgrade. This is normally specified by the systemd
+ unit file.
+ tags:
+ - service
+ services:
+ - mon
+ - mgr
+ - osd
+ - mds
+ see_also:
+ - setuser
+ - setgroup
+ flags:
+ - startup
+ with_legacy: true
+- name: pid_file
+ type: str
+ level: advanced
+ desc: path to write a pid file (if any)
+ fmt_desc: The file in which the mon, osd or mds will write its
+ PID. For instance, ``/var/run/$cluster/$type.$id.pid``
+ will create /var/run/ceph/mon.a.pid for the ``mon`` with
+ id ``a`` running in the ``ceph`` cluster. The ``pid
+ file`` is removed when the daemon stops gracefully. If
+ the process is not daemonized (i.e. runs with the ``-f``
+ or ``-d`` option), the ``pid file`` is not created.
+ tags:
+ - service
+ services:
+ - mon
+ - mgr
+ - osd
+ - mds
+ flags:
+ - startup
+ with_legacy: true
+- name: chdir
+ type: str
+ level: advanced
+ desc: path to chdir(2) to after daemonizing
+ fmt_desc: The directory Ceph daemons change to once they are
+ up and running. Default ``/`` directory recommended.
+ tags:
+ - service
+ services:
+ - mon
+ - mgr
+ - osd
+ - mds
+ see_also:
+ - daemonize
+ flags:
+ - no_mon_update
+ - startup
+ with_legacy: true
+- name: fatal_signal_handlers
+ type: bool
+ level: advanced
+ desc: whether to register signal handlers for SIGABRT etc that dump a stack trace
+ long_desc: This is normally true for daemons and false for libraries.
+ fmt_desc: If set, we will install signal handlers for SEGV, ABRT, BUS, ILL,
+ FPE, XCPU, XFSZ, SYS signals to generate a useful log message
+ default: true
+ tags:
+ - service
+ services:
+ - mon
+ - mgr
+ - osd
+ - mds
+ flags:
+ - startup
+ with_legacy: true
+- name: crash_dir
+ type: str
+ level: advanced
+ desc: Directory where crash reports are archived
+ default: /var/lib/ceph/crash
+ flags:
+ - startup
+ with_legacy: true
+- name: restapi_log_level
+ type: str
+ level: advanced
+ desc: default set by python code
+ with_legacy: true
+- name: restapi_base_url
+ type: str
+ level: advanced
+ desc: default set by python code
+ with_legacy: true
+- name: erasure_code_dir
+ type: str
+ level: advanced
+ desc: directory where erasure-code plugins can be found
+ default: @CEPH_INSTALL_FULL_PKGLIBDIR@/erasure-code
+ services:
+ - mon
+ - osd
+ flags:
+ - startup
+ with_legacy: true
+- name: log_file
+ type: str
+ level: basic
+ desc: path to log file
+ fmt_desc: The location of the logging file for your cluster.
+ daemon_default: /var/log/ceph/$cluster-$name.log
+ see_also:
+ - log_to_file
+ - log_to_stderr
+ - err_to_stderr
+ - log_to_syslog
+ - err_to_syslog
+ # default changed by common_preinit()
+ with_legacy: true
+- name: log_max_new
+ type: int
+ level: advanced
+ desc: max unwritten log entries to allow before waiting to flush to the log
+ fmt_desc: The maximum number of unwritten log entries to allow before flushing.
+ default: 1000
+ see_also:
+ - log_max_recent
+ # default changed by common_preinit()
+ with_legacy: true
+- name: log_max_recent
+ type: int
+ level: advanced
+ desc: recent log entries to keep in memory to dump in the event of a crash
+ long_desc: The purpose of this option is to log at a higher debug level only to
+ the in-memory buffer, and write out the detailed log messages only if there is
+ a crash. Only log entries below the lower log level will be written unconditionally
+ to the log. For example, debug_osd=1/5 will write everything <= 1 to the log
+ unconditionally but keep entries at levels 2-5 in memory. If there is a seg fault
+ or assertion failure, all entries will be dumped to the log.
+ min: 1
+ default: 500
+ daemon_default: 10000
+ # default changed by common_preinit()
+ with_legacy: true
+- name: log_to_file
+ type: bool
+ level: basic
+ desc: send log lines to a file
+ fmt_desc: Determines if logging messages should appear in a file.
+ default: true
+ see_also:
+ - log_file
+ with_legacy: true
+- name: log_to_stderr
+ type: bool
+ level: basic
+ desc: send log lines to stderr
+ fmt_desc: Determines if logging messages should appear in ``stderr``.
+ default: true
+ daemon_default: false
+ with_legacy: true
+- name: err_to_stderr
+ type: bool
+ level: basic
+ desc: send critical error log lines to stderr
+ fmt_desc: Determines if error messages should appear in ``stderr``.
+ default: false
+ daemon_default: true
+ with_legacy: true
+- name: log_stderr_prefix
+ type: str
+ level: advanced
+ desc: String to prefix log messages with when sent to stderr
+ long_desc: This is useful in container environments when combined with mon_cluster_log_to_stderr. The
+ mon log prefixes each line with the channel name (e.g., 'default', 'audit'), while
+ log_stderr_prefix can be set to 'debug '.
+ see_also:
+ - mon_cluster_log_to_stderr
+- name: log_to_syslog
+ type: bool
+ level: basic
+ desc: send log lines to syslog facility
+ fmt_desc: Determines if logging messages should appear in ``syslog``.
+ default: false
+ with_legacy: true
+- name: err_to_syslog
+ type: bool
+ level: basic
+ desc: send critical error log lines to syslog facility
+ fmt_desc: Determines if error messages should appear in ``syslog``.
+ default: false
+ with_legacy: true
+- name: log_flush_on_exit
+ type: bool
+ level: advanced
+ desc: set a process exit handler to ensure the log is flushed on exit
+ fmt_desc: Determines if Ceph should flush the log files after exit.
+ default: false
+ with_legacy: true
+- name: log_stop_at_utilization
+ type: float
+ level: basic
+ desc: stop writing to the log file when device utilization reaches this ratio
+ default: 0.97
+ see_also:
+ - log_file
+ min: 0
+ max: 1
+ with_legacy: true
+- name: log_to_graylog
+ type: bool
+ level: basic
+ desc: send log lines to remote graylog server
+ default: false
+ see_also:
+ - err_to_graylog
+ - log_graylog_host
+ - log_graylog_port
+ with_legacy: true
+- name: err_to_graylog
+ type: bool
+ level: basic
+ desc: send critical error log lines to remote graylog server
+ default: false
+ see_also:
+ - log_to_graylog
+ - log_graylog_host
+ - log_graylog_port
+ with_legacy: true
+- name: log_graylog_host
+ type: str
+ level: basic
+ desc: address or hostname of graylog server to log to
+ default: 127.0.0.1
+ see_also:
+ - log_to_graylog
+ - err_to_graylog
+ - log_graylog_port
+ with_legacy: true
+- name: log_graylog_port
+ type: int
+ level: basic
+ desc: port number for the remote graylog server
+ default: 12201
+ see_also:
+ - log_graylog_host
+ with_legacy: true
+- name: log_to_journald
+ type: bool
+ level: basic
+ desc: send log lines to journald
+ default: false
+ see_also:
+ - err_to_journald
+- name: err_to_journald
+ type: bool
+ level: basic
+ desc: send critical error log lines to journald
+ default: false
+ see_also:
+ - log_to_journald
+- name: log_coarse_timestamps
+ type: bool
+ level: advanced
+ desc: timestamp log entries from coarse system clock to improve performance
+ default: true
+ tags:
+ - performance
+ - service
+ services:
+ - common
+# these options take k/v pairs, or a single item that is assumed to be the
+# general default for all channels, regardless of channel.
+# e.g., "info" is taken to mean "default=info";
+# "default=daemon audit=local0" means
+# "default all to 'daemon', override 'audit' with 'local0'"
+- name: clog_to_monitors
+ type: str
+ level: advanced
+ desc: Make daemons send cluster log messages to monitors
+ fmt_desc: Determines if ``clog`` messages should be sent to monitors.
+ default: default=true
+ flags:
+ - runtime
+ with_legacy: true
+ services:
+ - mgr
+ - osd
+ - mds
+- name: clog_to_syslog
+ type: str
+ level: advanced
+ desc: Make daemons send cluster log messages to syslog
+ fmt_desc: Determines if ``clog`` messages should be sent to syslog.
+ default: 'false'
+ flags:
+ - runtime
+ with_legacy: true
+ services:
+ - mon
+ - mgr
+ - osd
+ - mds
+- name: clog_to_syslog_level
+ type: str
+ level: advanced
+ desc: Syslog level for cluster log messages
+ default: info
+ see_also:
+ - clog_to_syslog
+ flags:
+ - runtime
+ with_legacy: true
+ services:
+ - mon
+ - mgr
+ - osd
+ - mds
+- name: clog_to_syslog_facility
+ type: str
+ level: advanced
+ desc: Syslog facility for cluster log messages
+ default: default=daemon audit=local0
+ see_also:
+ - clog_to_syslog
+ flags:
+ - runtime
+ with_legacy: true
+ services:
+ - mon
+ - mgr
+ - osd
+ - mds
+- name: clog_to_graylog
+ type: str
+ level: advanced
+ desc: Make daemons send cluster log to graylog
+ default: 'false'
+ flags:
+ - runtime
+ services:
+ - mon
+ - mgr
+ - osd
+ - mds
+- name: clog_to_graylog_host
+ type: str
+ level: advanced
+ desc: Graylog host for cluster log messages
+ default: 127.0.0.1
+ see_also:
+ - clog_to_graylog
+ flags:
+ - runtime
+ with_legacy: true
+ services:
+ - mon
+ - mgr
+ - osd
+ - mds
+- name: clog_to_graylog_port
+ type: str
+ level: advanced
+ desc: Graylog port number for cluster log messages
+ default: '12201'
+ see_also:
+ - clog_to_graylog
+ flags:
+ - runtime
+ with_legacy: true
+ services:
+ - mon
+ - mgr
+ - osd
+ - mds
+- name: enable_experimental_unrecoverable_data_corrupting_features
+ type: str
+ level: advanced
+ desc: Enable named (or all with '*') experimental features that may be untested,
+ dangerous, and/or cause permanent data loss
+ flags:
+ - runtime
+ with_legacy: true
+- name: plugin_dir
+ type: str
+ level: advanced
+ desc: Base directory for dynamically loaded plugins
+ default: @CEPH_INSTALL_FULL_PKGLIBDIR@
+ services:
+ - mon
+ - osd
+ flags:
+ - startup
+- name: compressor_zlib_isal
+ type: bool
+ level: advanced
+ desc: Use Intel ISA-L accelerated zlib implementation if available
+ default: false
+ with_legacy: true
+# regular zlib compression level, not applicable to isa-l optimized version
+- name: compressor_zlib_level
+ type: int
+ level: advanced
+ desc: Zlib compression level to use
+ default: 5
+ with_legacy: true
+# regular zlib compression winsize, not applicable to isa-l optimized version
+- name: compressor_zlib_winsize
+ type: int
+ level: advanced
+ desc: Zlib compression winsize to use
+ default: -15
+ min: -15
+ max: 32
+ with_legacy: true
+# regular zstd compression level
+- name: compressor_zstd_level
+ type: int
+ level: advanced
+ desc: Zstd compression level to use
+ default: 1
+ with_legacy: true
+- name: qat_compressor_enabled
+ type: bool
+ level: advanced
+ desc: Enable Intel QAT acceleration support for compression if available
+ default: false
+ with_legacy: true
+- name: qat_compressor_session_max_number
+ type: uint
+ level: advanced
+ desc: Set the maximum number of session within Qatzip when using QAT compressor
+ default: 256
+- name: plugin_crypto_accelerator
+ type: str
+ level: advanced
+ desc: Crypto accelerator library to use
+ default: crypto_isal
+ with_legacy: true
+- name: openssl_engine_opts
+ type: str
+ level: advanced
+ desc: Use engine for specific openssl algorithm
+ long_desc: 'Pass opts in this way: engine_id=engine1,dynamic_path=/some/path/engine1.so,default_algorithms=DIGESTS:engine_id=engine2,dynamic_path=/some/path/engine2.so,default_algorithms=CIPHERS,other_ctrl=other_value'
+ flags:
+ - startup
+ with_legacy: true
+- name: mempool_debug
+ type: bool
+ level: dev
+ default: false
+ flags:
+ - no_mon_update
+ with_legacy: true
+- name: thp
+ type: bool
+ level: dev
+ desc: enable transparent huge page (THP) support
+ long_desc: Ceph is known to suffer from memory fragmentation due to THP use. This
+ is indicated by RSS usage above configured memory targets. Enabling THP is currently
+ discouraged until selective use of THP by Ceph is implemented.
+ default: false
+ flags:
+ - startup
+- name: key
+ type: str
+ level: advanced
+ desc: Authentication key
+ long_desc: A CephX authentication key, base64 encoded. It normally looks something
+ like 'AQAtut9ZdMbNJBAAHz6yBAWyJyz2yYRyeMWDag=='.
+ fmt_desc: The key (i.e., the text string of the key itself). Not recommended.
+ see_also:
+ - keyfile
+ - keyring
+ flags:
+ - no_mon_update
+ - startup
+ with_legacy: true
+- name: keyfile
+ type: str
+ level: advanced
+ desc: Path to a file containing a key
+ long_desc: The file should contain a CephX authentication key and optionally a trailing
+ newline, but nothing else.
+ fmt_desc: The path to a key file (i.e., a file containing only the key).
+ see_also:
+ - key
+ flags:
+ - no_mon_update
+ - startup
+ with_legacy: true
+- name: keyring
+ type: str
+ level: advanced
+ desc: Path to a keyring file.
+ long_desc: A keyring file is an INI-style formatted file where the section names
+ are client or daemon names (e.g., 'osd.0') and each section contains a 'key' property
+ with CephX authentication key as the value.
+ # please note, documents are generated without access to the CMake
+ # variables, so please update the document manually with a representative
+ # default value using the ":default:" option of the ".. confval::" directive.
+ default: @keyring_paths@
+ see_also:
+ - key
+ - keyfile
+ flags:
+ - no_mon_update
+ - startup
+ with_legacy: true
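+# Editor's note: an INI-style keyring file as described above looks like:
+#   [osd.0]
+#       key = AQAtut9ZdMbNJBAAHz6yBAWyJyz2yYRyeMWDag==
+# (sample key reused from the 'key' option's long_desc)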
+- name: heartbeat_interval
+ type: int
+ level: advanced
+ desc: Frequency of internal heartbeat checks (seconds)
+ default: 5
+ flags:
+ - startup
+ with_legacy: true
+- name: heartbeat_file
+ type: str
+ level: advanced
+ desc: File to touch on successful internal heartbeat
+ long_desc: If set, this file will be touched every time an internal heartbeat check
+ succeeds.
+ see_also:
+ - heartbeat_interval
+ flags:
+ - startup
+ with_legacy: true
+- name: heartbeat_inject_failure
+ type: int
+ level: dev
+ default: 0
+ with_legacy: true
+- name: perf
+ type: bool
+ level: advanced
+ desc: Enable internal performance metrics
+ long_desc: If enabled, collect and expose internal health metrics
+ default: true
+ with_legacy: true
+- name: ms_type
+ type: str
+ level: advanced
+ desc: Messenger implementation to use for network communication
+ fmt_desc: Transport type used by Async Messenger. Can be ``async+posix``,
+ ``async+dpdk`` or ``async+rdma``. Posix uses standard TCP/IP networking and is
+ the default. Other transports may be experimental and support may be limited.
+ default: async+posix
+ flags:
+ - startup
+ with_legacy: true
+- name: ms_public_type
+ type: str
+ level: advanced
+ desc: Messenger implementation to use for the public network
+ long_desc: If not specified, use ms_type
+ see_also:
+ - ms_type
+ flags:
+ - startup
+ with_legacy: true
+- name: ms_cluster_type
+ type: str
+ level: advanced
+ desc: Messenger implementation to use for the internal cluster network
+ long_desc: If not specified, use ms_type
+ see_also:
+ - ms_type
+ flags:
+ - startup
+ with_legacy: true
+- name: ms_mon_cluster_mode
+ type: str
+ level: basic
+ desc: Connection modes (crc, secure) for intra-mon connections in order of preference
+ fmt_desc: the connection mode (or permitted modes) to use between monitors.
+ default: secure crc
+ see_also:
+ - ms_mon_service_mode
+ - ms_mon_client_mode
+ - ms_service_mode
+ - ms_cluster_mode
+ - ms_client_mode
+ flags:
+ - startup
+- name: ms_mon_service_mode
+ type: str
+ level: basic
+ desc: Allowed connection modes (crc, secure) for connections to mons
+ fmt_desc: a list of permitted modes for clients or
+ other Ceph daemons to use when connecting to monitors.
+ default: secure crc
+ see_also:
+ - ms_service_mode
+ - ms_mon_cluster_mode
+ - ms_mon_client_mode
+ - ms_cluster_mode
+ - ms_client_mode
+ flags:
+ - startup
+- name: ms_mon_client_mode
+ type: str
+ level: basic
+ desc: Connection modes (crc, secure) for connections from clients to monitors in
+ order of preference
+ fmt_desc: a list of connection modes, in order of
+ preference, for clients or non-monitor daemons to use when
+ connecting to monitors.
+ default: secure crc
+ see_also:
+ - ms_mon_service_mode
+ - ms_mon_cluster_mode
+ - ms_service_mode
+ - ms_cluster_mode
+ - ms_client_mode
+ flags:
+ - startup
+- name: ms_cluster_mode
+ type: str
+ level: basic
+ desc: Connection modes (crc, secure) for intra-cluster connections in order of preference
+ fmt_desc: connection mode (or permitted modes) used
+ for intra-cluster communication between Ceph daemons. If multiple
+ modes are listed, the modes listed first are preferred.
+ default: crc secure
+ see_also:
+ - ms_service_mode
+ - ms_client_mode
+ flags:
+ - startup
+- name: ms_service_mode
+ type: str
+ level: basic
+ desc: Allowed connection modes (crc, secure) for connections to daemons
+ fmt_desc: a list of permitted modes for clients to use
+ when connecting to the cluster.
+ default: crc secure
+ see_also:
+ - ms_cluster_mode
+ - ms_client_mode
+ flags:
+ - startup
+- name: ms_client_mode
+ type: str
+ level: basic
+ desc: Connection modes (crc, secure) for connections from clients in order of preference
+ fmt_desc: a list of connection modes, in order of
+ preference, for clients to use (or allow) when talking to a Ceph
+ cluster.
+ default: crc secure
+ see_also:
+ - ms_cluster_mode
+ - ms_service_mode
+ flags:
+ - startup
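+# Editor's note: to require encryption everywhere, the mode lists above can
+# be narrowed to 'secure' only, e.g.:
+#   ceph config set global ms_cluster_mode secure
+#   ceph config set global ms_service_mode secure
+#   ceph config set global ms_client_mode secure
+# (all three are 'startup' options, so daemons must be restarted)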
+- name: ms_osd_compress_mode
+ type: str
+ level: advanced
+ desc: Compression policy to use in Messenger for communicating with OSD
+ default: none
+ services:
+ - osd
+ enum_values:
+ - none
+ - force
+ see_also:
+ - ms_compress_secure
+ flags:
+ - runtime
+- name: ms_osd_compress_min_size
+ type: uint
+ level: advanced
+ desc: Minimum message size eligible for on-wire compression
+ default: 1_K
+ services:
+ - osd
+ see_also:
+ - ms_osd_compress_mode
+ flags:
+ - runtime
+- name: ms_osd_compression_algorithm
+ type: str
+ level: advanced
+ desc: Compression algorithm to use in Messenger when communicating with OSD
+ long_desc: Compression algorithm for connections with OSD in order of preference.
+ Although the default value is set to snappy, a list
+ (like snappy zlib zstd etc.) is acceptable as well.
+ default: snappy
+ services:
+ - osd
+ see_also:
+ - ms_osd_compress_mode
+ flags:
+ - runtime
+- name: ms_compress_secure
+ type: bool
+ level: advanced
+ desc: Allow compression when on-wire encryption is enabled
+ long_desc: Combining encryption with compression reduces the level of security of
+ messages between peers. When both encryption and compression are enabled,
+ the compression setting is ignored and messages are not compressed.
+ This behaviour can be overridden with this setting.
+ default: false
+ see_also:
+ - ms_osd_compress_mode
+ flags:
+ - runtime
+- name: ms_learn_addr_from_peer
+ type: bool
+ level: advanced
+ desc: Learn our address from the IP our first peer sees us connecting from
+ long_desc: Use the IP address our first peer (usually a monitor) sees that we are
+ connecting from. This is useful if a client is behind some sort of NAT and we
+ want to see it identified by its local (not NATed) address.
+ default: true
+ with_legacy: true
+- name: ms_tcp_nodelay
+ type: bool
+ level: advanced
+ desc: Disable Nagle's algorithm and send queued network traffic immediately
+ fmt_desc: Ceph enables ``ms_tcp_nodelay`` so that each request is sent
+ immediately (no buffering). Disabling `Nagle's algorithm`_
+ increases network traffic, which can introduce latency. If you
+ experience large numbers of small packets, you may try
+ disabling ``ms_tcp_nodelay``.
+ default: true
+ with_legacy: true
+- name: ms_tcp_rcvbuf
+ type: size
+ level: advanced
+ desc: Size of TCP socket receive buffer
+ fmt_desc: The size of the socket buffer on the receiving end of a network
+ connection. Disabled by default.
+ default: 0
+ with_legacy: true
+- name: ms_tcp_prefetch_max_size
+ type: size
+ level: advanced
+ desc: Maximum amount of data to prefetch out of the socket receive buffer
+ default: 4_K
+ with_legacy: true
+- name: ms_initial_backoff
+ type: float
+ level: advanced
+ desc: Initial backoff after a network error is detected (seconds)
+ fmt_desc: The initial time to wait before reconnecting on a fault.
+ default: 0.2
+ with_legacy: true
+- name: ms_max_backoff
+ type: float
+ level: advanced
+ desc: Maximum backoff after a network error before retrying (seconds)
+ fmt_desc: The maximum time to wait before reconnecting on a fault.
+ default: 15
+ see_also:
+ - ms_initial_backoff
+ with_legacy: true
+- name: ms_crc_data
+ type: bool
+ level: dev
+ desc: Set and/or verify crc32c checksum on data payload sent over network
+ default: true
+ with_legacy: true
+- name: ms_crc_header
+ type: bool
+ level: dev
+ desc: Set and/or verify crc32c checksum on header payload sent over network
+ default: true
+ with_legacy: true
+- name: ms_die_on_bad_msg
+ type: bool
+ level: dev
+ desc: Induce a daemon crash/exit when a bad network message is received
+ fmt_desc: Debug option; do not configure.
+ default: false
+ with_legacy: true
+- name: ms_die_on_unhandled_msg
+ type: bool
+ level: dev
+ desc: Induce a daemon crash/exit when an unrecognized message is received
+ default: false
+ with_legacy: true
+- name: ms_die_on_old_message
+ type: bool
+ level: dev
+ desc: Induce a daemon crash/exit when an old, undecodable message is received
+ default: false
+ with_legacy: true
+- name: ms_die_on_skipped_message
+ type: bool
+ level: dev
+ desc: Induce a daemon crash/exit if sender skips a message sequence number
+ default: false
+ with_legacy: true
+- name: ms_die_on_bug
+ type: bool
+ level: dev
+ desc: Induce a crash/exit on various bugs (for testing purposes)
+ default: false
+ with_legacy: true
+- name: ms_dispatch_throttle_bytes
+ type: size
+ level: advanced
+ desc: Limit on the size of messages that have been read off the network but are still being processed
+ fmt_desc: Throttles total size of messages waiting to be dispatched.
+ default: 100_M
+ with_legacy: true
+- name: ms_bind_ipv4
+ type: bool
+ level: advanced
+ desc: Bind servers to IPv4 address(es)
+ fmt_desc: Enables Ceph daemons to bind to IPv4 addresses.
+ default: true
+ see_also:
+ - ms_bind_ipv6
+- name: ms_bind_ipv6
+ type: bool
+ level: advanced
+ desc: Bind servers to IPv6 address(es)
+ fmt_desc: Enables Ceph daemons to bind to IPv6 addresses.
+ default: false
+ see_also:
+ - ms_bind_ipv4
+ with_legacy: true
+- name: ms_bind_prefer_ipv4
+ type: bool
+ level: advanced
+ desc: Prefer IPV4 over IPV6 address(es)
+ default: false
+- name: ms_bind_msgr1
+ type: bool
+ level: advanced
+ desc: Bind servers to msgr1 (legacy) protocol address(es)
+ default: true
+ see_also:
+ - ms_bind_msgr2
+- name: ms_bind_msgr2
+ type: bool
+ level: advanced
+ desc: Bind servers to msgr2 (nautilus+) protocol address(es)
+ default: true
+ see_also:
+ - ms_bind_msgr1
+- name: ms_bind_port_min
+ type: int
+ level: advanced
+ desc: Lowest port number to bind daemon(s) to
+ fmt_desc: The minimum port number to which an OSD or MDS daemon will bind.
+ default: 6800
+ with_legacy: true
+- name: ms_bind_port_max
+ type: int
+ level: advanced
+ desc: Highest port number to bind daemon(s) to
+ fmt_desc: The maximum port number to which an OSD or MDS daemon will bind.
+ default: 7568
+ with_legacy: true
+# FreeBSD does not use SO_REUSEADDR, so allow a bit more time by default
+- name: ms_bind_retry_count
+ type: int
+ level: advanced
+ desc: Number of attempts to make while bind(2)ing to a port
+ default: @ms_bind_retry_count@
+ with_legacy: true
+# FreeBSD does not use SO_REUSEADDR, so allow a bit more time by default
+- name: ms_bind_retry_delay
+ type: int
+ level: advanced
+ desc: Delay between bind(2) attempts (seconds)
+ default: @ms_bind_retry_delay@
+ with_legacy: true
+- name: ms_bind_before_connect
+ type: bool
+ level: advanced
+ desc: Call bind(2) on client sockets
+ default: false
+ with_legacy: true
+- name: ms_tcp_listen_backlog
+ type: int
+ level: advanced
+ desc: Size of queue of incoming connections for accept(2)
+ default: 512
+ with_legacy: true
+- name: ms_connection_ready_timeout
+ type: uint
+ level: advanced
+ desc: Time before a connection that is not yet ready is declared dead (seconds)
+ default: 10
+ with_legacy: true
+- name: ms_connection_idle_timeout
+ type: uint
+ level: advanced
+ desc: Time before an idle connection is closed (seconds)
+ default: 900
+ with_legacy: true
+- name: ms_pq_max_tokens_per_priority
+ type: uint
+ level: dev
+ default: 16_M
+ with_legacy: true
+- name: ms_pq_min_cost
+ type: size
+ level: dev
+ default: 64_K
+ with_legacy: true
+- name: ms_inject_socket_failures
+ type: uint
+ level: dev
+ desc: Inject a socket failure every Nth socket operation
+ fmt_desc: Debug option; do not configure.
+ default: 0
+ with_legacy: true
+- name: ms_inject_delay_type
+ type: str
+ level: dev
+ desc: Entity type to inject delays for
+ flags:
+ - runtime
+ with_legacy: true
+- name: ms_inject_delay_max
+ type: float
+ level: dev
+ desc: Max delay to inject
+ default: 1
+ with_legacy: true
+- name: ms_inject_delay_probability
+ type: float
+ level: dev
+ default: 0
+ with_legacy: true
+- name: ms_inject_internal_delays
+ type: float
+ level: dev
+ desc: Inject various internal delays to induce races (seconds)
+ default: 0
+ with_legacy: true
+- name: ms_inject_network_congestion
+ type: uint
+ level: dev
+ desc: Inject network congestion that stalls operations N times
+ default: 0
+ with_legacy: true
+- name: ms_blackhole_osd
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: ms_blackhole_mon
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: ms_blackhole_mds
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: ms_blackhole_mgr
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: ms_blackhole_client
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: ms_dump_on_send
+ type: bool
+ level: advanced
+ desc: Hexdump message to debug log on message send
+ default: false
+ with_legacy: true
+- name: ms_dump_corrupt_message_level
+ type: int
+ level: advanced
+ desc: Log level at which to hexdump corrupt messages we receive
+ default: 1
+ with_legacy: true
+# number of worker processing threads for async messenger created on init
+- name: ms_async_op_threads
+ type: uint
+ level: advanced
+ desc: Threadpool size for AsyncMessenger (ms_type=async)
+ fmt_desc: Initial number of worker threads used by each Async Messenger instance.
+ Should be at least equal to the highest number of replicas, but you can
+ decrease it if you are low on CPU cores and/or you host a lot of
+ OSDs on a single server.
+ default: 3
+ min: 1
+ max: 24
+ with_legacy: true
+- name: ms_async_reap_threshold
+ type: uint
+ level: dev
+ desc: number of deleted connections before we reap
+ default: 5
+ min: 1
+ with_legacy: true
+- name: ms_async_rdma_device_name
+ type: str
+ level: advanced
+ with_legacy: true
+- name: ms_async_rdma_enable_hugepage
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: ms_async_rdma_buffer_size
+ type: size
+ level: advanced
+ default: 128_K
+ with_legacy: true
+- name: ms_async_rdma_send_buffers
+ type: uint
+ level: advanced
+ default: 1_K
+ with_legacy: true
+# size of the receive buffer pool, 0 is unlimited
+- name: ms_async_rdma_receive_buffers
+ type: uint
+ level: advanced
+ default: 32_K
+ with_legacy: true
+# max number of wr in srq
+- name: ms_async_rdma_receive_queue_len
+ type: uint
+ level: advanced
+ default: 4_K
+ with_legacy: true
+# support srq
+- name: ms_async_rdma_support_srq
+ type: bool
+ level: advanced
+ default: true
+ with_legacy: true
+- name: ms_async_rdma_port_num
+ type: uint
+ level: advanced
+ default: 1
+ with_legacy: true
+- name: ms_async_rdma_polling_us
+ type: uint
+ level: advanced
+ default: 1000
+ with_legacy: true
+- name: ms_async_rdma_gid_idx
+ type: int
+ level: advanced
+ desc: use gid_idx to select GID for choosing RoCEv1 or RoCEv2
+ default: 0
+ with_legacy: true
+# GID format: "fe80:0000:0000:0000:7efe:90ff:fe72:6efe", no zero folding
+- name: ms_async_rdma_local_gid
+ type: str
+ level: advanced
+ with_legacy: true
+# 0=RoCEv1, 1=RoCEv2, 2=RoCEv1.5
+- name: ms_async_rdma_roce_ver
+ type: int
+ level: advanced
+ default: 1
+ with_legacy: true
+# in RoCE, this means PCP
+- name: ms_async_rdma_sl
+ type: int
+ level: advanced
+ default: 3
+ with_legacy: true
+# in RoCE, this means DSCP
+- name: ms_async_rdma_dscp
+ type: int
+ level: advanced
+ default: 96
+ with_legacy: true
+# when there are enough accept failures, indicating unrecoverable failures,
+# just do ceph_abort(). Here we make it configurable.
+- name: ms_max_accept_failures
+ type: int
+ level: advanced
+ desc: The maximum number of consecutive failed accept() calls before considering
+ the daemon misconfigured and aborting it.
+ default: 4
+ with_legacy: true
+# rdma connection management
+- name: ms_async_rdma_cm
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: ms_async_rdma_type
+ type: str
+ level: advanced
+ default: ib
+ with_legacy: true
+- name: ms_dpdk_port_id
+ type: int
+ level: advanced
+ default: 0
+ with_legacy: true
+# it is modified in unit tests, so use SAFE_OPTION to declare it
+- name: ms_dpdk_coremask
+ type: str
+ level: advanced
+ default: '0xF'
+ see_also:
+ - ms_async_op_threads
+ with_legacy: true
+- name: ms_dpdk_memory_channel
+ type: str
+ level: advanced
+ default: '4'
+ with_legacy: true
+- name: ms_dpdk_hugepages
+ type: str
+ level: advanced
+ with_legacy: true
+- name: ms_dpdk_pmd
+ type: str
+ level: advanced
+ with_legacy: true
+- name: ms_dpdk_devs_allowlist
+ type: str
+ level: advanced
+ desc: PCIe addresses of NICs that are allowed to be used
+ long_desc: for a single NIC use ms_dpdk_devs_allowlist=-a 0000:7d:010 or --allow=0000:7d:010;
+ for bonded NICs use ms_dpdk_devs_allowlist=--allow=0000:7d:01.0 --allow=0000:7d:02.6
+ --vdev=net_bonding0,mode=2,slave=0000:7d:01.0,slave=0000:7d:02.6.
+- name: ms_dpdk_host_ipv4_addr
+ type: str
+ level: advanced
+ with_legacy: true
+- name: ms_dpdk_gateway_ipv4_addr
+ type: str
+ level: advanced
+ with_legacy: true
+- name: ms_dpdk_netmask_ipv4_addr
+ type: str
+ level: advanced
+ with_legacy: true
+- name: ms_dpdk_lro
+ type: bool
+ level: advanced
+ default: true
+ with_legacy: true
+- name: ms_dpdk_enable_tso
+ type: bool
+ level: advanced
+ default: true
+- name: ms_dpdk_hw_flow_control
+ type: bool
+ level: advanced
+ default: true
+ with_legacy: true
+# Weighting of a hardware network queue relative to a software queue (0=no work, 1=equal share)
+- name: ms_dpdk_hw_queue_weight
+ type: float
+ level: advanced
+ default: 1
+ with_legacy: true
+- name: ms_dpdk_debug_allow_loopback
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: ms_dpdk_rx_buffer_count_per_core
+ type: int
+ level: advanced
+ default: 8192
+ with_legacy: true
+- name: inject_early_sigterm
+ type: bool
+ level: dev
+ desc: send ourselves a SIGTERM early during startup
+ default: false
+ with_legacy: true
+# list of initial cluster mon ids; if specified, need majority to form initial quorum and create new cluster
+- name: mon_initial_members
+ type: str
+ level: advanced
+ fmt_desc: The IDs of initial monitors in a cluster during startup. If
+ specified, Ceph requires an odd number of monitors to form an
+ initial quorum (e.g., 3).
+ note: A *majority* of monitors in your cluster must be able to reach
+ each other in order to establish a quorum. You can decrease the initial
+ number of monitors to establish a quorum with this setting.
+ services:
+ - mon
+ flags:
+ - no_mon_update
+ - cluster_create
+ with_legacy: true
+- name: mon_max_pg_per_osd
+ type: uint
+ level: advanced
+ desc: Max number of PGs per OSD the cluster will allow
+ long_desc: If the number of PGs per OSD exceeds this, a health warning will be visible
+ in `ceph status`. This is also used in automated PG management, as the threshold
+ at which some pools' pg_num may be shrunk in order to enable increasing the pg_num
+ of others.
+ default: 250
+ flags:
+ - runtime
+ services:
+ - mgr
+ - mon
+ min: 1
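+# Editor's worked example: with the default of 250 and a 10-OSD cluster, the
+# cluster-wide budget is roughly 250 * 10 = 2500 PG replicas; a single pool
+# with pg_num=512 and size=3 consumes 512 * 3 = 1536 of that budget.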
+- name: mon_osd_full_ratio
+ type: float
+ level: advanced
+ desc: full ratio of OSDs to be set during initial creation of the cluster
+ default: 0.95
+ flags:
+ - no_mon_update
+ - cluster_create
+ with_legacy: true
+- name: mon_osd_backfillfull_ratio
+ type: float
+ level: advanced
+ default: 0.9
+ flags:
+ - no_mon_update
+ - cluster_create
+ with_legacy: true
+- name: mon_osd_nearfull_ratio
+ type: float
+ level: advanced
+ desc: nearfull ratio for OSDs to be set during initial creation of cluster
+ default: 0.85
+ flags:
+ - no_mon_update
+ - cluster_create
+ with_legacy: true
+- name: mon_osd_initial_require_min_compat_client
+ type: str
+ level: advanced
+ default: luminous
+ flags:
+ - no_mon_update
+ - cluster_create
+ with_legacy: true
+- name: mon_allow_pool_delete
+ type: bool
+ level: advanced
+ desc: allow pool deletions
+ fmt_desc: Should monitors allow pools to be removed, regardless of what the pool flags say?
+ default: false
+ services:
+ - mon
+ with_legacy: true
+- name: mon_fake_pool_delete
+ type: bool
+ level: advanced
+ desc: fake pool deletions by renaming the rados pool
+ default: false
+ services:
+ - mon
+ with_legacy: true
+- name: mon_globalid_prealloc
+ type: uint
+ level: advanced
+ desc: number of globalid values to preallocate
+ long_desc: This setting caps how many new clients can authenticate with the cluster
+ before the monitors have to perform a write to preallocate more. Large values
+ burn through the 64-bit ID space more quickly.
+ fmt_desc: The number of global IDs to pre-allocate for clients and daemons in the cluster.
+ default: 10000
+ services:
+ - mon
+ with_legacy: true
+- name: mon_osd_report_timeout
+ type: int
+ level: advanced
+  desc: time before OSDs that do not report to the mons are marked down (seconds)
+ fmt_desc: The grace period in seconds before declaring
+ unresponsive Ceph OSD Daemons ``down``.
+ default: 15_min
+ services:
+ - mon
+ with_legacy: true
+- name: mon_warn_on_insecure_global_id_reclaim
+ type: bool
+ level: advanced
+ desc: issue AUTH_INSECURE_GLOBAL_ID_RECLAIM health warning if any connected
+ clients are insecurely reclaiming global_id
+ default: true
+ services:
+ - mon
+ see_also:
+ - mon_warn_on_insecure_global_id_reclaim_allowed
+ - auth_allow_insecure_global_id_reclaim
+ - auth_expose_insecure_global_id_reclaim
+- name: mon_warn_on_insecure_global_id_reclaim_allowed
+ type: bool
+ level: advanced
+ desc: issue AUTH_INSECURE_GLOBAL_ID_RECLAIM_ALLOWED health warning if insecure
+ global_id reclaim is allowed
+ default: true
+ services:
+ - mon
+ see_also:
+ - mon_warn_on_insecure_global_id_reclaim
+ - auth_allow_insecure_global_id_reclaim
+ - auth_expose_insecure_global_id_reclaim
+- name: mon_warn_on_msgr2_not_enabled
+ type: bool
+ level: advanced
+ desc: issue MON_MSGR2_NOT_ENABLED health warning if monitors are all running Nautilus
+ but not all binding to a msgr2 port
+ default: true
+ services:
+ - mon
+ see_also:
+ - ms_bind_msgr2
+- name: mon_warn_on_slow_ping_time
+ type: float
+ level: advanced
+  desc: Override mon_warn_on_slow_ping_ratio with a specified threshold in milliseconds
+ fmt_desc: Override ``mon_warn_on_slow_ping_ratio`` with a specific value.
+ Raise ``HEALTH_WARN`` if any heartbeat between OSDs exceeds
+ ``mon_warn_on_slow_ping_time`` milliseconds. The default is 0 (disabled).
+ default: 0
+ services:
+ - mgr
+ - osd
+ see_also:
+ - mon_warn_on_slow_ping_ratio
+- name: mon_warn_on_slow_ping_ratio
+ type: float
+ level: advanced
+  desc: Issue a health warning if a heartbeat ping takes longer than this fraction of osd_heartbeat_grace
+ fmt_desc: Raise ``HEALTH_WARN`` when any heartbeat between OSDs exceeds
+ ``mon_warn_on_slow_ping_ratio`` of ``osd_heartbeat_grace``.
+ default: 0.05
+ services:
+ - mgr
+ - osd
+ see_also:
+ - osd_heartbeat_grace
+ - mon_warn_on_slow_ping_time
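+# example: with osd_heartbeat_grace=20 (seconds) and a ratio of 0.05, a warning
+# is raised when an OSD heartbeat ping exceeds 1000 ms; setting
+# mon_warn_on_slow_ping_time=500 would instead warn at a fixed 500 ms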
+- name: mon_max_snap_prune_per_epoch
+ type: uint
+ level: advanced
+ desc: max number of pruned snaps we will process in a single OSDMap epoch
+ default: 100
+ services:
+ - mon
+- name: mon_min_osdmap_epochs
+ type: int
+ level: advanced
+ desc: min number of OSDMaps to store
+ fmt_desc: Minimum number of OSD map epochs to keep at all times.
+ default: 500
+ services:
+ - mon
+ with_legacy: true
+- name: mon_max_log_epochs
+ type: int
+ level: advanced
+ desc: max number of past cluster log epochs to store
+ fmt_desc: Maximum number of Log epochs the monitor should keep.
+ default: 500
+ services:
+ - mon
+ with_legacy: true
+- name: mon_max_mdsmap_epochs
+ type: int
+ level: advanced
+ desc: max number of FSMaps/MDSMaps to store
+ fmt_desc: The maximum number of mdsmap epochs to trim during a single proposal.
+ default: 500
+ services:
+ - mon
+ with_legacy: true
+- name: mon_max_mgrmap_epochs
+ type: int
+ level: advanced
+ desc: max number of MgrMaps to store
+ default: 500
+ services:
+ - mon
+- name: mon_max_osd
+ type: int
+ level: advanced
+ desc: max number of OSDs in a cluster
+ fmt_desc: The maximum number of OSDs allowed in the cluster.
+ default: 10000
+ services:
+ - mon
+ with_legacy: true
+- name: mon_probe_timeout
+ type: float
+ level: advanced
+ desc: timeout for querying other mons during bootstrap pre-election phase (seconds)
+ fmt_desc: Number of seconds the monitor will wait to find peers before bootstrapping.
+ default: 2
+ services:
+ - mon
+ with_legacy: true
+- name: mon_client_bytes
+ type: size
+ level: advanced
+ desc: max bytes of outstanding client messages mon will read off the network
+ fmt_desc: The amount of client message data allowed in memory (in bytes).
+ default: 100_M
+ services:
+ - mon
+ with_legacy: true
+- name: mon_warn_pg_not_scrubbed_ratio
+ type: float
+ level: advanced
+  desc: Percentage of osd_scrub_max_interval beyond the max interval after which to warn about PGs that have not been scrubbed
+ default: 0.5
+ see_also:
+ - osd_scrub_max_interval
+ min: 0
+ with_legacy: true
+- name: mon_warn_pg_not_deep_scrubbed_ratio
+ type: float
+ level: advanced
+  desc: Percentage of osd_deep_scrub_interval beyond the interval after which to warn about PGs that have not been deep-scrubbed
+ default: 0.75
+ see_also:
+ - osd_deep_scrub_interval
+ min: 0
+ with_legacy: true
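+# example: the warning fires when the time since the last (deep) scrub exceeds
+# roughly (1 + ratio) * interval; with a 7-day osd_scrub_max_interval and ratio
+# 0.5 that is 10.5 days, and with a 7-day osd_deep_scrub_interval and ratio
+# 0.75 it is 12.25 days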
+- name: mon_scrub_interval
+ type: secs
+ level: advanced
+ desc: frequency for scrubbing mon database
+ fmt_desc: How often the monitor scrubs its store by comparing
+ the stored checksums with the computed ones for all stored
+ keys. (0 disables it. dangerous, use with care)
+ default: 1_day
+ services:
+ - mon
+- name: mon_scrub_timeout
+ type: int
+ level: advanced
+  desc: timeout to restart scrub if a mon quorum participant does not respond for the
+    latest chunk
+ default: 5_min
+ services:
+ - mon
+ with_legacy: true
+- name: mon_scrub_max_keys
+ type: int
+ level: advanced
+  desc: max keys per mon scrub chunk/step
+ fmt_desc: The maximum number of keys to scrub each time.
+ default: 100
+ services:
+ - mon
+ with_legacy: true
+# probability of injected crc mismatch [0.0, 1.0]
+- name: mon_scrub_inject_crc_mismatch
+ type: float
+ level: dev
+ desc: probability for injecting crc mismatches into mon scrub
+ default: 0
+ services:
+ - mon
+ with_legacy: true
+# probability of injected missing keys [0.0, 1.0]
+- name: mon_scrub_inject_missing_keys
+ type: float
+ level: dev
+ desc: probability for injecting missing keys into mon scrub
+ default: 0
+ services:
+ - mon
+ with_legacy: true
+- name: mon_config_key_max_entry_size
+ type: size
+ level: advanced
+ desc: Defines the number of bytes allowed to be held in a single config-key entry
+ fmt_desc: The maximum size of config-key entry (in bytes)
+ default: 64_K
+ services:
+ - mon
+ with_legacy: true
+- name: mon_sync_timeout
+ type: float
+ level: advanced
+ desc: timeout before canceling sync if syncing mon does not respond
+ fmt_desc: Number of seconds the monitor will wait for the next update
+    message from its sync provider before it gives up and bootstraps
+    again.
+ default: 1_min
+ services:
+ - mon
+ with_legacy: true
+- name: mon_sync_max_payload_size
+ type: size
+ level: advanced
+ desc: target max message payload for mon sync
+ fmt_desc: The maximum size for a sync payload (in bytes).
+ default: 1_M
+ services:
+ - mon
+ with_legacy: true
+- name: mon_sync_max_payload_keys
+ type: int
+ level: advanced
+ desc: target max keys in message payload for mon sync
+ default: 2000
+ services:
+ - mon
+ with_legacy: true
+- name: mon_sync_debug
+ type: bool
+ level: dev
+ desc: enable extra debugging during mon sync
+ default: false
+ services:
+ - mon
+ with_legacy: true
+- name: mon_inject_sync_get_chunk_delay
+ type: float
+ level: dev
+ desc: inject delay during sync (seconds)
+ default: 0
+ services:
+ - mon
+ with_legacy: true
+- name: mon_osd_min_down_reporters
+ type: uint
+ level: advanced
+  desc: number of OSDs from different subtrees that need to report a down OSD for it
+    to count
+ fmt_desc: The minimum number of Ceph OSD Daemons required to report a
+ ``down`` Ceph OSD Daemon.
+ default: 2
+ services:
+ - mon
+ see_also:
+ - mon_osd_reporter_subtree_level
+- name: mon_osd_reporter_subtree_level
+ type: str
+ level: advanced
+ desc: in which level of parent bucket the reporters are counted
+ fmt_desc: In which level of parent bucket the reporters are counted. The OSDs
+ send failure reports to monitors if they find a peer that is not responsive.
+    Monitors mark the reported ``OSD`` ``down`` and, after a grace period, ``out``.
+ default: host
+ services:
+ - mon
+ flags:
+ - runtime
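+# example: with mon_osd_reporter_subtree_level=host and
+# mon_osd_min_down_reporters=2, failure reports must come from OSDs on at
+# least two distinct hosts before the mon marks the reported OSD down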
+- name: mon_osd_snap_trim_queue_warn_on
+ type: int
+ level: advanced
+  desc: Warn when a snap trim queue reaches this length (or longer).
+  long_desc: Warn when the snap trim queue length for at least one PG crosses this value,
+    as this indicates that the snap trimmer is not keeping up and disk space is being wasted
+ default: 32768
+ services:
+ - mon
+ with_legacy: true
+# force mon to trim maps to this point, regardless of min_last_epoch_clean (dangerous)
+- name: mon_osd_force_trim_to
+ type: int
+ level: dev
+ desc: force mons to trim osdmaps through this epoch
+  fmt_desc: Force monitor to trim osdmaps to this point, even if there are
+    PGs that are not clean at the specified epoch (0 disables it. dangerous,
+ use with care)
+ default: 0
+ services:
+ - mon
+ with_legacy: true
+- name: mon_debug_extra_checks
+ type: bool
+ level: dev
+ desc: Enable some additional monitor checks
+ long_desc: Enable some additional monitor checks that would be too expensive to
+ run on production systems, or would only be relevant while testing or debugging.
+ default: false
+ services:
+ - mon
+- name: mon_debug_block_osdmap_trim
+ type: bool
+ level: dev
+ desc: Block OSDMap trimming while the option is enabled.
+ long_desc: Blocking OSDMap trimming may be quite helpful to easily reproduce states
+ in which the monitor keeps (hundreds of) thousands of osdmaps.
+ default: false
+ services:
+ - mon
+- name: mon_debug_deprecated_as_obsolete
+ type: bool
+ level: dev
+ desc: treat deprecated mon commands as obsolete
+ default: false
+ services:
+ - mon
+ with_legacy: true
+- name: mon_debug_dump_transactions
+ type: bool
+ level: dev
+ desc: dump paxos transactions to log
+ default: false
+ services:
+ - mon
+ see_also:
+ - mon_debug_dump_location
+ with_legacy: true
+- name: mon_debug_dump_json
+ type: bool
+ level: dev
+  desc: dump paxos transactions to log as json
+ default: false
+ services:
+ - mon
+ see_also:
+ - mon_debug_dump_transactions
+ with_legacy: true
+- name: mon_debug_dump_location
+ type: str
+ level: dev
+ desc: file to dump paxos transactions to
+ default: /var/log/ceph/$cluster-$name.tdump
+ services:
+ - mon
+ see_also:
+ - mon_debug_dump_transactions
+ with_legacy: true
+- name: mon_debug_no_require_quincy
+ type: bool
+ level: dev
+ desc: do not set quincy feature for new mon clusters
+ default: false
+ services:
+ - mon
+ flags:
+ - cluster_create
+- name: mon_debug_no_require_reef
+ type: bool
+ level: dev
+ desc: do not set reef feature for new mon clusters
+ default: false
+ services:
+ - mon
+ flags:
+ - cluster_create
+- name: mon_debug_no_require_bluestore_for_ec_overwrites
+ type: bool
+ level: dev
+ desc: do not require bluestore OSDs to enable EC overwrites on a rados pool
+ default: false
+ services:
+ - mon
+ with_legacy: true
+- name: mon_debug_no_initial_persistent_features
+ type: bool
+ level: dev
+ desc: do not set any monmap features for new mon clusters
+ default: false
+ services:
+ - mon
+ flags:
+ - cluster_create
+ with_legacy: true
+- name: mon_inject_transaction_delay_max
+ type: float
+ level: dev
+ desc: max duration of injected delay in paxos
+ default: 10
+ services:
+ - mon
+ with_legacy: true
+# range [0, 1]
+- name: mon_inject_transaction_delay_probability
+ type: float
+ level: dev
+ desc: probability of injecting a delay in paxos
+ default: 0
+ services:
+ - mon
+ with_legacy: true
+- name: mon_inject_pg_merge_bounce_probability
+ type: float
+ level: dev
+ desc: probability of failing and reverting a pg_num decrement
+ default: 0
+ services:
+ - mon
+# kill the sync provider at a specific point in the work flow
+- name: mon_sync_provider_kill_at
+ type: int
+ level: dev
+  desc: kill mon sync provider at a specific point
+ default: 0
+ services:
+ - mon
+ with_legacy: true
+# kill the sync requester at a specific point in the work flow
+- name: mon_sync_requester_kill_at
+ type: int
+ level: dev
+  desc: kill mon sync requester at a specific point
+ default: 0
+ services:
+ - mon
+ with_legacy: true
+# force monitor to join quorum even if it has been previously removed from the map
+- name: mon_force_quorum_join
+ type: bool
+ level: advanced
+ desc: force mon to rejoin quorum even though it was just removed
+ fmt_desc: Force monitor to join quorum even if it has been previously removed from the map
+ default: false
+ services:
+ - mon
+ with_legacy: true
+# type of keyvaluedb backend
+- name: mon_keyvaluedb
+ type: str
+ level: advanced
+ desc: database backend to use for the mon database
+ default: rocksdb
+ services:
+ - mon
+ enum_values:
+ - leveldb
+ - rocksdb
+ flags:
+ - create
+ with_legacy: true
+# UNSAFE -- TESTING ONLY! Allows addition of a cache tier with preexisting snaps
+- name: mon_debug_unsafe_allow_tier_with_nonempty_snaps
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mon
+ with_legacy: true
+# required of mon, mds, osd daemons
+- name: auth_cluster_required
+ type: str
+ level: advanced
+ desc: authentication methods required by the cluster
+ fmt_desc: If enabled, the Ceph Storage Cluster daemons (i.e., ``ceph-mon``,
+ ``ceph-osd``, ``ceph-mds`` and ``ceph-mgr``) must authenticate with
+ each other. Valid settings are ``cephx`` or ``none``.
+ default: cephx
+ with_legacy: true
+# required by daemons of clients
+- name: auth_service_required
+ type: str
+ level: advanced
+ desc: authentication methods required by service daemons
+ fmt_desc: If enabled, the Ceph Storage Cluster daemons require Ceph Clients
+ to authenticate with the Ceph Storage Cluster in order to access
+ Ceph services. Valid settings are ``cephx`` or ``none``.
+ default: cephx
+ with_legacy: true
+# what clients require of daemons
+- name: auth_client_required
+ type: str
+ level: advanced
+ desc: authentication methods allowed by clients
+ fmt_desc: If enabled, the Ceph Client requires the Ceph Storage Cluster to
+ authenticate with the Ceph Client. Valid settings are ``cephx``
+ or ``none``.
+ default: cephx, none
+ with_legacy: true
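+# a minimal ceph.conf sketch requiring cephx everywhere (placement in
+# [global] is illustrative):
+#   [global]
+#   auth_cluster_required = cephx
+#   auth_service_required = cephx
+#   auth_client_required = cephx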
+# deprecated; default value for above if they are not defined.
+- name: auth_supported
+ type: str
+ level: advanced
+ desc: authentication methods required (deprecated)
+ with_legacy: true
+- name: max_rotating_auth_attempts
+ type: int
+ level: advanced
+ desc: number of attempts to initialize rotating keys before giving up
+ default: 10
+ with_legacy: true
+- name: rotating_keys_bootstrap_timeout
+ type: int
+ level: advanced
+ desc: timeout for obtaining rotating keys during bootstrap phase (seconds)
+ default: 30
+- name: rotating_keys_renewal_timeout
+ type: int
+ level: advanced
+ desc: timeout for updating rotating keys (seconds)
+ default: 10
+- name: cephx_require_signatures
+ type: bool
+ level: advanced
+ default: false
+ fmt_desc: If set to ``true``, Ceph requires signatures on all message
+ traffic between the Ceph Client and the Ceph Storage Cluster, and
+ between daemons comprising the Ceph Storage Cluster.
+
+ Ceph Argonaut and Linux kernel versions prior to 3.19 do
+ not support signatures; if such clients are in use this
+ option can be turned off to allow them to connect.
+ with_legacy: true
+- name: cephx_require_version
+ type: int
+ level: advanced
+ desc: Cephx version required (1 = pre-mimic, 2 = mimic+)
+ default: 2
+ with_legacy: true
+- name: cephx_cluster_require_signatures
+ type: bool
+ level: advanced
+ default: false
+ fmt_desc: If set to ``true``, Ceph requires signatures on all message
+ traffic between Ceph daemons comprising the Ceph Storage Cluster.
+ with_legacy: true
+- name: cephx_cluster_require_version
+ type: int
+ level: advanced
+ desc: Cephx version required by the cluster from clients (1 = pre-mimic, 2 = mimic+)
+ default: 2
+ with_legacy: true
+- name: cephx_service_require_signatures
+ type: bool
+ level: advanced
+ default: false
+ fmt_desc: If set to ``true``, Ceph requires signatures on all message
+ traffic between Ceph Clients and the Ceph Storage Cluster.
+ with_legacy: true
+- name: cephx_service_require_version
+ type: int
+ level: advanced
+ desc: Cephx version required from ceph services (1 = pre-mimic, 2 = mimic+)
+ default: 2
+ with_legacy: true
+# Default to signing session messages if supported
+- name: cephx_sign_messages
+ type: bool
+ level: advanced
+ default: true
+ fmt_desc: If the Ceph version supports message signing, Ceph will sign
+ all messages so they are more difficult to spoof.
+ with_legacy: true
+- name: auth_mon_ticket_ttl
+ type: float
+ level: advanced
+ default: 72_hr
+ with_legacy: true
+- name: auth_service_ticket_ttl
+ type: float
+ level: advanced
+ default: 1_hr
+ fmt_desc: When the Ceph Storage Cluster sends a Ceph Client a ticket for
+ authentication, the Ceph Storage Cluster assigns the ticket a
+ time to live.
+ with_legacy: true
+- name: auth_allow_insecure_global_id_reclaim
+ type: bool
+ level: advanced
+ desc: Allow reclaiming global_id without presenting a valid ticket proving
+ previous possession of that global_id
+ long_desc: Allowing unauthorized global_id (re)use poses a security risk.
+ Unfortunately, older clients may omit their ticket on reconnects and
+ therefore rely on this being allowed for preserving their global_id for
+ the lifetime of the client instance. Setting this value to false would
+ immediately prevent new connections from those clients (assuming
+ auth_expose_insecure_global_id_reclaim set to true) and eventually break
+ existing sessions as well (regardless of auth_expose_insecure_global_id_reclaim
+ setting).
+ default: true
+ see_also:
+ - mon_warn_on_insecure_global_id_reclaim
+ - mon_warn_on_insecure_global_id_reclaim_allowed
+ - auth_expose_insecure_global_id_reclaim
+ with_legacy: true
+- name: auth_expose_insecure_global_id_reclaim
+ type: bool
+ level: advanced
+ desc: Force older clients that may omit their ticket on reconnects to
+ reconnect as part of establishing a session
+ long_desc: 'In permissive mode (auth_allow_insecure_global_id_reclaim set
+ to true), this helps with identifying clients that are not patched. In
+ enforcing mode (auth_allow_insecure_global_id_reclaim set to false), this
+ is a fail-fast mechanism: don''t establish a session that will almost
+ inevitably be broken later.'
+ default: true
+ see_also:
+ - mon_warn_on_insecure_global_id_reclaim
+ - mon_warn_on_insecure_global_id_reclaim_allowed
+ - auth_allow_insecure_global_id_reclaim
+ with_legacy: true
+# if true, assert when weird things happen
+- name: auth_debug
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+# how many mons to try to connect to in parallel during hunt
+- name: mon_client_hunt_parallel
+ type: uint
+ level: advanced
+ default: 3
+ with_legacy: true
+# try new mon every N seconds until we connect
+- name: mon_client_hunt_interval
+ type: float
+ level: advanced
+ default: 3
+ fmt_desc: The client will try a new monitor every ``N`` seconds until it
+ establishes a connection.
+ with_legacy: true
+# send logs every N seconds
+- name: mon_client_log_interval
+ type: float
+ level: advanced
+ desc: How frequently we send queued cluster log messages to mon
+ default: 1
+ with_legacy: true
+# ping every N seconds
+- name: mon_client_ping_interval
+ type: float
+ level: advanced
+ default: 10
+ fmt_desc: The client will ping the monitor every ``N`` seconds.
+ with_legacy: true
+# fail if we don't hear back
+- name: mon_client_ping_timeout
+ type: float
+ level: advanced
+ default: 30
+ with_legacy: true
+- name: mon_client_hunt_interval_backoff
+ type: float
+ level: advanced
+ default: 1.5
+ with_legacy: true
+- name: mon_client_hunt_interval_min_multiple
+ type: float
+ level: advanced
+ default: 1
+ with_legacy: true
+- name: mon_client_hunt_interval_max_multiple
+ type: float
+ level: advanced
+ default: 10
+ with_legacy: true
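+# the reconnect delay is, roughly, mon_client_hunt_interval scaled by successive
+# powers of mon_client_hunt_interval_backoff, clamped between the min and max
+# multiples: with the defaults above, approximately 3s, 4.5s, 6.75s, ...
+# capped at 3s * 10 = 30s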
+- name: mon_client_max_log_entries_per_message
+ type: int
+ level: advanced
+ default: 1000
+ fmt_desc: The maximum number of log entries a monitor will generate
+ per client message.
+ with_legacy: true
+- name: mon_client_directed_command_retry
+ type: int
+ level: dev
+ desc: Number of times to try sending a command directed at a specific monitor
+ default: 2
+ with_legacy: true
+# whitespace-separated list of key=value pairs describing crush location
+- name: crush_location
+ type: str
+ level: advanced
+ with_legacy: true
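+# example (hypothetical names): crush_location = root=default datacenter=dc1 host=node1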
+- name: crush_location_hook
+ type: str
+ level: advanced
+ with_legacy: true
+- name: crush_location_hook_timeout
+ type: int
+ level: advanced
+ default: 10
+ with_legacy: true
+- name: objecter_tick_interval
+ type: float
+ level: dev
+ default: 5
+ with_legacy: true
+# before we ask for a map
+- name: objecter_timeout
+ type: float
+ level: advanced
+ desc: Seconds before in-flight op is considered 'laggy' and we query mon for the
+ latest OSDMap
+ default: 10
+ with_legacy: true
+- name: objecter_inflight_op_bytes
+ type: size
+ level: advanced
+ desc: Max in-flight data in bytes (both directions)
+ default: 100_M
+ with_legacy: true
+- name: objecter_inflight_ops
+ type: uint
+ level: advanced
+ desc: Max in-flight operations
+ default: 1_K
+ with_legacy: true
+# num of completion locks per each session, for serializing same object responses
+- name: objecter_completion_locks_per_session
+ type: uint
+ level: dev
+ default: 32
+ with_legacy: true
+# suppress watch pings
+- name: objecter_inject_no_watch_ping
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+# ignore the first reply for each write, and resend the osd op instead
+- name: objecter_retry_writes_after_first_reply
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: objecter_debug_inject_relock_delay
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: filer_max_purge_ops
+ type: uint
+ level: advanced
+ desc: Max in-flight operations for purging a striped range (e.g., MDS journal)
+ default: 10
+ with_legacy: true
+- name: filer_max_truncate_ops
+ type: uint
+ level: advanced
+ desc: Max in-flight operations for truncating/deleting a striped sequence (e.g.,
+ MDS journal)
+ default: 128
+ with_legacy: true
+- name: journaler_write_head_interval
+ type: int
+ level: advanced
+ desc: Interval in seconds between journal header updates (to help bound replay time)
+ default: 15
+# * journal object size
+- name: journaler_prefetch_periods
+ type: uint
+ level: advanced
+ desc: Number of striping periods to prefetch while reading MDS journal
+ default: 10
+ # we need at least 2 periods to make progress.
+ min: 2
+# * journal object size
+- name: journaler_prezero_periods
+ type: uint
+ level: advanced
+  desc: Number of striping periods to zero ahead of the MDS journal write position
+ default: 5
+ # we need to zero at least two periods, minimum, to ensure that we
+ # have a full empty object/period in front of us.
+ min: 2
+- name: osd_calc_pg_upmaps_aggressively
+ type: bool
+ level: advanced
+ desc: try to calculate PG upmaps more aggressively, e.g., by doing a fairly exhaustive
+ search of existing PGs that can be unmapped or upmapped
+ default: true
+ flags:
+ - runtime
+- name: osd_calc_pg_upmaps_aggressively_fast
+ type: bool
+ level: advanced
+ desc: Prevent very long (>10 minutes) calculations in some extreme cases (applicable
+ only to aggressive mode)
+ default: true
+ flags:
+ - runtime
+- name: osd_calc_pg_upmaps_local_fallback_retries
+ type: uint
+ level: advanced
+  desc: Maximum number of PGs we can attempt to unmap or upmap for a specific overfull
+    or underfull osd per iteration
+ default: 100
+ flags:
+ - runtime
+# 1 = host
+- name: osd_crush_chooseleaf_type
+ type: int
+ level: dev
+ desc: default chooseleaf type for osdmaptool --create
+ fmt_desc: The bucket type to use for ``chooseleaf`` in a CRUSH rule. Uses
+ ordinal rank rather than name.
+ default: 1
+ flags:
+ - cluster_create
+ with_legacy: true
+# try to use gmt for hitset archive names if all osds in cluster support it
+- name: osd_pool_use_gmt_hitset
+ type: bool
+ level: dev
+ desc: use UTC for hitset timestamps
+ long_desc: This setting only exists for compatibility with hammer (and older) clusters.
+ default: true
+ with_legacy: true
+# whether turn on fast read on the pool or not
+- name: osd_pool_default_ec_fast_read
+ type: bool
+ level: advanced
+ desc: set ec_fast_read for new erasure-coded pools
+ fmt_desc: Whether to turn on fast read on the pool or not. It will be used as
+ the default setting of newly created erasure coded pools if ``fast_read``
+ is not specified at create time.
+ default: false
+ services:
+ - mon
+ with_legacy: true
+- name: osd_pool_default_crush_rule
+ type: int
+ level: advanced
+ desc: CRUSH rule for newly created pools
+ fmt_desc: The default CRUSH rule to use when creating a replicated pool. The
+ default value of ``-1`` means "pick the rule with the lowest numerical ID and
+ use that". This is to make pool creation work in the absence of rule 0.
+ default: -1
+ services:
+ - mon
+- name: osd_pool_default_size
+ type: uint
+ level: advanced
+ desc: the number of copies of an object for new replicated pools
+ fmt_desc: Sets the number of replicas for objects in the pool. The default
+ value is the same as
+ ``ceph osd pool set {pool-name} size {size}``.
+ default: 3
+ services:
+ - mon
+ min: 0
+ max: 10
+ flags:
+ - runtime
+- name: osd_pool_default_min_size
+ type: uint
+ level: advanced
+ desc: the minimal number of copies allowed to write to a degraded pool for new replicated
+ pools
+  long_desc: 0 means no specific default; ceph will use size - size/2
+ fmt_desc: Sets the minimum number of written replicas for objects in the
+ pool in order to acknowledge an I/O operation to the client. If
+ minimum is not met, Ceph will not acknowledge the I/O to the
+ client, **which may result in data loss**. This setting ensures
+ a minimum number of replicas when operating in ``degraded`` mode.
+ The default value is ``0`` which means no particular minimum. If ``0``,
+ minimum is ``size - (size / 2)``.
+ default: 0
+ services:
+ - mon
+ see_also:
+ - osd_pool_default_size
+ min: 0
+ max: 255
+ flags:
+ - runtime
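+# example: with osd_pool_default_size=3 and this option left at 0, the
+# effective minimum is 3 - 3/2 = 2 (integer division), so I/O is acknowledged
+# while at least two replicas are written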
+- name: osd_pool_default_pg_num
+ type: uint
+ level: advanced
+ desc: number of PGs for new pools
+ fmt_desc: The default number of placement groups for a pool. The default
+ value is the same as ``pg_num`` with ``mkpool``.
+  long_desc: With the default value of `osd_pool_default_pg_autoscale_mode` being
+    `on`, the number of PGs for new pools will start out at 1, unless the
+    user specifies the pg_num.
+ default: 32
+ services:
+ - mon
+ see_also:
+ - osd_pool_default_pg_autoscale_mode
+ flags:
+ - runtime
+- name: osd_pool_default_pgp_num
+ type: uint
+ level: advanced
+ desc: number of PGs for placement purposes (0 to match pg_num)
+ fmt_desc: |
+ The default number of placement groups for placement for a pool.
+ The default value is the same as ``pgp_num`` with ``mkpool``.
+ PG and PGP should be equal (for now). Note: should not be set unless
+ autoscaling is disabled.
+ default: 0
+ services:
+ - mon
+ see_also:
+ - osd_pool_default_pg_num
+ - osd_pool_default_pg_autoscale_mode
+ flags:
+ - runtime
+- name: osd_pool_default_type
+ type: str
+ level: advanced
+ desc: default type of pool to create
+ default: replicated
+ services:
+ - mon
+ enum_values:
+ - replicated
+ - erasure
+ flags:
+ - runtime
+- name: osd_pool_default_erasure_code_profile
+ type: str
+ level: advanced
+ desc: default erasure code profile for new erasure-coded pools
+ default: plugin=jerasure technique=reed_sol_van k=2 m=2
+ services:
+ - mon
+ flags:
+ - runtime
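+# with the default profile (k=2 m=2), each object is split into 2 data chunks
+# plus 2 coding chunks spread across 4 OSDs; any 2 of the 4 chunks can be lost
+# without losing data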
+- name: osd_erasure_code_plugins
+ type: str
+ level: advanced
+ desc: erasure code plugins to load
+ default: @osd_erasure_code_plugins@
+ services:
+ - mon
+ - osd
+ flags:
+ - startup
+ with_legacy: true
+- name: osd_pool_default_flags
+ type: int
+ level: dev
+ desc: (integer) flags to set on new pools
+ fmt_desc: The default flags for new pools.
+ default: 0
+ services:
+ - mon
+ with_legacy: true
+# use new pg hashing to prevent pool/pg overlap
+- name: osd_pool_default_flag_hashpspool
+ type: bool
+ level: advanced
+ desc: set hashpspool (better hashing scheme) flag on new pools
+ default: true
+ services:
+ - mon
+ with_legacy: true
+# pool can't be deleted
+- name: osd_pool_default_flag_nodelete
+ type: bool
+ level: advanced
+ desc: set nodelete flag on new pools
+ fmt_desc: Set the ``nodelete`` flag on new pools, which prevents pool removal.
+ default: false
+ services:
+ - mon
+ with_legacy: true
+# pool's pg and pgp num can't be changed
+- name: osd_pool_default_flag_nopgchange
+ type: bool
+ level: advanced
+ desc: set nopgchange flag on new pools
+ fmt_desc: Set the ``nopgchange`` flag on new pools. Does not allow the number of PGs to be changed.
+ default: false
+ services:
+ - mon
+ with_legacy: true
+# pool's size and min size can't be changed
+- name: osd_pool_default_flag_nosizechange
+ type: bool
+ level: advanced
+ desc: set nosizechange flag on new pools
+ fmt_desc: Set the ``nosizechange`` flag on new pools. Does not allow the ``size`` to be changed.
+ default: false
+ services:
+ - mon
+ with_legacy: true
+- name: osd_pool_default_flag_bulk
+ type: bool
+ level: advanced
+ desc: set bulk flag on new pools
+  fmt_desc: Set the ``bulk`` flag on new pools, allowing the autoscaler to use scale-down mode.
+ default: false
+ services:
+ - mon
+ with_legacy: true
+- name: osd_pool_default_hit_set_bloom_fpp
+ type: float
+ level: advanced
+ default: 0.05
+ services:
+ - mon
+ see_also:
+ - osd_tier_default_cache_hit_set_type
+ with_legacy: true
+- name: osd_pool_default_cache_target_dirty_ratio
+ type: float
+ level: advanced
+ default: 0.4
+ with_legacy: true
+- name: osd_pool_default_cache_target_dirty_high_ratio
+ type: float
+ level: advanced
+ default: 0.6
+ with_legacy: true
+- name: osd_pool_default_cache_target_full_ratio
+ type: float
+ level: advanced
+ default: 0.8
+ with_legacy: true
+# seconds
+- name: osd_pool_default_cache_min_flush_age
+ type: int
+ level: advanced
+ default: 0
+ with_legacy: true
+# seconds
+- name: osd_pool_default_cache_min_evict_age
+ type: int
+ level: advanced
+ default: 0
+ with_legacy: true
+# max size to check for eviction
+- name: osd_pool_default_cache_max_evict_check_size
+ type: int
+ level: advanced
+ default: 10
+ with_legacy: true
+- name: osd_pool_default_pg_autoscale_mode
+ type: str
+ level: advanced
+ desc: Default PG autoscaling behavior for new pools
+  long_desc: With the default value `on`, the autoscaler starts a new pool with 1
+    PG, unless the user specifies the pg_num.
+ default: 'on'
+ enum_values:
+ - 'off'
+ - 'warn'
+ - 'on'
+ flags:
+ - runtime
+- name: osd_pool_default_read_lease_ratio
+ type: float
+ level: dev
+ desc: Default read_lease_ratio for a pool, as a multiple of osd_heartbeat_grace
+ long_desc: This should be <= 1.0 so that the read lease will have expired by the
+ time we decide to mark a peer OSD down.
+ default: 0.8
+ see_also:
+ - osd_heartbeat_grace
+ flags:
+ - runtime
+ with_legacy: true
+# min target size for a HitSet
+- name: osd_hit_set_min_size
+ type: int
+ level: advanced
+ default: 1000
+ with_legacy: true
+# max target size for a HitSet
+- name: osd_hit_set_max_size
+ type: int
+ level: advanced
+ default: 100000
+ with_legacy: true
+# rados namespace for hit_set tracking
+- name: osd_hit_set_namespace
+ type: str
+ level: advanced
+ default: .ceph-internal
+ with_legacy: true
+# conservative default throttling values
+- name: osd_tier_promote_max_objects_sec
+ type: uint
+ level: advanced
+ default: 25
+ with_legacy: true
+- name: osd_tier_promote_max_bytes_sec
+ type: size
+ level: advanced
+ default: 5_M
+ with_legacy: true
+- name: osd_tier_default_cache_mode
+ type: str
+ level: advanced
+ default: writeback
+ enum_values:
+ - none
+ - writeback
+ - forward
+ - readonly
+ - readforward
+ - readproxy
+ - proxy
+ flags:
+ - runtime
+- name: osd_tier_default_cache_hit_set_count
+ type: uint
+ level: advanced
+ default: 4
+- name: osd_tier_default_cache_hit_set_period
+ type: uint
+ level: advanced
+ default: 1200
+- name: osd_tier_default_cache_hit_set_type
+ type: str
+ level: advanced
+ default: bloom
+ enum_values:
+ - bloom
+ - explicit_hash
+ - explicit_object
+ flags:
+ - runtime
+- name: osd_tier_default_cache_min_read_recency_for_promote
+ type: uint
+ level: advanced
+ desc: number of recent HitSets the object must appear in to be promoted (on read)
+ default: 1
+- name: osd_tier_default_cache_min_write_recency_for_promote
+ type: uint
+ level: advanced
+ desc: number of recent HitSets the object must appear in to be promoted (on write)
+ default: 1
+- name: osd_tier_default_cache_hit_set_grade_decay_rate
+ type: uint
+ level: advanced
+ default: 20
+- name: osd_tier_default_cache_hit_set_search_last_n
+ type: uint
+ level: advanced
+ default: 1
+- name: osd_objecter_finishers
+ type: int
+ level: advanced
+ default: 1
+ flags:
+ - startup
+ with_legacy: true
+- name: osd_map_dedup
+ type: bool
+ level: advanced
+ default: true
+ fmt_desc: Enable removing duplicates in the OSD map.
+ with_legacy: true
+- name: osd_map_message_max
+ type: int
+ level: advanced
+ desc: maximum number of OSDMaps to include in a single message
+ fmt_desc: The maximum map entries allowed per MOSDMap message.
+ default: 40
+ services:
+ - osd
+ - mon
+ with_legacy: true
+- name: osd_map_message_max_bytes
+ type: size
+ level: advanced
+ desc: maximum number of bytes worth of OSDMaps to include in a single message
+ default: 10_M
+ services:
+ - osd
+ - mon
+ with_legacy: true
+# do not assert on divergent_prior entries which aren't in the log and whose on-disk objects are newer
+- name: osd_ignore_stale_divergent_priors
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: osd_heartbeat_interval
+ type: int
+ level: dev
+ desc: Interval (in seconds) between peer pings
+  fmt_desc: How often a Ceph OSD Daemon pings its peers (in seconds).
+ default: 6
+ min: 1
+ max: 1_min
+ with_legacy: true
+# (seconds) how long before we decide a peer has failed
+# This setting is read by both the MONs and the OSDs and must be set to an equal value in the [mon] and [osd] (or [global]) sections of the configuration
+- name: osd_heartbeat_grace
+ type: int
+ level: advanced
+ default: 20
+  fmt_desc: The elapsed time after which a Ceph OSD Daemon that hasn't shown
+    a heartbeat is considered ``down`` by the Ceph Storage Cluster.
+    This setting must be set in both the [mon] and [osd] or [global]
+    sections so that it is read by both monitor and OSD daemons.
+ with_legacy: true
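+# a minimal ceph.conf sketch (value illustrative); putting the grace in
+# [global] keeps the mon and osd daemons in agreement:
+#   [global]
+#   osd_heartbeat_grace = 20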
+- name: osd_heartbeat_stale
+ type: int
+ level: advanced
+  desc: Interval (in seconds) after which an unresponsive heartbeat peer is marked stale.
+  long_desc: Automatically mark unresponsive heartbeat sessions as stale and tear
+    them down. The primary benefit is that the OSD doesn't need to keep a flood of blocked
+    heartbeat messages around in memory.
+ default: 10_min
+# prioritize the heartbeat TCP socket and set DSCP to CS6 on it if true
+- name: osd_heartbeat_use_min_delay_socket
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+# the minimum size of OSD heartbeat messages to send
+- name: osd_heartbeat_min_size
+ type: size
+ level: advanced
+ desc: Minimum heartbeat packet size in bytes. Will add dummy payload if heartbeat
+ packet is smaller than this.
+ default: 2000
+ with_legacy: true
+# max number of parallel snap trims/pg
+- name: osd_pg_max_concurrent_snap_trims
+ type: uint
+ level: advanced
+ default: 2
+ min: 1
+ with_legacy: true
+# max number of trimming pgs
+- name: osd_max_trimming_pgs
+ type: uint
+ level: advanced
+ default: 2
+ with_legacy: true
+# minimum number of peers that must be reachable to mark ourselves
+# back up after being wrongly marked down.
+- name: osd_heartbeat_min_healthy_ratio
+ type: float
+ level: advanced
+ default: 0.33
+ with_legacy: true
+# (seconds) how often to ping monitor if no peers
+- name: osd_mon_heartbeat_interval
+ type: int
+ level: advanced
+ default: 30
+ fmt_desc: How often the Ceph OSD Daemon pings a Ceph Monitor if it has no
+ Ceph OSD Daemon peers.
+ with_legacy: true
+- name: osd_mon_heartbeat_stat_stale
+ type: int
+ level: advanced
+ desc: Stop reporting on heartbeat ping times not updated for this many seconds.
+ long_desc: Stop reporting on old heartbeat information unless this is set to zero
+ fmt_desc: Stop reporting on heartbeat ping times which haven't been updated for
+ this many seconds. Set to zero to disable this action.
+ default: 1_hr
+# failures, up_thru, boot.
+- name: osd_mon_report_interval
+ type: int
+ level: advanced
+ desc: Frequency of OSD reports to mon for peer failures, fullness status changes
+ fmt_desc: The number of seconds a Ceph OSD Daemon may wait
+ from startup or another reportable event before reporting
+ to a Ceph Monitor.
+ default: 5
+ with_legacy: true
+# max updates in flight
+- name: osd_mon_report_max_in_flight
+ type: int
+ level: advanced
+ default: 2
+ with_legacy: true
+# (second) how often to send beacon message to monitor
+- name: osd_beacon_report_interval
+ type: int
+ level: advanced
+ default: 5_min
+ with_legacy: true
+# report pg stats for any given pg at least this often
+- name: osd_pg_stat_report_interval_max
+ type: int
+ level: advanced
+ default: 500
+ with_legacy: true
+# Max number of snap intervals to report to mgr in pg_stat_t
+- name: osd_max_snap_prune_intervals_per_epoch
+ type: uint
+ level: dev
+ desc: Max number of snap intervals to report to mgr in pg_stat_t
+ default: 512
+ with_legacy: true
+- name: osd_default_data_pool_replay_window
+ type: int
+ level: advanced
+ default: 45
+ fmt_desc: The time (in seconds) for an OSD to wait for a client to replay
+ a request.
+- name: osd_auto_mark_unfound_lost
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: osd_check_for_log_corruption
+ type: bool
+ level: advanced
+ default: false
+ fmt_desc: Check log files for corruption. Can be computationally expensive.
+ with_legacy: true
+- name: osd_use_stale_snap
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: osd_rollback_to_cluster_snap
+ type: str
+ level: advanced
+ with_legacy: true
+- name: osd_default_notify_timeout
+ type: uint
+ level: advanced
+ desc: default number of seconds after which notify propagation times out. used if
+ a client has not specified other value
+ fmt_desc: The OSD default notification timeout (in seconds).
+ default: 30
+ with_legacy: true
+- name: osd_kill_backfill_at
+ type: int
+ level: dev
+ default: 0
+ with_legacy: true
+# Bounds how infrequently a new map epoch will be persisted for a pg
+# make this < map_cache_size!
+- name: osd_pg_epoch_persisted_max_stale
+ type: uint
+ level: advanced
+ default: 40
+ with_legacy: true
+- name: osd_target_pg_log_entries_per_osd
+ type: uint
+ level: dev
+ desc: target number of PG entries total on an OSD - limited per pg by the min and
+ max options below
+ default: 300000
+ see_also:
+ - osd_max_pg_log_entries
+ - osd_min_pg_log_entries
+ with_legacy: true
+- name: osd_min_pg_log_entries
+ type: uint
+ level: dev
+ desc: minimum number of entries to maintain in the PG log
+ fmt_desc: The minimum number of placement group logs to maintain
+ when trimming log files.
+ default: 250
+ services:
+ - osd
+ see_also:
+ - osd_max_pg_log_entries
+ - osd_pg_log_dups_tracked
+ - osd_target_pg_log_entries_per_osd
+ with_legacy: true
+- name: osd_max_pg_log_entries
+ type: uint
+ level: dev
+ desc: maximum number of entries to maintain in the PG log
+ fmt_desc: The maximum number of placement group logs to maintain
+ when trimming log files.
+ default: 10000
+ services:
+ - osd
+ see_also:
+ - osd_min_pg_log_entries
+ - osd_pg_log_dups_tracked
+ - osd_target_pg_log_entries_per_osd
+ with_legacy: true
+- name: osd_pg_log_dups_tracked
+ type: uint
+ level: dev
+ desc: how many versions back to track in order to detect duplicate ops; this is
+ combined with both the regular pg log entries and additional minimal dup detection
+ entries
+ default: 3000
+ services:
+ - osd
+ see_also:
+ - osd_min_pg_log_entries
+ - osd_max_pg_log_entries
+ with_legacy: true
+- name: osd_object_clean_region_max_num_intervals
+ type: int
+ level: dev
+ desc: number of intervals in clean_offsets
+  long_desc: partial recovery uses multiple intervals to record the clean parts of
+    an object. When the number of intervals is greater than osd_object_clean_region_max_num_intervals,
+    the smallest interval will be trimmed (0 will recover the entire object data interval)
+ default: 10
+ services:
+ - osd
+ with_legacy: true
+# max entries factor before force recovery
+- name: osd_force_recovery_pg_log_entries_factor
+ type: float
+ level: dev
+ default: 1.3
+ with_legacy: true
+- name: osd_pg_log_trim_min
+ type: uint
+ level: dev
+ desc: Minimum number of log entries to trim at once. This lets us trim in larger
+ batches rather than with each write.
+ default: 100
+ see_also:
+ - osd_max_pg_log_entries
+ - osd_min_pg_log_entries
+ with_legacy: true
+- name: osd_force_auth_primary_missing_objects
+ type: uint
+ level: advanced
+ desc: Approximate missing objects above which to force auth_log_shard to be primary
+ temporarily
+ default: 100
+- name: osd_async_recovery_min_cost
+ type: uint
+ level: advanced
+  desc: A combined measure of the current log-entry difference and the number of historical
+    missing objects, above which we switch to asynchronous recovery when appropriate
+ default: 100
+ flags:
+ - runtime
+- name: osd_max_pg_per_osd_hard_ratio
+ type: float
+ level: advanced
+  desc: Maximum number of PGs per OSD, as a multiple of 'mon_max_pg_per_osd'
+ long_desc: OSD will refuse to instantiate PG if the number of PG it serves exceeds
+ this number.
+ fmt_desc: The ratio of number of PGs per OSD allowed by the cluster before the
+ OSD refuses to create new PGs. An OSD stops creating new PGs if the number
+ of PGs it serves exceeds
+ ``osd_max_pg_per_osd_hard_ratio`` \* ``mon_max_pg_per_osd``.
+ default: 3
+ see_also:
+ - mon_max_pg_per_osd
+ min: 1
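+# example: with mon_max_pg_per_osd=250 and a hard ratio of 3, an OSD refuses
+# to instantiate new PGs once it already serves 250 * 3 = 750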
+- name: osd_pg_log_trim_max
+ type: uint
+ level: advanced
+ desc: maximum number of entries to remove at once from the PG log
+ default: 10000
+ services:
+ - osd
+ see_also:
+ - osd_min_pg_log_entries
+ - osd_max_pg_log_entries
+ with_legacy: true
+# how many seconds old makes an op complaint-worthy
+- name: osd_op_complaint_time
+ type: float
+ level: advanced
+ default: 30
+  fmt_desc: An operation becomes complaint-worthy after the specified number
+    of seconds has elapsed.
+ with_legacy: true
+- name: osd_command_max_records
+ type: int
+ level: advanced
+ default: 256
+ fmt_desc: Limits the number of lost objects to return.
+ with_legacy: true
+# max peer osds to report that are blocking our progress
+- name: osd_max_pg_blocked_by
+ type: uint
+ level: advanced
+ default: 16
+ with_legacy: true
+- name: osd_op_log_threshold
+ type: int
+ level: advanced
+ default: 5
+ fmt_desc: How many operations logs to display at once.
+ with_legacy: true
+- name: osd_backoff_on_unfound
+ type: bool
+ level: advanced
+ default: true
+ with_legacy: true
+# [mainly for debug?] object unreadable/writeable
+- name: osd_backoff_on_degraded
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+# [debug] pg peering
+- name: osd_backoff_on_peering
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: osd_debug_shutdown
+ type: bool
+ level: dev
+ desc: Turn up debug levels during shutdown
+ default: false
+ with_legacy: true
+# crash osd if client ignores a backoff; useful for debugging
+- name: osd_debug_crash_on_ignored_backoff
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: osd_debug_inject_dispatch_delay_probability
+ type: float
+ level: dev
+ default: 0
+ with_legacy: true
+- name: osd_debug_inject_dispatch_delay_duration
+ type: float
+ level: dev
+ default: 0.1
+ with_legacy: true
+- name: osd_debug_drop_ping_probability
+ desc: N/A
+ type: float
+ level: dev
+ default: 0
+ with_legacy: true
+- name: osd_debug_drop_ping_duration
+ desc: N/A
+ type: int
+ level: dev
+ default: 0
+ with_legacy: true
+- name: osd_debug_op_order
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: osd_debug_verify_missing_on_start
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: osd_debug_verify_snaps
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: osd_debug_verify_stray_on_activate
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: osd_debug_skip_full_check_in_backfill_reservation
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: osd_debug_reject_backfill_probability
+ type: float
+ level: dev
+ default: 0
+ with_legacy: true
+# inject failure during copyfrom completion
+- name: osd_debug_inject_copyfrom_error
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: osd_debug_misdirected_ops
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: osd_debug_skip_full_check_in_recovery
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: osd_debug_random_push_read_error
+ type: float
+ level: dev
+ default: 0
+ with_legacy: true
+- name: osd_debug_verify_cached_snaps
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: osd_debug_deep_scrub_sleep
+ type: float
+ level: dev
+ desc: Inject an expensive sleep during deep scrub IO to make it easier to induce
+ preemption
+ default: 0
+ with_legacy: true
+- name: osd_debug_no_acting_change
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: osd_debug_no_purge_strays
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: osd_debug_pretend_recovery_active
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+# enable/disable OSD op tracking
+- name: osd_enable_op_tracker
+ type: bool
+ level: advanced
+ default: true
+ with_legacy: true
+# The number of shards for holding the ops
+- name: osd_num_op_tracker_shard
+ type: uint
+ level: advanced
+ default: 32
+ with_legacy: true
+# Max number of completed ops to track
+- name: osd_op_history_size
+ type: uint
+ level: advanced
+ default: 20
+ fmt_desc: The maximum number of completed operations to track.
+ with_legacy: true
+# Oldest completed op to track
+- name: osd_op_history_duration
+ type: uint
+ level: advanced
+ default: 600
+ fmt_desc: The oldest completed operation to track.
+ with_legacy: true
+# Max number of slow ops to track
+- name: osd_op_history_slow_op_size
+ type: uint
+ level: advanced
+ default: 20
+ with_legacy: true
+# track the op if over this threshold
+- name: osd_op_history_slow_op_threshold
+ type: float
+ level: advanced
+ default: 10
+ with_legacy: true
+# to adjust various transactions that batch smaller items
+- name: osd_target_transaction_size
+ type: int
+ level: advanced
+ default: 30
+ with_legacy: true
+# what % full makes an OSD "full" (failsafe)
+- name: osd_failsafe_full_ratio
+ type: float
+ level: advanced
+ default: 0.97
+ with_legacy: true
+- name: osd_fast_shutdown
+ type: bool
+ level: advanced
+ desc: Fast, immediate shutdown
+ long_desc: Setting this to false makes the OSD do a slower teardown of all state
+ when it receives a SIGINT or SIGTERM or when shutting down for any other reason. That
+    slow shutdown is primarily useful for doing memory leak checking with valgrind.
+ default: true
+ with_legacy: true
+- name: osd_fast_shutdown_timeout
+ type: int
+ level: advanced
+ desc: timeout in seconds for osd fast-shutdown (0 is unlimited)
+ default: 15
+ with_legacy: true
+ min: 0
+- name: osd_fast_shutdown_notify_mon
+ type: bool
+ level: advanced
+ desc: Tell mon about OSD shutdown on immediate shutdown
+  long_desc: Tell the monitor the OSD is shutting down on immediate shutdown. This
+    helps avoid cluster log messages from other OSDs reporting that it failed.
+ default: true
+ see_also:
+ - osd_fast_shutdown
+ - osd_mon_shutdown_timeout
+ with_legacy: true
+# immediately mark OSDs as down once they refuse to accept connections
+- name: osd_fast_fail_on_connection_refused
+ type: bool
+ level: advanced
+ default: true
+ fmt_desc: If this option is enabled, crashed OSDs are marked down
+ immediately by connected peers and MONs (assuming that the
+ crashed OSD host survives). Disable it to restore old
+ behavior, at the expense of possible long I/O stalls when
+ OSDs crash in the middle of I/O operations.
+ with_legacy: true
+- name: osd_pg_object_context_cache_count
+ type: int
+ level: advanced
+ default: 64
+ with_legacy: true
+# true if LTTng-UST tracepoints should be enabled
+- name: osd_tracing
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+# true if function instrumentation should use LTTng
+- name: osd_function_tracing
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+# use fast info attr, if we can
+- name: osd_fast_info
+ type: bool
+ level: advanced
+ default: true
+ with_legacy: true
+# determines whether PGLog::check() compares written out log to stored log
+- name: osd_debug_pg_log_writeout
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+# Max number of loops before we reset the thread pool's handle
+- name: osd_loop_before_reset_tphandle
+ type: uint
+ level: advanced
+ default: 64
+ with_legacy: true
+# default timeout while calling WaitInterval on an empty queue
+- name: threadpool_default_timeout
+ type: int
+ level: advanced
+ default: 1_min
+ with_legacy: true
+# default wait time for an empty queue before pinging the hb timeout
+- name: threadpool_empty_queue_max_wait
+ type: int
+ level: advanced
+ default: 2
+ with_legacy: true
+- name: leveldb_log_to_ceph_log
+ type: bool
+ level: advanced
+ default: true
+ with_legacy: true
+- name: leveldb_write_buffer_size
+ type: size
+ level: advanced
+ default: 8_M
+ with_legacy: true
+- name: leveldb_cache_size
+ type: size
+ level: advanced
+ default: 128_M
+ with_legacy: true
+- name: leveldb_block_size
+ type: size
+ level: advanced
+ default: 0
+ with_legacy: true
+- name: leveldb_bloom_size
+ type: int
+ level: advanced
+ default: 0
+ with_legacy: true
+- name: leveldb_max_open_files
+ type: int
+ level: advanced
+ default: 0
+ with_legacy: true
+- name: leveldb_compression
+ type: bool
+ level: advanced
+ default: true
+ with_legacy: true
+- name: leveldb_paranoid
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: leveldb_log
+ type: str
+ level: advanced
+ default: /dev/null
+ with_legacy: true
+- name: leveldb_compact_on_mount
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: rocksdb_log_to_ceph_log
+ type: bool
+ level: advanced
+ default: true
+ with_legacy: true
+- name: rocksdb_cache_size
+ type: size
+ level: advanced
+ default: 512_M
+ flags:
+ - runtime
+ with_legacy: true
+# ratio of cache for row (vs block)
+- name: rocksdb_cache_row_ratio
+ type: float
+ level: advanced
+ default: 0
+ with_legacy: true
+# rocksdb block cache shard bits, 4 bit -> 16 shards
+- name: rocksdb_cache_shard_bits
+ type: int
+ level: advanced
+ default: 4
+ with_legacy: true
+# cache type: 'lru', 'clock', or 'binned_lru'
+- name: rocksdb_cache_type
+ type: str
+ level: advanced
+ default: binned_lru
+ with_legacy: true
+- name: rocksdb_block_size
+ type: size
+ level: advanced
+ default: 4_K
+ with_legacy: true
+# Enabling this will have a 5-10% performance impact for the stats collection
+- name: rocksdb_perf
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+# For rocksdb, this adds a 5%~10% overhead; collected only if rocksdb_perf is enabled.
+- name: rocksdb_collect_compaction_stats
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+# For rocksdb, this adds a 5%~10% overhead; collected only if rocksdb_perf is enabled.
+- name: rocksdb_collect_extended_stats
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+# For rocksdb, this adds a 5%~10% overhead; collected only if rocksdb_perf is enabled.
+- name: rocksdb_collect_memory_stats
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: rocksdb_delete_range_threshold
+ type: uint
+ level: advanced
+  desc: The number of keys required to invoke DeleteRange when deleting multiple keys.
+ default: 1_M
+- name: rocksdb_bloom_bits_per_key
+ type: uint
+ level: advanced
+ desc: Number of bits per key to use for RocksDB's bloom filters.
+ long_desc: 'RocksDB bloom filters can be used to quickly answer the question of
+ whether or not a key may exist or definitely does not exist in a given RocksDB
+ SST file without having to read all keys into memory. Using a higher bit value
+ decreases the likelihood of false positives at the expense of additional disk
+ space and memory consumption when the filter is loaded into RAM. The current
+ default value of 20 was found to provide significant performance gains when getattr
+ calls are made (such as during new object creation in bluestore) without significant
+ memory overhead or cache pollution when combined with rocksdb partitioned index
+ filters. See: https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters
+ for more information.'
+ default: 20
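+# rough rule of thumb for bloom filters: ~10 bits per key gives about a 1%
+# false-positive rate, ~20 bits about 0.01%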
+- name: rocksdb_cache_index_and_filter_blocks
+ type: bool
+ level: dev
+ desc: Whether to cache indices and filters in block cache
+ long_desc: By default RocksDB will load an SST file's index and bloom filters into
+ memory when it is opened and remove them from memory when an SST file is closed. Thus,
+ memory consumption by indices and bloom filters is directly tied to the number
+ of concurrent SST files allowed to be kept open. This option instead stores cached
+    indices and filters in the block cache where they directly compete with other
+ cached data. By default we set this option to true to better account for and
+ bound rocksdb memory usage and keep filters in memory even when an SST file is
+ closed.
+ default: true
+- name: rocksdb_cache_index_and_filter_blocks_with_high_priority
+ type: bool
+ level: dev
+ desc: Whether to cache indices and filters in the block cache with high priority
+ long_desc: A downside of setting rocksdb_cache_index_and_filter_blocks to true is
+ that regular data can push indices and filters out of memory. Setting this option
+ to true means they are cached with higher priority than other data and should
+ typically stay in the block cache.
+ default: false
+- name: rocksdb_pin_l0_filter_and_index_blocks_in_cache
+ type: bool
+ level: dev
+ desc: Whether to pin Level 0 indices and bloom filters in the block cache
+ long_desc: A downside of setting rocksdb_cache_index_and_filter_blocks to true is
+ that regular data can push indices and filters out of memory. Setting this option
+ to true means that level 0 SST files will always have their indices and filters
+ pinned in the block cache.
+ default: false
+- name: rocksdb_index_type
+ type: str
+ level: dev
+ desc: 'Type of index for SST files: binary_search, hash_search, two_level'
+ long_desc: 'This option controls the table index type. binary_search is a space
+ efficient index block that is optimized for block-search-based index. hash_search
+ may improve prefix lookup performance at the expense of higher disk and memory
+ usage and potentially slower compactions. two_level is an experimental index
+ type that uses two binary search indexes and works in conjunction with partition
+ filters. See: http://rocksdb.org/blog/2017/05/12/partitioned-index-filter.html'
+ default: binary_search
+- name: rocksdb_partition_filters
+ type: bool
+ level: dev
+ desc: (experimental) partition SST index/filters into smaller blocks
+ long_desc: 'This is an experimental option for rocksdb that works in conjunction
+ with two_level indices to avoid having to keep the entire filter/index in cache
+ when cache_index_and_filter_blocks is true. The idea is to keep a much smaller
+ top-level index in heap/cache and then opportunistically cache the lower level
+ indices. See: https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters'
+ default: false
+- name: rocksdb_metadata_block_size
+ type: size
+ level: dev
+ desc: The block size for index partitions. (0 = rocksdb default)
+ default: 4_K
+- name: rocksdb_cf_compact_on_deletion
+ type: bool
+ level: dev
+ desc: Compact the column family when a certain number of tombstones are observed within a given window.
+ long_desc: 'This setting instructs RocksDB to compact a column family when a certain
+ number of tombstones are observed during iteration within a certain sliding window.
+ For instance if rocksdb_cf_compact_on_deletion_sliding_window is 8192 and
+ rocksdb_cf_compact_on_deletion_trigger is 4096, then once 4096 tombstones are
+ observed after iteration over 8192 entries, the column family will be compacted.'
+ default: true
+ with_legacy: true
+ see_also:
+ - rocksdb_cf_compact_on_deletion_sliding_window
+ - rocksdb_cf_compact_on_deletion_trigger
+- name: rocksdb_cf_compact_on_deletion_sliding_window
+ type: int
+ level: dev
+ desc: The sliding window to use when rocksdb_cf_compact_on_deletion is enabled.
+ default: 32768
+ with_legacy: true
+ see_also:
+ - rocksdb_cf_compact_on_deletion
+- name: rocksdb_cf_compact_on_deletion_trigger
+ type: int
+ level: dev
+ desc: The trigger to use when rocksdb_cf_compact_on_deletion is enabled.
+ default: 16384
+ with_legacy: true
+ see_also:
+ - rocksdb_cf_compact_on_deletion
+# osd_*_priority options adjust the relative priority of client io, recovery io,
+# snaptrim io, etc., and thereby determine the ratio of available io between
+# client and recovery work. Each option may be set between 1 and 63.
+- name: osd_client_op_priority
+ type: uint
+ level: advanced
+ default: 63
+ fmt_desc: The priority set for client operations. This value is relative
+ to that of ``osd_recovery_op_priority`` below. The default
+ strongly favors client ops over recovery.
+ with_legacy: true
+- name: osd_recovery_op_priority
+ type: uint
+ level: advanced
+ desc: Priority to use for recovery operations if not specified for the pool
+ fmt_desc: The priority of recovery operations vs client operations, if not specified by the
+ pool's ``recovery_op_priority``. The default value prioritizes client
+ ops (see above) over recovery ops. You may adjust the tradeoff of client
+ impact against the time to restore cluster health by lowering this value
+ for increased prioritization of client ops, or by increasing it to favor
+ recovery.
+ default: 3
+ with_legacy: true
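+# Worked example: with the defaults above, client and recovery io are weighted
+# 63:3 (21:1), so while client ops are pending, recovery receives roughly
+# 3/(63+3) ~= 4.5% of the available io.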
+- name: osd_peering_op_priority
+ type: uint
+ level: dev
+ default: 255
+ with_legacy: true
+- name: osd_snap_trim_priority
+ type: uint
+ level: advanced
+ default: 5
+ fmt_desc: The priority set for the snap trim work queue.
+ with_legacy: true
+- name: osd_snap_trim_cost
+ type: size
+ level: advanced
+ default: 1_M
+ with_legacy: true
+- name: osd_pg_delete_priority
+ type: uint
+ level: advanced
+ default: 5
+ with_legacy: true
+- name: osd_pg_delete_cost
+ type: size
+ level: advanced
+ default: 1_M
+ with_legacy: true
+- name: osd_scrub_priority
+ type: uint
+ level: advanced
+ desc: Priority for scrub operations in work queue
+ fmt_desc: The default work queue priority for scheduled scrubs when the
+ pool doesn't specify a value of ``scrub_priority``. This can be
+ boosted to the value of ``osd_client_op_priority`` when scrubs are
+ blocking client operations.
+ default: 5
+ with_legacy: true
+- name: osd_scrub_cost
+ type: size
+ level: advanced
+ desc: Cost for scrub operations in work queue
+ default: 50_M
+ with_legacy: true
+- name: osd_scrub_event_cost
+ type: size
+ level: advanced
+ desc: Cost for each scrub operation, used when osd_op_queue=mclock_scheduler
+ default: 4_K
+ with_legacy: true
+# set requested scrub priority higher than scrub priority to make the
+# requested scrubs jump the queue of scheduled scrubs
+- name: osd_requested_scrub_priority
+ type: uint
+ level: advanced
+ default: 120
+ fmt_desc: The priority set for user requested scrub on the work queue. If
+ this value were to be smaller than ``osd_client_op_priority`` it
+ can be boosted to the value of ``osd_client_op_priority`` when
+ scrub is blocking client operations.
+ with_legacy: true
+- name: osd_recovery_priority
+ type: uint
+ level: advanced
+ desc: Priority of recovery in the work queue
+ long_desc: Not related to a pool's recovery_priority
+ fmt_desc: The default priority set for recovery work queue. Not
+ related to a pool's ``recovery_priority``.
+ default: 5
+ with_legacy: true
+# set default cost equal to 20MB io
+- name: osd_recovery_cost
+ type: size
+ level: advanced
+ default: 20_M
+ with_legacy: true
+# osd_recovery_op_warn_multiple scales the normal warning threshold,
+# osd_op_complaint_time, so that slow recovery ops won't cause noise
+- name: osd_recovery_op_warn_multiple
+ type: uint
+ level: advanced
+ default: 16
+ with_legacy: true
+# Max time to wait between notifying mon of shutdown and shutting down
+- name: osd_mon_shutdown_timeout
+ type: float
+ level: advanced
+ default: 5
+ with_legacy: true
+# crash if the OSD has stray PG refs on shutdown
+- name: osd_shutdown_pgref_assert
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+# OSD's maximum object size
+- name: osd_max_object_size
+ type: size
+ level: advanced
+ default: 128_M
+ fmt_desc: The maximum size of a RADOS object in bytes.
+ with_legacy: true
+# max rados object name len
+- name: osd_max_object_name_len
+ type: uint
+ level: advanced
+ default: 2_K
+ with_legacy: true
+# max rados object namespace len
+- name: osd_max_object_namespace_len
+ type: uint
+ level: advanced
+ default: 256
+ with_legacy: true
+# max rados attr name len; cannot go higher than 100 chars for file system backends
+- name: osd_max_attr_name_len
+ type: uint
+ level: advanced
+ default: 100
+ with_legacy: true
+- name: osd_max_attr_size
+ type: uint
+ level: advanced
+ default: 0
+ with_legacy: true
+- name: osd_max_omap_entries_per_request
+ type: uint
+ level: advanced
+ default: 1_K
+ with_legacy: true
+- name: osd_max_omap_bytes_per_request
+ type: size
+ level: advanced
+ default: 1_G
+ with_legacy: true
+- name: osd_max_write_op_reply_len
+ type: size
+ level: advanced
+ desc: Max size of the per-op payload for requests with the RETURNVEC flag set
+ long_desc: This value caps the amount of data (per op; a request may have many ops)
+ that will be sent back to the client and recorded in the PG log.
+ default: 64
+ with_legacy: true
+- name: osd_objectstore
+ type: str
+ level: advanced
+ desc: backend type for an OSD (like filestore or bluestore)
+ default: bluestore
+ enum_values:
+ - bluestore
+ - filestore
+ - memstore
+ - kstore
+ - seastore
+ - cyanstore
+ flags:
+ - create
+ with_legacy: true
+# true if LTTng-UST tracepoints should be enabled
+- name: osd_objectstore_tracing
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: osd_objectstore_fuse
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: osd_bench_small_size_max_iops
+ type: uint
+ level: advanced
+ default: 100
+ with_legacy: true
+- name: osd_bench_large_size_max_throughput
+ type: size
+ level: advanced
+ default: 100_M
+ with_legacy: true
+- name: osd_bench_max_block_size
+ type: size
+ level: advanced
+ default: 64_M
+ with_legacy: true
+# duration of 'osd bench', capped at 30s to avoid triggering timeouts
+- name: osd_bench_duration
+ type: uint
+ level: advanced
+ default: 30
+ with_legacy: true
+# create a blkin trace for all osd requests
+- name: osd_blkin_trace_all
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+# create a blkin trace for all objecter requests
+- name: osdc_blkin_trace_all
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: osd_discard_disconnected_ops
+ type: bool
+ level: advanced
+ default: true
+ with_legacy: true
+- name: osd_memory_target
+ type: size
+ level: basic
+ desc: When tcmalloc and cache autotuning are enabled, try to keep this many bytes
+ mapped in memory.
+ long_desc: The value must be at least equal to osd_memory_base + osd_memory_cache_min.
+ fmt_desc: |
+ When TCMalloc is available and cache autotuning is enabled, try to
+ keep this many bytes mapped in memory. Note: This may not exactly
+ match the RSS memory usage of the process. While the total amount
+ of heap memory mapped by the process should usually be close
+ to this target, there is no guarantee that the kernel will actually
+ reclaim memory that has been unmapped. During initial development,
+ it was found that some kernels result in the OSD's RSS memory
+ exceeding the mapped memory by up to 20%. It is hypothesised
+ however, that the kernel generally may be more aggressive about
+ reclaiming unmapped memory when there is a high amount of memory
+ pressure. Your mileage may vary.
+ default: 4_G
+ see_also:
+ - bluestore_cache_autotune
+ - osd_memory_cache_min
+ - osd_memory_base
+ - osd_memory_target_autotune
+ min: 896_M
+ flags:
+ - runtime
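+# Sanity check: the 896_M minimum above is exactly osd_memory_base (768_M) plus
+# osd_memory_cache_min (128_M), the defaults defined below, matching the
+# constraint in long_desc.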
+- name: osd_memory_target_autotune
+ type: bool
+ default: false
+ level: advanced
+ desc: If enabled, allow the orchestrator to automatically tune osd_memory_target
+ see_also:
+ - osd_memory_target
+- name: osd_memory_target_cgroup_limit_ratio
+ type: float
+ level: advanced
+ desc: Set the default value for osd_memory_target to the cgroup memory limit (if
+ set) times this value
+ long_desc: A value of 0 disables this feature.
+ default: 0.8
+ see_also:
+ - osd_memory_target
+ min: 0
+ max: 1
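+# Worked example: in a container whose cgroup memory limit is 16 GiB, the
+# default ratio of 0.8 yields an effective osd_memory_target of
+# 16 GiB * 0.8 = 12.8 GiB.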
+- name: osd_memory_base
+ type: size
+ level: dev
+ desc: When tcmalloc and cache autotuning are enabled, estimate the minimum amount
+ of memory in bytes the OSD will need.
+ fmt_desc: When TCMalloc and cache autotuning are enabled, estimate the minimum
+ amount of memory in bytes the OSD will need. This is used to help
+ the autotuner estimate the expected aggregate memory consumption of
+ the caches.
+ default: 768_M
+ see_also:
+ - bluestore_cache_autotune
+ flags:
+ - runtime
+- name: osd_memory_expected_fragmentation
+ type: float
+ level: dev
+ desc: When tcmalloc and cache autotuning are enabled, estimate the percentage of memory
+ fragmentation.
+ fmt_desc: When TCMalloc and cache autotuning is enabled, estimate the
+ percentage of memory fragmentation. This is used to help the
+ autotuner estimate the expected aggregate memory consumption
+ of the caches.
+ default: 0.15
+ see_also:
+ - bluestore_cache_autotune
+ min: 0
+ max: 1
+ flags:
+ - runtime
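+# A rough sketch (an approximation, not the exact autotuner formula): the memory
+# available for caches is about
+#   (osd_memory_target - osd_memory_base) * (1 - osd_memory_expected_fragmentation)
+# which with the defaults (4_G, 768_M, 0.15) comes to roughly
+# (4096 - 768) MiB * 0.85 ~= 2829 MiB.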
+- name: osd_memory_cache_min
+ type: size
+ level: dev
+ desc: When tcmalloc and cache autotuning are enabled, set the minimum amount of memory
+ used for caches.
+ fmt_desc: |
+ When TCMalloc and cache autotuning are enabled, set the minimum
+ amount of memory used for caches. Note: Setting this value too
+ low can result in significant cache thrashing.
+ default: 128_M
+ see_also:
+ - bluestore_cache_autotune
+ min: 128_M
+ flags:
+ - runtime
+- name: osd_memory_cache_resize_interval
+ type: float
+ level: dev
+ desc: When tcmalloc and cache autotuning are enabled, wait this many seconds between
+ resizing caches.
+ fmt_desc: When TCMalloc and cache autotuning are enabled, wait this many
+ seconds between resizing caches. This setting changes the total
+ amount of memory available for BlueStore to use for caching. Note
+ that setting this interval too small can result in memory allocator
+ thrashing and lower performance.
+ default: 1
+ see_also:
+ - bluestore_cache_autotune
+- name: memstore_device_bytes
+ type: size
+ level: advanced
+ default: 1_G
+ with_legacy: true
+- name: memstore_page_set
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: memstore_page_size
+ type: size
+ level: advanced
+ default: 64_K
+ with_legacy: true
+- name: memstore_debug_omit_block_device_write
+ type: bool
+ level: dev
+ desc: write metadata only
+ default: false
+ see_also:
+ - bluestore_debug_omit_block_device_write
+ with_legacy: true
+- name: objectstore_blackhole
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: bdev_debug_inflight_ios
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+# if N>0, then ~ 1/N IOs will complete before we crash on flush
+- name: bdev_inject_crash
+ type: int
+ level: dev
+ default: 0
+ with_legacy: true
+# wait N more seconds on flush
+- name: bdev_inject_crash_flush_delay
+ type: int
+ level: dev
+ default: 2
+ with_legacy: true
+- name: bdev_aio
+ type: bool
+ level: advanced
+ default: true
+ with_legacy: true
+# milliseconds
+- name: bdev_aio_poll_ms
+ type: int
+ level: advanced
+ default: 250
+ with_legacy: true
+- name: bdev_aio_max_queue_depth
+ type: int
+ level: advanced
+ default: 1024
+ with_legacy: true
+- name: bdev_aio_reap_max
+ type: int
+ level: advanced
+ default: 16
+ with_legacy: true
+- name: bdev_block_size
+ type: size
+ level: advanced
+ default: 4_K
+ with_legacy: true
+- name: bdev_read_buffer_alignment
+ type: size
+ level: advanced
+ default: 4_K
+ with_legacy: true
+- name: bdev_read_preallocated_huge_buffers
+ type: str
+ level: advanced
+ desc: description of the pool arrangement for huge page-based read buffers
+ long_desc: Arrangement of preallocated, huge page-based pools for reading
+ from a KernelDevice. Applied to minimize the size of scatter-gather lists
+ sent to NICs. Targets really big buffers (>= 2 or 4 MB).
+ Keep in mind that the system must be configured accordingly (see /proc/sys/vm/nr_hugepages);
+ otherwise the OSD will fail early.
+ Beware that BlueStore, by default, stores large chunks across many smaller blobs.
+ Increasing bluestore_max_blob_size changes that, and thus allows the data to
+ be read back into a small number of huge page-backed buffers.
+ fmt_desc: List of key=value pairs delimited by comma, semicolon or tab.
+ The key specifies the targeted read size and must be expressed in bytes;
+ the value specifies the number of preallocated buffers.
+ For instance, to preallocate 64 buffers that will be used to serve
+ 2 MB-sized read requests and 128 for 4 MB ones, set
+ "2097152=64,4194304=128".
+ see_also:
+ - bluestore_max_blob_size
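+# Illustrative sizing sketch (assuming 2 MiB system huge pages): the example
+# arrangement "2097152=64,4194304=128" pins 64 x 2 MiB + 128 x 4 MiB = 640 MiB,
+# so at least 320 huge pages must be provisioned, e.g. via
+# 'echo 320 > /proc/sys/vm/nr_hugepages'.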
+- name: bdev_debug_aio
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: bdev_debug_aio_suicide_timeout
+ type: float
+ level: dev
+ default: 1_min
+ with_legacy: true
+- name: bdev_debug_aio_log_age
+ type: float
+ level: dev
+ default: 5
+ with_legacy: true
+# if yes, osd will unbind all NVMe devices from kernel driver and bind them
+# to the uio_pci_generic driver. The purpose is to prevent the case where
+# NVMe driver is loaded while osd is running.
+- name: bdev_nvme_unbind_from_kernel
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: bdev_enable_discard
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: bdev_async_discard
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: bdev_flock_retry_interval
+ type: float
+ level: advanced
+ desc: interval to retry the flock
+ default: 0.1
+- name: bdev_flock_retry
+ type: uint
+ level: advanced
+ desc: times to retry the flock
+ long_desc: The number of times to retry on getting the block device lock. Programs
+ such as systemd-udevd may compete with Ceph for this lock. 0 means 'unlimited'.
+ default: 3
+- name: bluefs_alloc_size
+ type: size
+ level: advanced
+ desc: Allocation unit size for DB and WAL devices
+ default: 1_M
+ with_legacy: true
+- name: bluefs_shared_alloc_size
+ type: size
+ level: advanced
+ desc: Allocation unit size for primary/shared device
+ default: 64_K
+ with_legacy: true
+- name: bluefs_failed_shared_alloc_cooldown
+ type: float
+ level: advanced
+ desc: duration (in seconds) until the next attempt to use
+ 'bluefs_shared_alloc_size' after facing an ENOSPC failure.
+ long_desc: Cooldown period (in seconds) during which BlueFS uses an allocation
+ size for the shared/slow device other than 'bluefs_shared_alloc_size' after facing a
+ recoverable (via fallback to a smaller chunk size) ENOSPC failure. Intended
+ primarily to avoid repetitive unsuccessful allocations which might be
+ expensive.
+ default: 600
+ with_legacy: true
+- name: bluefs_max_prefetch
+ type: size
+ level: advanced
+ default: 1_M
+ with_legacy: true
+# alloc when we get this low
+- name: bluefs_min_log_runway
+ type: size
+ level: advanced
+ default: 1_M
+ with_legacy: true
+# alloc this much at a time
+- name: bluefs_max_log_runway
+ type: size
+ level: advanced
+ default: 4_M
+ with_legacy: true
+# ratio of log size to last-compacted size before we consider compacting
+- name: bluefs_log_compact_min_ratio
+ type: float
+ level: advanced
+ default: 5
+ with_legacy: true
+# minimum log size before we consider compacting
+- name: bluefs_log_compact_min_size
+ type: size
+ level: advanced
+ default: 16_M
+ with_legacy: true
+# ignore flush until it's this big
+- name: bluefs_min_flush_size
+ type: size
+ level: advanced
+ default: 512_K
+ with_legacy: true
+# sync or async log compaction
+- name: bluefs_compact_log_sync
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: bluefs_buffered_io
+ type: bool
+ level: advanced
+ desc: Enable buffered IO for bluefs reads.
+ long_desc: When this option is enabled, bluefs will in some cases perform buffered
+ reads. This allows the kernel page cache to act as a secondary cache for things
+ like RocksDB block reads. For example, if the rocksdb block cache isn't large
+ enough to hold all blocks during OMAP iteration, it may be possible to read them
+ from page cache instead of from the disk. This can dramatically improve
+ performance when the osd_memory_target is too small to hold all entries in block
+ cache but it does come with downsides. It has been reported to occasionally
+ cause excessive kernel swapping (and associated stalls) under certain workloads.
+ Currently the best and most consistent performing combination appears to be
+ enabling bluefs_buffered_io and disabling system level swap. It is possible
+ that this recommendation may change in the future however.
+ default: true
+ with_legacy: true
+- name: bluefs_sync_write
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: bluefs_allocator
+ type: str
+ level: dev
+ default: hybrid
+ enum_values:
+ - bitmap
+ - stupid
+ - avl
+ - hybrid
+ with_legacy: true
+- name: bluefs_log_replay_check_allocations
+ type: bool
+ level: advanced
+ desc: Enables checks for allocations consistency during log replay
+ default: true
+ with_legacy: true
+- name: bluefs_replay_recovery
+ type: bool
+ level: dev
+ desc: Attempt to read a bluefs log so large that it has become unreadable.
+ long_desc: If the BlueFS log grows to an extreme size (200GB+) it is likely to become
+ unreadable. This option enables heuristics that scan devices for missing data.
+ DO NOT ENABLE BY DEFAULT
+ default: false
+ with_legacy: true
+- name: bluefs_replay_recovery_disable_compact
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: bluefs_check_for_zeros
+ type: bool
+ level: dev
+ desc: Check data read for suspicious pages
+ long_desc: Looks into data read to check if there is a 4K block entirely filled
+ with zeros. If this happens, we re-read data. If there is difference, we print
+ error to log.
+ default: false
+ see_also:
+ - bluestore_retry_disk_reads
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluefs_check_volume_selector_on_umount
+ type: bool
+ level: dev
+ desc: Check validity of volume selector on umount
+ long_desc: Checks whether the volume selector has diverged from the state it should be in.
+ The reference is constructed from the bluefs inode table. Asserts on inconsistency.
+ default: false
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluefs_check_volume_selector_often
+ type: bool
+ level: dev
+ desc: Periodically check validity of volume selector
+ long_desc: Periodically checks whether the current volume selector has diverged from the valid state.
+ The reference is constructed from the bluefs inode table. Asserts on inconsistency. This is a debug feature.
+ default: false
+ see_also:
+ - bluefs_check_volume_selector_on_umount
+ flags:
+ - startup
+ with_legacy: true
+- name: bluestore_bluefs
+ type: bool
+ level: dev
+ desc: Use BlueFS to back rocksdb
+ long_desc: BlueFS allows rocksdb to share the same physical device(s) as the rest
+ of BlueStore. It should be used in all cases unless testing/developing an alternative
+ metadata database for BlueStore.
+ default: true
+ flags:
+ - create
+ with_legacy: true
+# mirror to normal Env for debug
+- name: bluestore_bluefs_env_mirror
+ type: bool
+ level: dev
+ desc: Mirror bluefs data to file system for testing/validation
+ default: false
+ flags:
+ - create
+ with_legacy: true
+- name: bluestore_bluefs_max_free
+ type: size
+ level: advanced
+ default: 10_G
+ desc: Maximum free space allocated to BlueFS
+- name: bluestore_bluefs_alloc_failure_dump_interval
+ type: float
+ level: advanced
+ desc: How frequently (in seconds) to dump allocator on BlueFS space allocation failure
+ default: 0
+ with_legacy: true
+- name: bluestore_spdk_mem
+ type: size
+ level: dev
+ desc: Amount of dpdk memory in MB
+ long_desc: If running multiple SPDK instances per node, you must specify the amount
+ of dpdk memory in MB each instance will use, to make sure each instance uses
+ its own dpdk memory
+ default: 512
+- name: bluestore_spdk_coremask
+ type: str
+ level: dev
+ desc: A hexadecimal bit mask of the cores to run on. Note the core numbering can
+ change between platforms and should be determined beforehand
+ default: '0x1'
+- name: bluestore_spdk_max_io_completion
+ type: uint
+ level: dev
+ desc: Maximum number of I/Os to complete in a batch while checking queue pair completions;
+ 0 means let the spdk library determine it
+ default: 0
+- name: bluestore_spdk_io_sleep
+ type: uint
+ level: dev
+ desc: Time period to wait if there is no completed I/O from polling
+ default: 5
+# If you want to use spdk driver, you need to specify NVMe serial number here
+# with "spdk:" prefix.
+# Users can use 'lspci -vvv -d 8086:0953 | grep "Device Serial Number"' to
+# get the serial number of Intel(R) Fultondale NVMe controllers.
+# Example:
+# bluestore_block_path = spdk:55cd2e404bd73932
+- name: bluestore_block_path
+ type: str
+ level: dev
+ desc: Path to block device/file
+ flags:
+ - create
+ with_legacy: true
+- name: bluestore_block_size
+ type: size
+ level: dev
+ desc: Size of file to create for backing bluestore
+ default: 100_G
+ flags:
+ - create
+ with_legacy: true
+- name: bluestore_block_create
+ type: bool
+ level: dev
+ desc: Create bluestore_block_path if it doesn't exist
+ default: true
+ see_also:
+ - bluestore_block_path
+ - bluestore_block_size
+ flags:
+ - create
+ with_legacy: true
+- name: bluestore_block_db_path
+ type: str
+ level: dev
+ desc: Path for db block device
+ flags:
+ - create
+ with_legacy: true
+# rocksdb ssts (hot/warm)
+- name: bluestore_block_db_size
+ type: size
+ level: dev
+ desc: Size of file to create for bluestore_block_db_path
+ default: 0
+ flags:
+ - create
+ with_legacy: true
+- name: bluestore_block_db_create
+ type: bool
+ level: dev
+ desc: Create bluestore_block_db_path if it doesn't exist
+ default: false
+ see_also:
+ - bluestore_block_db_path
+ - bluestore_block_db_size
+ flags:
+ - create
+ with_legacy: true
+- name: bluestore_block_wal_path
+ type: str
+ level: dev
+ desc: Path to block device/file backing bluefs wal
+ flags:
+ - create
+ with_legacy: true
+# rocksdb wal
+- name: bluestore_block_wal_size
+ type: size
+ level: dev
+ desc: Size of file to create for bluestore_block_wal_path
+ default: 96_M
+ flags:
+ - create
+ with_legacy: true
+- name: bluestore_block_wal_create
+ type: bool
+ level: dev
+ desc: Create bluestore_block_wal_path if it doesn't exist
+ default: false
+ see_also:
+ - bluestore_block_wal_path
+ - bluestore_block_wal_size
+ flags:
+ - create
+ with_legacy: true
+# whether to preallocate space if block/db_path/wal_path is a file rather than a block device.
+- name: bluestore_block_preallocate_file
+ type: bool
+ level: dev
+ desc: Preallocate file created via bluestore_block*_create
+ default: false
+ flags:
+ - create
+ with_legacy: true
+- name: bluestore_ignore_data_csum
+ type: bool
+ level: dev
+ desc: Ignore checksum errors on read and do not generate an EIO error
+ default: false
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_csum_type
+ type: str
+ level: advanced
+ desc: Default checksum algorithm to use
+ long_desc: crc32c, xxhash32, and xxhash64 are available. The _16 and _8 variants
+ use only a subset of the bits for more compact (but less reliable) checksumming.
+ fmt_desc: The default checksum algorithm to use.
+ default: crc32c
+ enum_values:
+ - none
+ - crc32c
+ - crc32c_16
+ - crc32c_8
+ - xxhash32
+ - xxhash64
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_retry_disk_reads
+ type: uint
+ level: advanced
+ desc: Number of read retries on checksum validation error
+ long_desc: Retries to read data from the disk this many times when checksum validation
+ fails to handle spurious read errors gracefully.
+ default: 3
+ min: 0
+ max: 255
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_min_alloc_size
+ type: uint
+ level: advanced
+ desc: Minimum allocation size to allocate for an object
+ long_desc: A smaller allocation size generally means less data is read and then
+ rewritten when a copy-on-write operation is triggered (e.g., when writing to something
+ that was recently snapshotted). Similarly, less data is journaled before performing
+ an overwrite (writes smaller than min_alloc_size must first pass through the BlueStore
+ journal). Larger values of min_alloc_size reduce the amount of metadata required
+ to describe the on-disk layout and reduce overall fragmentation.
+ default: 0
+ flags:
+ - create
+ with_legacy: true
+- name: bluestore_min_alloc_size_hdd
+ type: size
+ level: advanced
+ desc: Default min_alloc_size value for rotational media
+ default: 4_K
+ see_also:
+ - bluestore_min_alloc_size
+ flags:
+ - create
+ with_legacy: true
+- name: bluestore_min_alloc_size_ssd
+ type: size
+ level: advanced
+ desc: Default min_alloc_size value for non-rotational (solid state) media
+ default: 4_K
+ see_also:
+ - bluestore_min_alloc_size
+ flags:
+ - create
+ with_legacy: true
+- name: bluestore_use_optimal_io_size_for_min_alloc_size
+ type: bool
+ level: advanced
+ desc: Discover the media's optimal IO size and use it for min_alloc_size
+ default: false
+ see_also:
+ - bluestore_min_alloc_size
+ flags:
+ - create
+ with_legacy: true
+- name: bluestore_max_alloc_size
+ type: size
+ level: advanced
+ desc: Maximum size of a single allocation (0 for no max)
+ default: 0
+ flags:
+ - create
+ with_legacy: true
+- name: bluestore_prefer_deferred_size
+ type: size
+ level: advanced
+ desc: Writes smaller than this size will be written to the journal and then asynchronously
+ written to the device. This can be beneficial when using rotational media where
+ seeks are expensive, and is helpful both with and without solid state journal/wal
+ devices.
+ default: 0
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_prefer_deferred_size_hdd
+ type: size
+ level: advanced
+ desc: Default bluestore_prefer_deferred_size for rotational media
+ default: 64_K
+ see_also:
+ - bluestore_prefer_deferred_size
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_prefer_deferred_size_ssd
+ type: size
+ level: advanced
+ desc: Default bluestore_prefer_deferred_size for non-rotational (solid state) media
+ default: 0
+ see_also:
+ - bluestore_prefer_deferred_size
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_compression_mode
+ type: str
+ level: advanced
+ desc: Default policy for using compression when pool does not specify
+ long_desc: '''none'' means never use compression. ''passive'' means use compression
+ when clients hint that data is compressible. ''aggressive'' means use compression
+ unless clients hint that data is not compressible. This option is used when the
+ per-pool property for the compression mode is not present.'
+ fmt_desc: The default policy for using compression if the per-pool property
+ ``compression_mode`` is not set. ``none`` means never use
+ compression. ``passive`` means use compression when
+ :c:func:`clients hint <rados_set_alloc_hint>` that data is
+ compressible. ``aggressive`` means use compression unless
+ clients hint that data is not compressible. ``force`` means use
+ compression under all circumstances even if the clients hint that
+ the data is not compressible.
+ default: none
+ enum_values:
+ - none
+ - passive
+ - aggressive
+ - force
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_compression_algorithm
+ type: str
+ level: advanced
+ desc: Default compression algorithm to use when writing object data
+ long_desc: This controls the default compressor to use (if any) if the per-pool
+ property is not set. Note that zstd is *not* recommended for bluestore due to
+ high CPU overhead when compressing small amounts of data.
+ fmt_desc: The default compressor to use (if any) if the per-pool property
+ ``compression_algorithm`` is not set. Note that ``zstd`` is *not*
+ recommended for BlueStore due to high CPU overhead when
+ compressing small amounts of data.
+ default: snappy
+ enum_values:
+ - ''
+ - snappy
+ - zlib
+ - zstd
+ - lz4
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_compression_min_blob_size
+ type: size
+ level: advanced
+ desc: Maximum chunk size to apply compression to when random access is expected
+ for an object.
+ long_desc: Chunks larger than this are broken into smaller chunks before being compressed
+ fmt_desc: Chunks smaller than this are never compressed.
+ The per-pool property ``compression_min_blob_size`` overrides
+ this setting.
+ default: 0
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_compression_min_blob_size_hdd
+ type: size
+ level: advanced
+ desc: Default value of bluestore_compression_min_blob_size for rotational media
+ fmt_desc: Default value of ``bluestore compression min blob size``
+ for rotational media.
+ default: 8_K
+ see_also:
+ - bluestore_compression_min_blob_size
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_compression_min_blob_size_ssd
+ type: size
+ level: advanced
+ desc: Default value of bluestore_compression_min_blob_size for non-rotational (solid
+ state) media
+ fmt_desc: Default value of ``bluestore compression min blob size``
+ for non-rotational (solid state) media.
+ default: 64_K
+ see_also:
+ - bluestore_compression_min_blob_size
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_compression_max_blob_size
+ type: size
+ level: advanced
+ desc: Maximum chunk size to apply compression to when non-random access is expected
+ for an object.
+ long_desc: Chunks larger than this are broken into smaller chunks before being compressed
+ fmt_desc: Chunks larger than this value are broken into smaller blobs of at most
+ ``bluestore_compression_max_blob_size`` bytes before being compressed.
+ The per-pool property ``compression_max_blob_size`` overrides
+ this setting.
+ default: 0
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_compression_max_blob_size_hdd
+ type: size
+ level: advanced
+ desc: Default value of bluestore_compression_max_blob_size for rotational media
+ fmt_desc: Default value of ``bluestore compression max blob size``
+ for rotational media.
+ default: 64_K
+ see_also:
+ - bluestore_compression_max_blob_size
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_compression_max_blob_size_ssd
+ type: size
+ level: advanced
+ desc: Default value of bluestore_compression_max_blob_size for non-rotational (solid
+ state) media
+ fmt_desc: Default value of ``bluestore compression max blob size``
+ for non-rotational (SSD, NVMe) media.
+ default: 64_K
+ see_also:
+ - bluestore_compression_max_blob_size
+ flags:
+ - runtime
+ with_legacy: true
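+# Worked example (using the hdd defaults above and the fmt_desc semantics): a
+# 256 KiB sequential write is compressed as 256/64 = 4 chunks of at most
+# bluestore_compression_max_blob_size_hdd (64_K) each, while chunks smaller
+# than bluestore_compression_min_blob_size_hdd (8_K) are never compressed.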
+# Specifies minimum expected amount of saved allocation units
+# per single blob to enable compressed blobs garbage collection
+- name: bluestore_gc_enable_blob_threshold
+ type: int
+ level: dev
+ default: 0
+ flags:
+ - runtime
+ with_legacy: true
+# Specifies minimum expected amount of saved allocation units
+# across all blobs to enable compressed blobs garbage collection
+- name: bluestore_gc_enable_total_threshold
+ type: int
+ level: dev
+ default: 0
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_max_blob_size
+ type: size
+ level: dev
+ long_desc: Bluestore blobs are collections of extents (i.e., on-disk data) originating
+ from one or more objects. Blobs can be compressed, typically have checksum data,
+ may be overwritten, may be shared (with an extent ref map), or split. This setting
+ controls the maximum size a blob is allowed to be.
+ default: 0
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_max_blob_size_hdd
+ type: size
+ level: dev
+ default: 64_K
+ see_also:
+ - bluestore_max_blob_size
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_max_blob_size_ssd
+ type: size
+ level: dev
+ default: 64_K
+ see_also:
+ - bluestore_max_blob_size
+ flags:
+ - runtime
+ with_legacy: true
+# Require the net gain of compression at least to be at this ratio,
+# otherwise we don't compress.
+# And ask for compressing at least 12.5%(1/8) off, by default.
+- name: bluestore_compression_required_ratio
+ type: float
+ level: advanced
+ desc: Compression ratio required to store compressed data
+ long_desc: If we compress data and get less than this we discard the result and
+ store the original uncompressed data.
+ fmt_desc: The ratio of the size of the data chunk after
+ compression relative to the original size must be at
+ most this value in order to store the compressed
+ version.
+ default: 0.875
+ flags:
+ - runtime
+ with_legacy: true
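+# Worked example: with the default of 0.875, a 64 KiB chunk must compress to at
+# most 64 KiB * 0.875 = 56 KiB; otherwise the uncompressed original is stored.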
+- name: bluestore_extent_map_shard_max_size
+ type: size
+ level: dev
+ desc: Max size (bytes) for a single extent map shard before splitting
+ default: 1200
+ with_legacy: true
+- name: bluestore_extent_map_shard_target_size
+ type: size
+ level: dev
+ desc: Target size (bytes) for a single extent map shard
+ default: 500
+ with_legacy: true
+- name: bluestore_extent_map_shard_min_size
+ type: size
+ level: dev
+ desc: Min size (bytes) for a single extent map shard before merging
+ default: 150
+ with_legacy: true
+- name: bluestore_extent_map_shard_target_size_slop
+ type: float
+ level: dev
+ desc: Ratio above/below target for a shard when trying to align to an existing extent
+ or blob boundary
+ default: 0.2
+ with_legacy: true
+- name: bluestore_extent_map_inline_shard_prealloc_size
+ type: size
+ level: dev
+ desc: Preallocated buffer for inline shards
+ default: 256
+ with_legacy: true
+- name: bluestore_cache_trim_interval
+ type: float
+ level: advanced
+ desc: How frequently we trim the bluestore cache
+ default: 0.05
+ with_legacy: true
+- name: bluestore_cache_trim_max_skip_pinned
+ type: uint
+ level: dev
+ desc: Max pinned cache entries we consider before giving up
+ default: 1000
+ with_legacy: true
+- name: bluestore_cache_type
+ type: str
+ level: dev
+ desc: Cache replacement algorithm
+ default: 2q
+ enum_values:
+ - 2q
+ - lru
+ with_legacy: true
+- name: bluestore_2q_cache_kin_ratio
+ type: float
+ level: dev
+ desc: 2Q paper suggests .5
+ default: 0.5
+ with_legacy: true
+- name: bluestore_2q_cache_kout_ratio
+ type: float
+ level: dev
+ desc: 2Q paper suggests .5
+ default: 0.5
+ with_legacy: true
+- name: bluestore_cache_size
+ type: size
+ level: dev
+ desc: Cache size (in bytes) for BlueStore
+ long_desc: This includes data and metadata cached by BlueStore as well as memory
+ devoted to rocksdb's cache(s).
+ fmt_desc: The amount of memory BlueStore will use for its cache. If zero,
+ ``bluestore_cache_size_hdd`` or ``bluestore_cache_size_ssd`` will
+ be used instead.
+ default: 0
+ with_legacy: true
+- name: bluestore_cache_size_hdd
+ type: size
+ level: dev
+ desc: Default bluestore_cache_size for rotational media
+ fmt_desc: The default amount of memory BlueStore will use for its cache when
+ backed by an HDD.
+ default: 1_G
+ see_also:
+ - bluestore_cache_size
+ with_legacy: true
+- name: bluestore_cache_size_ssd
+ type: size
+ level: dev
+ desc: Default bluestore_cache_size for non-rotational (solid state) media
+ fmt_desc: The default amount of memory BlueStore will use for its cache when
+ backed by an SSD.
+ default: 3_G
+ see_also:
+ - bluestore_cache_size
+ with_legacy: true
+- name: bluestore_cache_meta_ratio
+ type: float
+ level: dev
+ desc: Ratio of bluestore cache to devote to metadata
+ default: 0.45
+ see_also:
+ - bluestore_cache_size
+ with_legacy: true
+- name: bluestore_cache_kv_ratio
+ type: float
+ level: dev
+ desc: Ratio of bluestore cache to devote to key/value database (RocksDB)
+ default: 0.45
+ see_also:
+ - bluestore_cache_size
+ with_legacy: true
+- name: bluestore_cache_kv_onode_ratio
+ type: float
+ level: dev
+ desc: Ratio of bluestore cache to devote to kv onode column family (rocksdb)
+ default: 0.04
+ see_also:
+ - bluestore_cache_size
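+# A sketch of the static split (assuming the ratios apply directly when cache
+# autotuning below is disabled): with the 3_G ssd default, kv gets 45%
+# (~1.35 GiB), meta 45% (~1.35 GiB), kv onode 4% (~123 MiB), leaving ~6% for
+# object data.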
+- name: bluestore_cache_autotune
+ type: bool
+ level: dev
+ desc: Automatically tune the ratio of caches while respecting min values.
+ fmt_desc: Automatically tune the space ratios assigned to various BlueStore
+ caches while respecting minimum values.
+ default: true
+ see_also:
+ - bluestore_cache_size
+ - bluestore_cache_meta_ratio
+- name: bluestore_cache_autotune_interval
+ type: float
+ level: dev
+ desc: The number of seconds to wait between rebalances when cache autotune is enabled.
+ fmt_desc: |
+ The number of seconds to wait between rebalances when cache autotune is
+ enabled. `bluestore_cache_autotune_interval` sets the speed at which Ceph
+ recomputes the allocation ratios of various caches. Note: Setting this
+ interval too small can result in high CPU usage and lower performance.
+ default: 5
+ see_also:
+ - bluestore_cache_autotune
+- name: bluestore_cache_age_bin_interval
+ type: float
+ level: dev
+ desc: The duration (in seconds) represented by a single cache age bin.
+ fmt_desc: |
+ The caches used by bluestore will assign cache entries to an 'age bin'
+ that represents a period of time during which that cache entry was most
+ recently updated. By binning the caches in this way, Ceph's priority
+ cache balancing code can make better decisions about which caches should
+ receive priority based on the relative ages of items in the caches. By
+ default, a single cache age bin represents 1 second of time. Note:
+ Setting this interval too small can result in high CPU usage and lower
+ performance.
+ default: 1
+ see_also:
+ - bluestore_cache_age_bins_kv
+ - bluestore_cache_age_bins_kv_onode
+ - bluestore_cache_age_bins_meta
+ - bluestore_cache_age_bins_data
+- name: bluestore_cache_age_bins_kv
+ type: str
+ level: dev
+ desc: A 10 element, space separated list of age bins for kv cache
+ fmt_desc: |
+ A 10 element, space separated list of cache age bins grouped by
+ priority such that PRI1=[0,n), PRI2=[n,n+1), PRI3=[n+1,n+2) ...
+ PRI10=[n+8,n+9). Values represent the starting and ending bin for each
+ priority level. A 0 in the 2nd term will prevent any items from being
+ associated with that priority. bin duration is based on the
+ bluestore_cache_age_bin_interval value. For example,
+ "1 5 0 0 0 0 0 0 0 0" defines bin ranges for two priority levels. PRI1
+ contains 1 age bin. Assuming the default age bin interval of 1 second,
+ PRI1 represents cache items that are less than 1 second old. PRI2 has 4
+ bins representing cache items that are 1 to less than 5 seconds old. All
+ other cache items in this example are associated with the lowest priority
+ level as PRI3-PRI10 all have 0s in their second term.
+ default: "1 2 6 24 120 720 0 0 0 0"
+ see_also:
+ - bluestore_cache_age_bin_interval
+- name: bluestore_cache_age_bins_kv_onode
+ type: str
+ level: dev
+ desc: A 10 element, space separated list of age bins for kv onode cache
+ fmt_desc: |
+ A 10 element, space separated list of cache age bins grouped by
+ priority such that PRI1=[0,n), PRI2=[n,n+1), PRI3=[n+1,n+2) ...
+ PRI10=[n+8,n+9). Values represent the starting and ending bin for each
+ priority level. A 0 in the 2nd term will prevent any items from being
+ associated with that priority. bin duration is based on the
+ bluestore_cache_age_bin_interval value. For example,
+ "1 5 0 0 0 0 0 0 0 0" defines bin ranges for two priority levels. PRI1
+ contains 1 age bin. Assuming the default age bin interval of 1 second,
+ PRI1 represents cache items that are less than 1 second old. PRI2 has 4
+ bins representing cache items that are 1 to less than 5 seconds old. All
+ other cache items in this example are associated with the lowest priority
+ level as PRI3-PRI10 all have 0s in their second term.
+ default: "0 0 0 0 0 0 0 0 0 720"
+ see_also:
+ - bluestore_cache_age_bin_interval
+- name: bluestore_cache_age_bins_meta
+ type: str
+ level: dev
+ desc: A 10 element, space separated list of age bins for onode cache
+ fmt_desc: |
+ A 10 element, space separated list of cache age bins grouped by
+ priority such that PRI1=[0,n), PRI2=[n,n+1), PRI3=[n+1,n+2) ...
+ PRI10=[n+8,n+9). Values represent the starting and ending bin for each
+ priority level. A 0 in the 2nd term will prevent any items from being
+ associated with that priority. bin duration is based on the
+ bluestore_cache_age_bin_interval value. For example,
+ "1 5 0 0 0 0 0 0 0 0" defines bin ranges for two priority levels. PRI1
+ contains 1 age bin. Assuming the default age bin interval of 1 second,
+ PRI1 represents cache items that are less than 1 second old. PRI2 has 4
+ bins representing cache items that are 1 to less than 5 seconds old. All
+ other cache items in this example are associated with the lowest priority
+ level as PRI3-PRI10 all have 0s in their second term.
+ default: "1 2 6 24 120 720 0 0 0 0"
+ see_also:
+ - bluestore_cache_age_bin_interval
+- name: bluestore_cache_age_bins_data
+ type: str
+ level: dev
+ desc: A 10 element, space separated list of age bins for data cache
+ fmt_desc: |
+ A 10 element, space separated list of cache age bins grouped by
+ priority such that PRI1=[0,n), PRI2=[n,n+1), PRI3=[n+1,n+2) ...
+ PRI10=[n+8,n+9). Values represent the starting and ending bin for each
+ priority level. A 0 in the 2nd term will prevent any items from being
+ associated with that priority. bin duration is based on the
+ bluestore_cache_age_bin_interval value. For example,
+ "1 5 0 0 0 0 0 0 0 0" defines bin ranges for two priority levels. PRI1
+ contains 1 age bin. Assuming the default age bin interval of 1 second,
+ PRI1 represents cache items that are less than 1 second old. PRI2 has 4
+ bins representing cache items that are 1 to less than 5 seconds old. All
+ other cache items in this example are associated with the lowest priority
+ level as PRI3-PRI10 all have 0s in their second term.
+ default: "1 2 6 24 120 720 0 0 0 0"
+ see_also:
+ - bluestore_cache_age_bin_interval
+- name: bluestore_alloc_stats_dump_interval
+ type: float
+ level: dev
+ desc: The period (in seconds) for logging allocation statistics.
+ default: 1_day
+ with_legacy: true
+- name: bluestore_kvbackend
+ type: str
+ level: dev
+ desc: Key value database to use for bluestore
+ default: rocksdb
+ flags:
+ - create
+ with_legacy: true
+- name: bluestore_allocator
+ type: str
+ level: advanced
+ desc: Allocator policy
+ long_desc: Allocator to use for bluestore. Stupid should only be used for testing.
+ default: hybrid
+ enum_values:
+ - bitmap
+ - stupid
+ - avl
+ - hybrid
+ - zoned
+ with_legacy: true
+- name: bluestore_freelist_blocks_per_key
+ type: size
+ level: dev
+ desc: Blocks (and bits) per database key
+ default: 128
+ with_legacy: true
+- name: bluestore_bitmapallocator_blocks_per_zone
+ type: size
+ level: dev
+ default: 1_K
+ with_legacy: true
+- name: bluestore_bitmapallocator_span_size
+ type: size
+ level: dev
+ default: 1_K
+ with_legacy: true
+- name: bluestore_max_deferred_txc
+ type: uint
+ level: advanced
+ desc: Max transactions with deferred writes that can accumulate before we force
+ flush deferred writes
+ default: 32
+ with_legacy: true
+- name: bluestore_max_defer_interval
+ type: float
+ level: advanced
+ desc: max duration to force deferred submit
+ default: 3
+ with_legacy: true
+- name: bluestore_rocksdb_options
+ type: str
+ level: advanced
+ desc: Full set of rocksdb settings to override
+ default: compression=kNoCompression,max_write_buffer_number=64,min_write_buffer_number_to_merge=6,compaction_style=kCompactionStyleLevel,write_buffer_size=16777216,max_background_jobs=4,level0_file_num_compaction_trigger=8,max_bytes_for_level_base=1073741824,max_bytes_for_level_multiplier=8,compaction_readahead_size=2MB,max_total_wal_size=1073741824,writable_file_max_buffer_size=0
+ with_legacy: true
+- name: bluestore_rocksdb_options_annex
+ type: str
+ level: advanced
+ desc: An addition to bluestore_rocksdb_options. Allows setting rocksdb options without
+ repeating the existing defaults.
+ with_legacy: true
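+# Hypothetical example: to tweak two settings without restating the long default
+# string above, one could set
+#   bluestore_rocksdb_options_annex = "compaction_readahead_size=4MB,max_background_jobs=8"
+# and only those two values are appended/overridden.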
+- name: bluestore_rocksdb_cf
+ type: bool
+ level: advanced
+ desc: Enable use of rocksdb column families for bluestore metadata
+ fmt_desc: Enables sharding of BlueStore's RocksDB.
+ When ``true``, ``bluestore_rocksdb_cfs`` is used.
+ Only applied when OSD is doing ``--mkfs``.
+ default: true
+ verbatim: |
+ #ifdef WITH_SEASTAR
+ // This is necessary as the Seastar's allocator imposes restrictions
+ // on the number of threads that entered malloc/free/*. Unfortunately,
+ // RocksDB sharding in BlueStore dramatically lifted the number of
+ // threads spawn during RocksDB's init.
+ .set_validator([](std::string *value, std::string *error_message) {
+ if (const bool parsed_value = strict_strtob(value->c_str(), error_message);
+ error_message->empty() && parsed_value) {
+ *error_message = "invalid BlueStore sharding configuration."
+ " Be aware any change takes effect only on mkfs!";
+ return -EINVAL;
+ } else {
+ return 0;
+ }
+ })
+ #endif
+- name: bluestore_rocksdb_cfs
+ type: str
+ level: dev
+ desc: Definition of column families and their sharding
+ long_desc: 'Space separated list of elements: column_def [ ''='' rocksdb_options
+ ]. column_def := column_name [ ''('' shard_count [ '','' hash_begin ''-'' [ hash_end
+ ] ] '')'' ]. Example: ''I=write_buffer_size=1048576 O(6) m(7,10-)''. Interval
+ [hash_begin..hash_end) defines characters to use for hash calculation. Recommended
+ hash ranges: O(0-13) P(0-8) m(0-16). Sharding of S,T,C,M,B prefixes is not advised'
+ fmt_desc: Definition of BlueStore's RocksDB sharding.
+ The optimal value depends on multiple factors, and modification is inadvisable.
+ This setting is used only when the OSD is doing ``--mkfs``.
+ Subsequent runs of the OSD retrieve the sharding from disk.
+ default: m(3) p(3,0-12) O(3,0-13)=block_cache={type=binned_lru} L=min_write_buffer_number_to_merge=32 P=min_write_buffer_number_to_merge=32
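+# Decoding the default above per the grammar in long_desc: 'm(3)' shards column
+# family m 3 ways; 'p(3,0-12)' shards p 3 ways, hashing key characters [0..12);
+# 'O(3,0-13)=block_cache={type=binned_lru}' shards O 3 ways and gives it a
+# binned_lru block cache; 'L=...' and 'P=...' stay unsharded but override
+# min_write_buffer_number_to_merge.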
+- name: bluestore_qfsck_on_mount
+ type: bool
+ level: dev
+ desc: Run quick-fsck at mount comparing allocation-file to RocksDB allocation state
+ default: true
+ with_legacy: true
+- name: bluestore_fsck_on_mount
+ type: bool
+ level: dev
+ desc: Run fsck at mount
+ default: false
+ with_legacy: true
+- name: bluestore_fsck_on_mount_deep
+ type: bool
+ level: dev
+ desc: Run deep fsck at mount when bluestore_fsck_on_mount is set to true
+ default: false
+ with_legacy: true
+- name: bluestore_fsck_quick_fix_on_mount
+ type: bool
+ level: dev
+ desc: Do quick-fix for the store at mount
+ default: false
+ with_legacy: true
+- name: bluestore_fsck_on_umount
+ type: bool
+ level: dev
+ desc: Run fsck at umount
+ default: false
+ with_legacy: true
+- name: bluestore_allocation_from_file
+ type: bool
+ level: dev
+ desc: Remove allocation info from RocksDB and store the info in a new allocation file
+ default: true
+ with_legacy: true
+- name: bluestore_debug_inject_allocation_from_file_failure
+ type: float
+ level: dev
+ desc: Enables random error injections when restoring allocation map from file.
+ long_desc: Specifies error injection probability for restoring allocation map from file
+ hence causing full recovery. Intended primarily for testing.
+ default: 0
+ with_legacy: true
+- name: bluestore_fsck_on_umount_deep
+ type: bool
+ level: dev
+ desc: Run deep fsck at umount when bluestore_fsck_on_umount is set to true
+ default: false
+ with_legacy: true
+- name: bluestore_fsck_on_mkfs
+ type: bool
+ level: dev
+ desc: Run fsck after mkfs
+ default: true
+ with_legacy: true
+- name: bluestore_fsck_on_mkfs_deep
+ type: bool
+ level: dev
+ desc: Run deep fsck after mkfs
+ default: false
+ with_legacy: true
+- name: bluestore_sync_submit_transaction
+ type: bool
+ level: dev
+ desc: Try to submit metadata transaction to rocksdb in queuing thread context
+ default: false
+ with_legacy: true
+- name: bluestore_fsck_read_bytes_cap
+ type: size
+ level: advanced
+ desc: Maximum bytes read at once by deep fsck
+ default: 64_M
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_fsck_quick_fix_threads
+ type: int
+ level: advanced
+ desc: Number of additional threads to perform quick-fix (shallow fsck) command
+ default: 2
+ with_legacy: true
+- name: bluestore_fsck_shared_blob_tracker_size
+ type: float
+ level: dev
+ desc: Size (as a fraction of osd_memory_target; the default corresponds to 128MB) of a hash table to track shared blob ref counts. The higher the size, the more precise the tracker and the less overhead during repair.
+ default: 0.03125
+ see_also:
+ - osd_memory_target
+ flags:
+ - runtime
+- name: bluestore_throttle_bytes
+ type: size
+ level: advanced
+ desc: Maximum bytes in flight before we throttle IO submission
+ default: 64_M
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_throttle_deferred_bytes
+ type: size
+ level: advanced
+ desc: Maximum bytes for deferred writes before we throttle IO submission
+ default: 128_M
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_throttle_cost_per_io
+ type: size
+ level: advanced
+ desc: Overhead added to transaction cost (in bytes) for each IO
+ default: 0
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_throttle_cost_per_io_hdd
+ type: uint
+ level: advanced
+ desc: Default bluestore_throttle_cost_per_io for rotational media
+ default: 670000
+ see_also:
+ - bluestore_throttle_cost_per_io
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_throttle_cost_per_io_ssd
+ type: uint
+ level: advanced
+ desc: Default bluestore_throttle_cost_per_io for non-rotational (solid state) media
+ default: 4000
+ see_also:
+ - bluestore_throttle_cost_per_io
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_deferred_batch_ops
+ type: uint
+ level: advanced
+ desc: Max number of deferred writes before we flush the deferred write queue
+ default: 0
+ min: 0
+ max: 65535
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_deferred_batch_ops_hdd
+ type: uint
+ level: advanced
+ desc: Default bluestore_deferred_batch_ops for rotational media
+ default: 64
+ see_also:
+ - bluestore_deferred_batch_ops
+ min: 0
+ max: 65535
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_deferred_batch_ops_ssd
+ type: uint
+ level: advanced
+ desc: Default bluestore_deferred_batch_ops for non-rotational (solid state) media
+ default: 16
+ see_also:
+ - bluestore_deferred_batch_ops
+ min: 0
+ max: 65535
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_nid_prealloc
+ type: int
+ level: dev
+ desc: Number of unique object ids to preallocate at a time
+ default: 1024
+ with_legacy: true
+- name: bluestore_blobid_prealloc
+ type: uint
+ level: dev
+ desc: Number of unique blob ids to preallocate at a time
+ default: 10_K
+ with_legacy: true
+- name: bluestore_clone_cow
+ type: bool
+ level: advanced
+ desc: Use copy-on-write when cloning objects (versus reading and rewriting them
+ at clone time)
+ default: true
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_default_buffered_read
+ type: bool
+ level: advanced
+ desc: Cache read results by default (unless hinted NOCACHE or WONTNEED)
+ default: true
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_default_buffered_write
+ type: bool
+ level: advanced
+ desc: Cache writes by default (unless hinted NOCACHE or WONTNEED)
+ default: false
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_debug_no_reuse_blocks
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: bluestore_debug_small_allocations
+ type: int
+ level: dev
+ default: 0
+ with_legacy: true
+- name: bluestore_debug_too_many_blobs_threshold
+ type: int
+ level: dev
+ default: 24576
+ with_legacy: true
+- name: bluestore_debug_freelist
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: bluestore_debug_prefill
+ type: float
+ level: dev
+ desc: simulate fragmentation
+ default: 0
+ with_legacy: true
+- name: bluestore_debug_prefragment_max
+ type: size
+ level: dev
+ default: 1_M
+ with_legacy: true
+- name: bluestore_debug_inject_read_err
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: bluestore_debug_randomize_serial_transaction
+ type: int
+ level: dev
+ default: 0
+ with_legacy: true
+- name: bluestore_debug_omit_block_device_write
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: bluestore_debug_fsck_abort
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: bluestore_debug_omit_kv_commit
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: bluestore_debug_permit_any_bdev_label
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: bluestore_debug_random_read_err
+ type: float
+ level: dev
+ default: 0
+ with_legacy: true
+- name: bluestore_debug_inject_bug21040
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: bluestore_debug_inject_csum_err_probability
+ type: float
+ level: dev
+ desc: inject crc verification errors into bluestore device reads
+ default: 0
+ with_legacy: true
+- name: bluestore_debug_legacy_omap
+ type: bool
+ level: dev
+ desc: Allows mkfs to create an OSD in legacy OMAP naming mode (neither per-pool nor per-pg).
+ This is intended primarily for developers' purposes. The resulting OSD would
+ be transformed to the currently default 'per-pg' format when BlueStore's quick-fix or
+ repair is applied.
+ default: false
+ with_legacy: true
+- name: bluestore_fsck_error_on_no_per_pool_stats
+ type: bool
+ level: advanced
+ desc: Make fsck error (instead of warn) when bluestore lacks per-pool stats, e.g.,
+ after an upgrade
+ default: false
+ with_legacy: true
+- name: bluestore_warn_on_bluefs_spillover
+ type: bool
+ level: advanced
+ desc: Enable health indication on bluefs slow device usage
+ default: true
+ with_legacy: true
+- name: bluestore_warn_on_legacy_statfs
+ type: bool
+ level: advanced
+ desc: Enable health indication on lack of per-pool statfs reporting from bluestore
+ default: true
+ with_legacy: true
+- name: bluestore_warn_on_spurious_read_errors
+ type: bool
+ level: advanced
+ desc: Enable health indication when spurious read errors are observed by OSD
+ default: true
+ with_legacy: true
+- name: bluestore_fsck_error_on_no_per_pool_omap
+ type: bool
+ level: advanced
+ desc: Make fsck error (instead of warn) when objects without per-pool omap are found
+ default: false
+ with_legacy: true
+- name: bluestore_fsck_error_on_no_per_pg_omap
+ type: bool
+ level: advanced
+ desc: Make fsck error (instead of warn) when objects without per-pg omap are found
+ default: false
+ with_legacy: true
+- name: bluestore_warn_on_no_per_pool_omap
+ type: bool
+ level: advanced
+ desc: Enable health indication on lack of per-pool omap
+ default: true
+ with_legacy: true
+- name: bluestore_warn_on_no_per_pg_omap
+ type: bool
+ level: advanced
+ desc: Enable health indication on lack of per-pg omap
+ default: false
+ with_legacy: true
+- name: bluestore_log_op_age
+ type: float
+ level: advanced
+ desc: log operation if it's slower than this age (seconds)
+ default: 5
+ with_legacy: true
+- name: bluestore_log_omap_iterator_age
+ type: float
+ level: advanced
+ desc: log omap iteration operation if it's slower than this age (seconds)
+ default: 5
+ with_legacy: true
+- name: bluestore_log_collection_list_age
+ type: float
+ level: advanced
+ desc: log collection list operation if it's slower than this age (seconds)
+ default: 1_min
+ with_legacy: true
+- name: bluestore_debug_enforce_settings
+ type: str
+ level: dev
+ desc: Enforces specific hw profile settings
+ long_desc: '''hdd'' enforces settings intended for BlueStore on top of a rotational
+ drive. ''ssd'' enforces settings intended for BlueStore on top of a solid state drive.
+ ''default'' uses settings for the actual hardware.'
+ default: default
+ enum_values:
+ - default
+ - hdd
+ - ssd
+ with_legacy: true
+- name: bluestore_avl_alloc_ff_max_search_count
+ type: uint
+ level: dev
+ desc: Search for this many ranges in first-fit mode before switching over to
+ best-fit mode. 0 to iterate through all ranges for the required chunk.
+ default: 100
+- name: bluestore_avl_alloc_ff_max_search_bytes
+ type: size
+ level: dev
+  desc: Maximum distance to search in first-fit mode before switching over
+    to best-fit mode. 0 to iterate through all ranges for required chunk.
+ default: 16_M
+- name: bluestore_avl_alloc_bf_threshold
+ type: uint
+ level: dev
+ desc: Sets threshold at which shrinking max free chunk size triggers enabling best-fit
+ mode.
+ long_desc: 'AVL allocator works in two modes: near-fit and best-fit. By default,
+ it uses very fast near-fit mode, in which it tries to fit a new block near the
+ last allocated block of similar size. The second mode is much slower best-fit
+ mode, in which it tries to find an exact match for the requested allocation. This
+ mode is used when either the device gets fragmented or when it is low on free
+ space. When the largest free block is smaller than ''bluestore_avl_alloc_bf_threshold'',
+ best-fit mode is used.'
+ default: 128_K
+ see_also:
+ - bluestore_avl_alloc_bf_free_pct
+- name: bluestore_avl_alloc_bf_free_pct
+ type: uint
+ level: dev
+ desc: Sets threshold at which shrinking free space (in %, integer) triggers enabling
+ best-fit mode.
+ long_desc: 'AVL allocator works in two modes: near-fit and best-fit. By default,
+ it uses very fast near-fit mode, in which it tries to fit a new block near the
+ last allocated block of similar size. The second mode is much slower best-fit
+ mode, in which it tries to find an exact match for the requested allocation. This
+ mode is used when either the device gets fragmented or when it is low on free
+ space. When free space is smaller than ''bluestore_avl_alloc_bf_free_pct'', best-fit
+ mode is used.'
+ default: 4
+ see_also:
+ - bluestore_avl_alloc_bf_threshold
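+# Tuning sketch for the two thresholds above (values are hypothetical, not
+# recommendations): best-fit mode engages when either threshold is crossed, so
+# to switch over earlier on heavily fragmented devices one might raise both:
+#   ceph config set osd bluestore_avl_alloc_bf_threshold 262144
+#   ceph config set osd bluestore_avl_alloc_bf_free_pct 8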
+- name: bluestore_hybrid_alloc_mem_cap
+ type: uint
+ level: dev
+  desc: Maximum RAM the hybrid allocator should use before enabling the bitmap supplement
+ default: 64_M
+- name: bluestore_volume_selection_policy
+ type: str
+ level: dev
+ desc: Determines bluefs volume selection policy
+  long_desc: Determines bluefs volume selection policy. 'use_some_extra*' policies
+    allow overriding RocksDB level granularity and putting a high level's data on the
+    faster device even when the level doesn't completely fit there. 'fit_to_fast' policy
+    enables using 100% of faster disk capacity and allows the user to turn on the
+    'level_compaction_dynamic_level_bytes' option in RocksDB options.
+ default: use_some_extra
+ enum_values:
+ - rocksdb_original
+ - use_some_extra
+ - use_some_extra_enforced
+ - fit_to_fast
+ with_legacy: true
+- name: bluestore_volume_selection_reserved_factor
+ type: float
+ level: advanced
+  desc: DB level size multiplier. Determines amount of space at DB device to bar from
+    the usage when 'use some extra' policy is in action. Reserved size is determined
+    as sum(L_max_size[0], ..., L_max_size[L-1]) + L_max_size[L] * this_factor
+ default: 2
+ flags:
+ - startup
+ with_legacy: true
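+# Worked example for the formula above (RocksDB level sizes are hypothetical):
+# with L_max_size = [256 MiB, 2.5 GiB, 25 GiB], extra data landing on level
+# L=2, and the default factor of 2, the reserved space is
+# 256 MiB + 2.5 GiB + 25 GiB * 2 = ~52.75 GiB barred from 'use some extra' placement.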
+- name: bluestore_volume_selection_reserved
+ type: int
+ level: advanced
+  desc: Space reserved at DB device and not allowed for 'use some extra' policy usage.
+    Overrides 'bluestore_volume_selection_reserved_factor' setting and introduces
+    a straightforward limit.
+ default: 0
+ flags:
+ - startup
+ with_legacy: true
+- name: bdev_ioring
+ type: bool
+ level: advanced
+ desc: Enables Linux io_uring API instead of libaio
+ default: false
+- name: bdev_ioring_hipri
+ type: bool
+ level: advanced
+  desc: Use polled IO completions with the Linux io_uring API
+ default: false
+- name: bdev_ioring_sqthread_poll
+ type: bool
+ level: advanced
+  desc: Offload submission/completion to a kernel thread when using the Linux io_uring API
+ default: false
+- name: bluestore_kv_sync_util_logging_s
+ type: float
+ level: advanced
+ desc: KV sync thread utilization logging period
+ long_desc: How often (in seconds) to print KV sync thread utilization, not logged
+ when set to 0 or when utilization is 0%
+ default: 10
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_fail_eio
+ type: bool
+ level: dev
+ desc: fail/crash on EIO
+  long_desc: whether the BlueStore OSD fails on EIO
+ default: false
+ flags:
+ - runtime
+ with_legacy: true
+- name: bluestore_zero_block_detection
+ type: bool
+ level: dev
+ desc: punch holes instead of writing zeros
+ long_desc: Intended for large-scale synthetic testing. Currently this is implemented
+ with punch hole semantics, affecting the logical extent map of the object. This does
+ not interact well with some RBD and CephFS features.
+ default: false
+ flags:
+ - runtime
+ with_legacy: true
+- name: kstore_max_ops
+ type: uint
+ level: advanced
+ default: 512
+ with_legacy: true
+- name: kstore_max_bytes
+ type: size
+ level: advanced
+ default: 64_M
+ with_legacy: true
+- name: kstore_backend
+ type: str
+ level: advanced
+ default: rocksdb
+ with_legacy: true
+- name: kstore_rocksdb_options
+ type: str
+ level: advanced
+ desc: Options to pass through when RocksDB is used as the KeyValueDB for kstore.
+ default: compression=kNoCompression
+ with_legacy: true
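+# Illustrative format note: the option string is a comma-separated list of
+# RocksDB key=value pairs (option names are RocksDB's; values here are
+# hypothetical):
+#   kstore_rocksdb_options = compression=kNoCompression,max_background_jobs=4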
+- name: kstore_fsck_on_mount
+ type: bool
+ level: advanced
+ desc: Whether or not to run fsck on mount for kstore.
+ default: false
+ with_legacy: true
+- name: kstore_fsck_on_mount_deep
+ type: bool
+ level: advanced
+ desc: Whether or not to run deep fsck on mount for kstore
+ default: true
+ with_legacy: true
+- name: kstore_nid_prealloc
+ type: uint
+ level: advanced
+ default: 1_K
+ with_legacy: true
+- name: kstore_sync_transaction
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: kstore_sync_submit_transaction
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: kstore_onode_map_size
+ type: uint
+ level: advanced
+ default: 1_K
+ with_legacy: true
+- name: kstore_default_stripe_size
+ type: size
+ level: advanced
+ default: 64_K
+ with_legacy: true
+# rocksdb options that will be used for omap(if omap_backend is rocksdb)
+- name: filestore_rocksdb_options
+ type: str
+ level: dev
+ desc: Options to pass through when RocksDB is used as the KeyValueDB for filestore.
+ default: max_background_jobs=10,compaction_readahead_size=2097152,compression=kNoCompression
+ with_legacy: true
+- name: filestore_omap_backend
+ type: str
+ level: dev
+ desc: The KeyValueDB to use for filestore metadata (ie omap).
+ default: rocksdb
+ enum_values:
+ - leveldb
+ - rocksdb
+ with_legacy: true
+- name: filestore_omap_backend_path
+ type: str
+ level: dev
+  desc: The path where the filestore KeyValueDB should store its database(s).
+ with_legacy: true
+# filestore wb throttle limits
+- name: filestore_wbthrottle_enable
+ type: bool
+ level: advanced
+  desc: Enable throttling of operations to the backing file system
+ default: true
+ with_legacy: true
+- name: filestore_wbthrottle_btrfs_bytes_start_flusher
+ type: size
+ level: advanced
+  desc: Start flushing (fsyncing) when this many bytes are written (btrfs)
+ default: 40_M
+ with_legacy: true
+- name: filestore_wbthrottle_btrfs_bytes_hard_limit
+ type: size
+ level: advanced
+ desc: Block writes when this many bytes haven't been flushed (fsynced) (btrfs)
+ default: 400_M
+ with_legacy: true
+- name: filestore_wbthrottle_btrfs_ios_start_flusher
+ type: uint
+ level: advanced
+  desc: Start flushing (fsyncing) when this many IOs are written (btrfs)
+ default: 500
+ with_legacy: true
+- name: filestore_wbthrottle_btrfs_ios_hard_limit
+ type: uint
+ level: advanced
+ desc: Block writes when this many IOs haven't been flushed (fsynced) (btrfs)
+ default: 5000
+ with_legacy: true
+- name: filestore_wbthrottle_btrfs_inodes_start_flusher
+ type: uint
+ level: advanced
+ desc: Start flushing (fsyncing) when this many distinct inodes have been modified
+ (btrfs)
+ default: 500
+ with_legacy: true
+- name: filestore_wbthrottle_xfs_bytes_start_flusher
+ type: size
+ level: advanced
+  desc: Start flushing (fsyncing) when this many bytes are written (xfs)
+ default: 40_M
+ with_legacy: true
+- name: filestore_wbthrottle_xfs_bytes_hard_limit
+ type: size
+ level: advanced
+ desc: Block writes when this many bytes haven't been flushed (fsynced) (xfs)
+ default: 400_M
+ with_legacy: true
+- name: filestore_wbthrottle_xfs_ios_start_flusher
+ type: uint
+ level: advanced
+ desc: Start flushing (fsyncing) when this many IOs are written (xfs)
+ default: 500
+ with_legacy: true
+- name: filestore_wbthrottle_xfs_ios_hard_limit
+ type: uint
+ level: advanced
+ desc: Block writes when this many IOs haven't been flushed (fsynced) (xfs)
+ default: 5000
+ with_legacy: true
+- name: filestore_wbthrottle_xfs_inodes_start_flusher
+ type: uint
+ level: advanced
+ desc: Start flushing (fsyncing) when this many distinct inodes have been modified
+ (xfs)
+ default: 500
+ with_legacy: true
+# These must be less than the fd limit
+- name: filestore_wbthrottle_btrfs_inodes_hard_limit
+ type: uint
+ level: advanced
+ desc: Block writing when this many inodes have outstanding writes (btrfs)
+ default: 5000
+ with_legacy: true
+- name: filestore_wbthrottle_xfs_inodes_hard_limit
+ type: uint
+ level: advanced
+ desc: Block writing when this many inodes have outstanding writes (xfs)
+ default: 5000
+ with_legacy: true
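+# How the wbthrottle limits above interact (using the xfs defaults): flushing
+# starts once 40 MiB, 500 IOs, or 500 modified inodes are outstanding, and new
+# writes block once 400 MiB, 5000 IOs, or 5000 inodes remain unflushed.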
+# Introduce a O_DSYNC write in the filestore
+- name: filestore_odsync_write
+ type: bool
+ level: dev
+ desc: Write with O_DSYNC
+ default: false
+ with_legacy: true
+# Tests index failure paths
+- name: filestore_index_retry_probability
+ type: float
+ level: dev
+ default: 0
+ with_legacy: true
+# Allow object read error injection
+- name: filestore_debug_inject_read_err
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: filestore_debug_random_read_err
+ type: float
+ level: dev
+ default: 0
+ with_legacy: true
+# Expensive debugging check on sync
+- name: filestore_debug_omap_check
+ type: bool
+ level: dev
+ default: false
+  fmt_desc: Debugging check on synchronization. This is an expensive operation.
+  with_legacy: true
+- name: filestore_omap_header_cache_size
+ type: size
+ level: dev
+ default: 1_K
+ with_legacy: true
+# Use omap for xattrs for attrs over
+# filestore_max_inline_xattr_size or
+- name: filestore_max_inline_xattr_size
+ type: size
+ level: dev
+ default: 0
+ with_legacy: true
+- name: filestore_max_inline_xattr_size_xfs
+ type: size
+ level: dev
+ default: 64_K
+ with_legacy: true
+- name: filestore_max_inline_xattr_size_btrfs
+ type: size
+ level: dev
+ default: 2_K
+ with_legacy: true
+- name: filestore_max_inline_xattr_size_other
+ type: size
+ level: dev
+ default: 512
+ with_legacy: true
+# for more than filestore_max_inline_xattrs attrs
+- name: filestore_max_inline_xattrs
+ type: uint
+ level: dev
+ default: 0
+ with_legacy: true
+- name: filestore_max_inline_xattrs_xfs
+ type: uint
+ level: dev
+ default: 10
+ with_legacy: true
+- name: filestore_max_inline_xattrs_btrfs
+ type: uint
+ level: dev
+ default: 10
+ with_legacy: true
+- name: filestore_max_inline_xattrs_other
+ type: uint
+ level: dev
+ default: 2
+ with_legacy: true
+- name: filestore_max_xattr_value_size
+ type: size
+ level: dev
+ default: 0
+ with_legacy: true
+- name: filestore_max_xattr_value_size_xfs
+ type: size
+ level: dev
+ default: 64_K
+ with_legacy: true
+- name: filestore_max_xattr_value_size_btrfs
+ type: size
+ level: dev
+ default: 64_K
+ with_legacy: true
+# ext4 allows 4k xattrs total including some smallish extra fields and the
+# keys. We're allowing 2 512-byte inline attrs in addition to some filestore
+# replay attrs. After accounting for those, we still need to fit up to
+# two attrs of this value. That means we need this value to be around 1k
+# to be safe. This is hacky, but it's not worth complicating the code
+# to work around ext4's total xattr limit.
+- name: filestore_max_xattr_value_size_other
+ type: size
+ level: dev
+ default: 1_K
+ with_legacy: true
+# track sloppy crcs
+- name: filestore_sloppy_crc
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: filestore_sloppy_crc_block_size
+ type: size
+ level: dev
+ default: 64_K
+ with_legacy: true
+- name: filestore_max_alloc_hint_size
+ type: size
+ level: dev
+ default: 1_M
+ with_legacy: true
+# seconds
+- name: filestore_max_sync_interval
+ type: float
+ level: advanced
+ desc: Period between calls to syncfs(2) and journal trims (seconds)
+ default: 5
+ with_legacy: true
+# seconds
+- name: filestore_min_sync_interval
+ type: float
+ level: dev
+ desc: Minimum period between calls to syncfs(2)
+ default: 0.01
+ with_legacy: true
+- name: filestore_btrfs_snap
+ type: bool
+ level: dev
+ default: true
+ with_legacy: true
+- name: filestore_btrfs_clone_range
+ type: bool
+ level: advanced
+ desc: Use btrfs clone_range ioctl to efficiently duplicate objects
+ default: true
+ with_legacy: true
+# zfsonlinux is still unstable
+- name: filestore_zfs_snap
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: filestore_fsync_flushes_journal_data
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+# (try to) use fiemap
+- name: filestore_fiemap
+ type: bool
+ level: advanced
+ desc: Use fiemap ioctl(2) to determine which parts of objects are sparse
+ default: false
+ with_legacy: true
+- name: filestore_punch_hole
+ type: bool
+ level: advanced
+ desc: Use fallocate(2) FALLOC_FL_PUNCH_HOLE to efficiently zero ranges of objects
+ default: false
+ with_legacy: true
+# (try to) use seek_data/hole
+- name: filestore_seek_data_hole
+ type: bool
+ level: advanced
+ desc: Use lseek(2) SEEK_HOLE and SEEK_DATA to determine which parts of objects are
+ sparse
+ default: false
+ with_legacy: true
+- name: filestore_splice
+ type: bool
+ level: advanced
+ desc: Use splice(2) to more efficiently copy data between files
+ default: false
+ with_legacy: true
+- name: filestore_fadvise
+ type: bool
+ level: advanced
+ desc: Use posix_fadvise(2) to pass hints to file system
+ default: true
+ with_legacy: true
+# collect device partition information for management application to use
+- name: filestore_collect_device_partition_information
+ type: bool
+ level: advanced
+ desc: Collect metadata about the backing file system on OSD startup
+ default: true
+ with_legacy: true
+# (try to) use extsize for alloc hint NOTE: extsize seems to trigger
+# data corruption in xfs prior to kernel 3.5. filestore will
+# implicitly disable this if it cannot confirm the kernel is newer
+# than that.
+# NOTE: This option involves a tradeoff: When disabled, fragmentation is
+# worse, but large sequential writes are faster. When enabled, large
+# sequential writes are slower, but fragmentation is reduced.
+- name: filestore_xfs_extsize
+ type: bool
+ level: advanced
+ desc: Use XFS extsize ioctl(2) to hint allocator about expected write sizes
+ default: false
+ with_legacy: true
+- name: filestore_journal_parallel
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: filestore_journal_writeahead
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: filestore_journal_trailing
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: filestore_queue_max_ops
+ type: uint
+ level: advanced
+ desc: Max IO operations in flight
+ default: 50
+ with_legacy: true
+- name: filestore_queue_max_bytes
+ type: size
+ level: advanced
+ desc: Max (written) bytes in flight
+ default: 100_M
+ with_legacy: true
+- name: filestore_caller_concurrency
+ type: int
+ level: dev
+ default: 10
+ with_legacy: true
+# Expected filestore throughput in B/s
+- name: filestore_expected_throughput_bytes
+ type: float
+ level: advanced
+ desc: Expected throughput of backend device (aids throttling calculations)
+ default: 209715200
+ with_legacy: true
+# Expected filestore throughput in ops/s
+- name: filestore_expected_throughput_ops
+ type: float
+ level: advanced
+  desc: Expected throughput of backend device in IOPS (aids throttling calculations)
+ default: 200
+ with_legacy: true
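+# Arithmetic note: the byte default above works out to 200 MiB/s
+# (200 * 1024 * 1024 = 209715200), consistent with the 200 IOPS default at an
+# assumed average op size of 1 MiB.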
+# Filestore max delay multiple. Defaults to 0 (disabled)
+- name: filestore_queue_max_delay_multiple
+ type: float
+ level: dev
+ default: 0
+ with_legacy: true
+# Filestore high delay multiple. Defaults to 0 (disabled)
+- name: filestore_queue_high_delay_multiple
+ type: float
+ level: dev
+ default: 0
+ with_legacy: true
+# Filestore max delay multiple bytes. Defaults to 0 (disabled)
+- name: filestore_queue_max_delay_multiple_bytes
+ type: float
+ level: dev
+ default: 0
+ with_legacy: true
+# Filestore high delay multiple bytes. Defaults to 0 (disabled)
+- name: filestore_queue_high_delay_multiple_bytes
+ type: float
+ level: dev
+ default: 0
+ with_legacy: true
+# Filestore max delay multiple ops. Defaults to 0 (disabled)
+- name: filestore_queue_max_delay_multiple_ops
+ type: float
+ level: dev
+ default: 0
+ with_legacy: true
+# Filestore high delay multiple ops. Defaults to 0 (disabled)
+- name: filestore_queue_high_delay_multiple_ops
+ type: float
+ level: dev
+ default: 0
+ with_legacy: true
+- name: filestore_queue_low_threshhold
+ type: float
+ level: dev
+ default: 0.3
+ with_legacy: true
+- name: filestore_queue_high_threshhold
+  type: float
+  level: dev
+  default: 0.9
+  with_legacy: true
+- name: filestore_op_threads
+ type: int
+ level: advanced
+ desc: Threads used to apply changes to backing file system
+ default: 2
+ with_legacy: true
+- name: filestore_op_thread_timeout
+ type: int
+ level: advanced
+ desc: Seconds before a worker thread is considered stalled
+ default: 1_min
+ with_legacy: true
+- name: filestore_op_thread_suicide_timeout
+ type: int
+ level: advanced
+ desc: Seconds before a worker thread is considered dead
+ default: 3_min
+ with_legacy: true
+- name: filestore_commit_timeout
+ type: float
+ level: advanced
+ desc: Seconds before backing file system is considered hung
+ default: 10_min
+ with_legacy: true
+- name: filestore_fiemap_threshold
+ type: size
+ level: dev
+ default: 4_K
+ with_legacy: true
+- name: filestore_merge_threshold
+ type: int
+ level: dev
+ default: -10
+ with_legacy: true
+- name: filestore_split_multiple
+ type: int
+ level: dev
+ default: 2
+ with_legacy: true
+- name: filestore_split_rand_factor
+ type: uint
+ level: dev
+ default: 20
+ with_legacy: true
+- name: filestore_update_to
+ type: int
+ level: dev
+ default: 1000
+ with_legacy: true
+- name: filestore_blackhole
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: filestore_fd_cache_size
+ type: int
+ level: dev
+ default: 128
+ with_legacy: true
+- name: filestore_fd_cache_shards
+ type: int
+ level: dev
+ default: 16
+ with_legacy: true
+- name: filestore_ondisk_finisher_threads
+ type: int
+ level: dev
+ default: 1
+ with_legacy: true
+- name: filestore_apply_finisher_threads
+ type: int
+ level: dev
+ default: 1
+ with_legacy: true
+# file onto which store transaction dumps
+- name: filestore_dump_file
+ type: str
+ level: dev
+ with_legacy: true
+# inject a failure at the n'th opportunity
+- name: filestore_kill_at
+ type: int
+ level: dev
+ default: 0
+ with_legacy: true
+# artificially stall for N seconds in op queue thread
+- name: filestore_inject_stall
+ type: int
+ level: dev
+ default: 0
+ with_legacy: true
+# fail/crash on EIO
+- name: filestore_fail_eio
+ type: bool
+ level: dev
+ default: true
+ with_legacy: true
+- name: filestore_debug_verify_split
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: journal_dio
+ type: bool
+ level: dev
+ default: true
+ fmt_desc: Enables direct i/o to the journal. Requires ``journal block
+ align`` set to ``true``.
+ with_legacy: true
+- name: journal_aio
+ type: bool
+ level: dev
+ default: true
+  fmt_desc: Enables using ``libaio`` for asynchronous writes to the journal.
+    Requires ``journal dio`` set to ``true``. The default is ``true`` for
+    version 0.61 and later, and ``false`` for version 0.60 and earlier.
+ with_legacy: true
+- name: journal_force_aio
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: journal_block_size
+ type: size
+ level: dev
+ default: 4_K
+ with_legacy: true
+- name: journal_block_align
+ type: bool
+ level: dev
+ default: true
+ fmt_desc: Block aligns write operations. Required for ``dio`` and ``aio``.
+ with_legacy: true
+- name: journal_write_header_frequency
+ type: uint
+ level: dev
+ default: 0
+ with_legacy: true
+- name: journal_max_write_bytes
+ type: size
+ level: advanced
+ desc: Max bytes in flight to journal
+ fmt_desc: The maximum number of bytes the journal will write at
+ any one time.
+ default: 10_M
+ with_legacy: true
+- name: journal_max_write_entries
+ type: int
+ level: advanced
+ desc: Max IOs in flight to journal
+ fmt_desc: The maximum number of entries the journal will write at
+ any one time.
+ default: 100
+ with_legacy: true
+# Target range for journal fullness
+- name: journal_throttle_low_threshhold
+ type: float
+ level: dev
+ default: 0.6
+ with_legacy: true
+- name: journal_throttle_high_threshhold
+ type: float
+ level: dev
+ default: 0.9
+ with_legacy: true
+# Multiple over expected at high_threshhold. Defaults to 0 (disabled).
+- name: journal_throttle_high_multiple
+ type: float
+ level: dev
+ default: 0
+ with_legacy: true
+# Multiple over expected at max. Defaults to 0 (disabled).
+- name: journal_throttle_max_multiple
+ type: float
+ level: dev
+ default: 0
+ with_legacy: true
+# align data payloads >= this.
+- name: journal_align_min_size
+ type: size
+ level: dev
+ default: 64_K
+ fmt_desc: Align data payloads greater than the specified minimum.
+ with_legacy: true
+- name: journal_replay_from
+ type: int
+ level: dev
+ default: 0
+ with_legacy: true
+- name: journal_zero_on_create
+ type: bool
+ level: dev
+ default: false
+ fmt_desc: |
+ Causes the file store to overwrite the entire journal with
+ ``0``'s during ``mkfs``.
+ with_legacy: true
+# assume journal is not corrupt
+- name: journal_ignore_corruption
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+# when using an SSD as the journal device, whether to discard unused journal data
+- name: journal_discard
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+# fio data directory for fio-objectstore
+- name: fio_dir
+ type: str
+ level: advanced
+ default: /tmp/fio
+ with_legacy: true
+- name: rados_mon_op_timeout
+ type: secs
+ level: advanced
+ desc: timeout for operations handled by monitors such as statfs (0 is unlimited)
+ default: 0
+ min: 0
+ flags:
+ - runtime
+- name: rados_osd_op_timeout
+ type: secs
+ level: advanced
+ desc: timeout for operations handled by osds such as write (0 is unlimited)
+ default: 0
+ min: 0
+ flags:
+ - runtime
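+# Illustrative usage of the timeout above (the value is hypothetical): to make
+# librados clients give up on unresponsive OSDs after 30 seconds instead of
+# waiting indefinitely:
+#   ceph config set client rados_osd_op_timeout 30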
+# true if LTTng-UST tracepoints should be enabled
+- name: rados_tracing
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: mgr_connect_retry_interval
+ type: float
+ level: dev
+ default: 1
+ services:
+ - common
+- name: mgr_client_service_daemon_unregister_timeout
+ type: float
+ level: dev
+ desc: Time to wait during shutdown to deregister service with mgr
+ default: 1
+- name: throttler_perf_counter
+ type: bool
+ level: advanced
+ default: true
+ with_legacy: true
+- name: event_tracing
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: bluestore_tracing
+ type: bool
+ level: advanced
+ desc: Enable bluestore event tracing.
+ default: false
+- name: bluestore_throttle_trace_rate
+ type: float
+ level: advanced
+ desc: Rate at which to sample bluestore transactions (per second)
+ default: 0
+- name: debug_deliberately_leak_memory
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+- name: debug_asserts_on_shutdown
+ type: bool
+ level: dev
+ desc: Enable certain asserts to check for refcounting bugs on shutdown; see http://tracker.ceph.com/issues/21738
+ default: false
+- name: debug_asok_assert_abort
+ type: bool
+ level: dev
+ desc: allow commands 'assert' and 'abort' via asok for testing crash dumps etc
+ default: false
+ with_legacy: true
+- name: target_max_misplaced_ratio
+ type: float
+ level: basic
+ desc: Max ratio of misplaced objects to target when throttling data rebalancing
+ activity
+ default: 0.05
+- name: device_failure_prediction_mode
+ type: str
+ level: basic
+ desc: Method used to predict device failures
+  long_desc: To disable prediction, use 'none'. 'local' uses a prediction model that
+    runs inside the mgr daemon. 'cloud' will share metrics with a cloud service and
+    query the service for device life expectancy.
+ default: none
+ enum_values:
+ - none
+ - local
+ - cloud
+ flags:
+ - runtime
+- name: gss_ktab_client_file
+ type: str
+ level: advanced
+ desc: GSS/KRB5 Keytab file for client authentication
+ long_desc: This sets the full path for the GSS/Kerberos client keytab file location.
+ default: /var/lib/ceph/$name/gss_client_$name.ktab
+ services:
+ - mon
+ - osd
+- name: gss_target_name
+ type: str
+ level: advanced
+  long_desc: This sets the GSS target service name.
+ default: ceph
+ services:
+ - mon
+ - osd
+- name: debug_disable_randomized_ping
+ type: bool
+ level: dev
+ desc: Disable heartbeat ping randomization for testing purposes
+ default: false
+- name: debug_heartbeat_testing_span
+ type: int
+ level: dev
+ desc: Override 60 second periods for testing only
+ default: 0
+- name: librados_thread_count
+ type: uint
+ level: advanced
+ desc: Size of thread pool for Objecter
+ default: 2
+ tags:
+ - client
+ min: 1
+- name: osd_asio_thread_count
+ type: uint
+ level: advanced
+ desc: Size of thread pool for ASIO completions
+ default: 2
+ tags:
+ - osd
+ min: 1
+- name: cephsqlite_lock_renewal_interval
+ type: millisecs
+ level: advanced
+ desc: number of milliseconds before lock is renewed
+ default: 2000
+ tags:
+ - client
+ see_also:
+ - cephsqlite_lock_renewal_timeout
+ min: 100
+- name: cephsqlite_lock_renewal_timeout
+ type: millisecs
+ level: advanced
+ desc: number of milliseconds before transaction lock times out
+ long_desc: The amount of time before a running libcephsqlite VFS connection has
+ to renew a lock on the database before the lock is automatically lost. If the
+ lock is lost, the VFS will abort the process to prevent database corruption.
+ default: 30000
+ tags:
+ - client
+ see_also:
+ - cephsqlite_lock_renewal_interval
+ min: 100
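+# Relationship note for the two options above: with the defaults, a
+# libcephsqlite connection renews its database lock every 2 seconds and aborts
+# the process if it cannot renew within 30 seconds, so the renewal interval
+# must stay well under the renewal timeout.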
+- name: cephsqlite_blocklist_dead_locker
+ type: bool
+ level: advanced
+ desc: blocklist the last dead owner of the database lock
+ long_desc: Require that the Ceph SQLite VFS blocklist the last dead owner of the
+ database when cleanup was incomplete. DO NOT CHANGE THIS UNLESS YOU UNDERSTAND
+ THE RAMIFICATIONS. CORRUPTION MAY RESULT.
+ default: true
+ tags:
+ - client
+- name: bdev_type
+ type: str
+ level: advanced
+ desc: Explicitly set the device type to select the driver if it's needed
+ enum_values:
+ - aio
+ - spdk
+ - pmem
+ - hm_smr
+- name: bluestore_cleaner_sleep_interval
+ type: float
+ level: advanced
+  desc: How long the cleaner should sleep before re-checking utilization
+ default: 5
+ with_legacy: true
+- name: jaeger_tracing_enable
+ type: bool
+ level: advanced
+  desc: whether Ceph should use the Jaeger tracing system
+ default: false
+ services:
+ - rgw
+ - osd
+ with_legacy: true
+- name: jaeger_agent_port
+ type: int
+ level: advanced
+ desc: port number of the jaeger agent
+ default: 6799
+ services:
+ - rgw
+ - osd
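+# Illustrative usage of the two Jaeger options above (the port value is
+# hypothetical): enabling tracing on OSDs and pointing it at a non-default
+# agent port:
+#   ceph config set osd jaeger_tracing_enable true
+#   ceph config set osd jaeger_agent_port 6831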
+- name: mgr_ttl_cache_expire_seconds
+ type: uint
+ level: dev
+ desc: Set the time to live in seconds - set to 0 to disable the cache.
+ default: 0
+ services:
+ - mgr
diff --git a/src/common/options/immutable-object-cache.yaml.in b/src/common/options/immutable-object-cache.yaml.in
new file mode 100644
index 000000000..90b13d60d
--- /dev/null
+++ b/src/common/options/immutable-object-cache.yaml.in
@@ -0,0 +1,98 @@
+# -*- mode: YAML -*-
+---
+
+options:
+- name: immutable_object_cache_path
+ type: str
+ level: advanced
+ desc: immutable object cache data dir
+ default: /tmp/ceph_immutable_object_cache
+ services:
+ - immutable-object-cache
+- name: immutable_object_cache_sock
+ type: str
+ level: advanced
+ desc: immutable object cache domain socket
+ default: /var/run/ceph/immutable_object_cache_sock
+ services:
+ - immutable-object-cache
+- name: immutable_object_cache_max_size
+ type: size
+ level: advanced
+ desc: max immutable object cache data size
+ default: 1_G
+ services:
+ - immutable-object-cache
+- name: immutable_object_cache_max_inflight_ops
+ type: uint
+ level: advanced
+ desc: max inflight promoting requests for immutable object cache daemon
+ default: 128
+ services:
+ - immutable-object-cache
+- name: immutable_object_cache_client_dedicated_thread_num
+ type: uint
+ level: advanced
+ desc: immutable object cache client dedicated thread number
+ default: 2
+ services:
+ - immutable-object-cache
+- name: immutable_object_cache_watermark
+ type: float
+ level: advanced
+  desc: immutable object cache watermark
+ default: 0.9
+ services:
+ - immutable-object-cache
+- name: immutable_object_cache_qos_schedule_tick_min
+ type: millisecs
+ level: advanced
+ desc: minimum schedule tick for immutable object cache
+ default: 50
+ services:
+ - immutable-object-cache
+ min: 1
+- name: immutable_object_cache_qos_iops_limit
+ type: uint
+ level: advanced
+ desc: the desired immutable object cache IO operations limit per second
+ default: 0
+ services:
+ - immutable-object-cache
+- name: immutable_object_cache_qos_iops_burst
+ type: uint
+ level: advanced
+ desc: the desired burst limit of immutable object cache IO operations
+ default: 0
+ services:
+ - immutable-object-cache
+- name: immutable_object_cache_qos_iops_burst_seconds
+ type: secs
+ level: advanced
+ desc: the desired burst duration in seconds of immutable object cache IO operations
+ default: 1
+ services:
+ - immutable-object-cache
+ min: 1
+- name: immutable_object_cache_qos_bps_limit
+ type: uint
+ level: advanced
+ desc: the desired immutable object cache IO bytes limit per second
+ default: 0
+ services:
+ - immutable-object-cache
+- name: immutable_object_cache_qos_bps_burst
+ type: uint
+ level: advanced
+ desc: the desired burst limit of immutable object cache IO bytes
+ default: 0
+ services:
+ - immutable-object-cache
+- name: immutable_object_cache_qos_bps_burst_seconds
+ type: secs
+ level: advanced
+ desc: the desired burst duration in seconds of immutable object cache IO bytes
+ default: 1
+ services:
+ - immutable-object-cache
+ min: 1
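+# Illustrative QoS sketch for the options above (values are hypothetical): cap
+# the daemon at 4096 IOPS with bursts of 8192 IOPS sustained for up to 2
+# seconds; a value of 0 leaves the corresponding limit disabled:
+#   immutable_object_cache_qos_iops_limit = 4096
+#   immutable_object_cache_qos_iops_burst = 8192
+#   immutable_object_cache_qos_iops_burst_seconds = 2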
diff --git a/src/common/options/legacy_config_opts.h b/src/common/options/legacy_config_opts.h
new file mode 100644
index 000000000..3f8c8244c
--- /dev/null
+++ b/src/common/options/legacy_config_opts.h
@@ -0,0 +1,11 @@
+#include "global_legacy_options.h"
+#include "cephfs-mirror_legacy_options.h"
+#include "mds_legacy_options.h"
+#include "mds-client_legacy_options.h"
+#include "mgr_legacy_options.h"
+#include "mon_legacy_options.h"
+#include "osd_legacy_options.h"
+#include "rbd_legacy_options.h"
+#include "rbd-mirror_legacy_options.h"
+#include "immutable-object-cache_legacy_options.h"
+#include "rgw_legacy_options.h"
diff --git a/src/common/options/mds-client.yaml.in b/src/common/options/mds-client.yaml.in
new file mode 100644
index 000000000..4e599d4cf
--- /dev/null
+++ b/src/common/options/mds-client.yaml.in
@@ -0,0 +1,580 @@
+# -*- mode: YAML -*-
+---
+
+options:
+- name: client_cache_size
+ type: size
+ level: basic
+ desc: soft maximum number of directory entries in client cache
+ fmt_desc: Set the number of inodes that the client keeps in the metadata cache.
+ default: 16_K
+ services:
+ - mds_client
+ with_legacy: true
+- name: client_cache_mid
+ type: float
+ level: advanced
+ desc: mid-point of client cache LRU
+  fmt_desc: Set client cache midpoint. The midpoint splits the least recently used
+    list into hot and warm lists.
+ default: 0.75
+ services:
+ - mds_client
+ with_legacy: true
+- name: client_use_random_mds
+ type: bool
+ level: dev
+ desc: issue new requests to a random active MDS
+ fmt_desc: Choose random MDS for each request.
+ default: false
+ services:
+ - mds_client
+ with_legacy: true
+- name: client_mount_timeout
+ type: secs
+ level: advanced
+ desc: timeout for mounting CephFS (seconds)
+ fmt_desc: Set the timeout for CephFS mount in seconds.
+ default: 5_min
+ services:
+ - mds_client
+- name: client_tick_interval
+ type: secs
+ level: dev
+ desc: seconds between client upkeep ticks
+ fmt_desc: Set the interval in seconds between capability renewal and other upkeep.
+ default: 1
+ services:
+ - mds_client
+- name: client_trace
+ type: str
+ level: dev
+ desc: file containing trace of client operations
+ services:
+ - mds_client
+ with_legacy: true
+- name: client_readahead_min
+ type: size
+ level: advanced
+ desc: minimum bytes to readahead in a file
+  fmt_desc: Set the minimum number of bytes that the client reads ahead.
+ default: 128_K
+ services:
+ - mds_client
+ with_legacy: true
+- name: client_readahead_max_bytes
+ type: size
+ level: advanced
+ desc: maximum bytes to readahead in a file (zero is unlimited)
+ fmt_desc: Set the maximum number of bytes that the client reads ahead for
+ future read operations. Overridden by the ``client_readahead_max_periods``
+ setting.
+ default: 0
+ services:
+ - mds_client
+ with_legacy: true
+# as multiple of file layout period (object size * num stripes)
+- name: client_readahead_max_periods
+ type: int
+ level: advanced
+ desc: maximum stripe periods to readahead in a file
+ fmt_desc: Set the number of file layout periods (object size * number of
+ stripes) that the client reads ahead. Overrides the
+ ``client_readahead_max_bytes`` setting.
+ default: 4
+ services:
+ - mds_client
+ with_legacy: true
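+# Worked example (assuming a default-style file layout of 4 MiB objects and a
+# single stripe): one layout period is 4 MiB, so the default of 4 periods
+# permits up to 16 MiB of readahead, taking precedence over any
+# client_readahead_max_bytes value.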
+- name: client_reconnect_stale
+ type: bool
+ level: advanced
+ desc: reconnect when the session becomes stale
+ default: false
+ services:
+ - mds_client
+- name: client_snapdir
+ type: str
+ level: advanced
+ desc: pseudo directory for snapshot access to a directory
+ fmt_desc: Set the snapshot directory name.
+ default: .snap
+ services:
+ - mds_client
+ with_legacy: true
+- name: client_mountpoint
+ type: str
+ level: advanced
+ desc: default mount-point
+ fmt_desc: Directory to mount on the CephFS file system. An alternative to the
+ ``-r`` option of the ``ceph-fuse`` command.
+ default: /
+ services:
+ - mds_client
+- name: client_mount_uid
+ type: int
+ level: advanced
+ desc: uid to mount as
+ default: -1
+ services:
+ - mds_client
+ fmt_desc: Set the user ID of CephFS mount.
+ with_legacy: true
+- name: client_mount_gid
+ type: int
+ level: advanced
+ desc: gid to mount as
+ fmt_desc: Set the group ID of CephFS mount.
+ default: -1
+ services:
+ - mds_client
+ with_legacy: true
+- name: client_notify_timeout
+ type: int
+ level: dev
+ default: 10
+ services:
+ - mds_client
+ with_legacy: true
+- name: osd_client_watch_timeout
+ type: int
+ level: dev
+ default: 30
+ services:
+ - mds_client
+ with_legacy: true
+- name: client_caps_release_delay
+ type: secs
+ level: dev
+ default: 5
+ services:
+ - mds_client
+ fmt_desc: Set the delay between capability releases in seconds. The delay
+ sets how many seconds a client waits to release capabilities that it no
+ longer needs in case the capabilities are needed for another user space
+ operation.
+- name: client_quota_df
+ type: bool
+ level: advanced
+ desc: show quota usage for statfs (df)
+ fmt_desc: Report root directory quota for the ``statfs`` operation.
+ default: true
+ services:
+ - mds_client
+ with_legacy: true
+- name: client_oc
+ type: bool
+ level: advanced
+ desc: enable object caching
+ default: true
+ services:
+ - mds_client
+ with_legacy: true
+- name: client_oc_size
+ type: size
+ level: advanced
+ desc: maximum size of object cache
+  fmt_desc: Set how many bytes of data the client will cache.
+ default: 200_M
+ services:
+ - mds_client
+ flags:
+ - runtime
+ with_legacy: true
+# MB * n (dirty OR tx.. bigish)
+- name: client_oc_max_dirty
+ type: size
+ level: advanced
+ desc: maximum size of dirty pages in object cache
+ fmt_desc: Set the maximum number of dirty bytes in the object cache.
+ default: 100_M
+ services:
+ - mds_client
+ flags:
+ - runtime
+ with_legacy: true
+# target dirty (keep this smallish)
+- name: client_oc_target_dirty
+ type: size
+ level: advanced
+  desc: target size of dirty pages in the object cache
+  fmt_desc: Set the target size of dirty data. We recommend keeping this number low.
+ default: 8_M
+ services:
+ - mds_client
+ flags:
+ - runtime
+ with_legacy: true
+- name: client_oc_max_dirty_age
+ type: float
+ level: advanced
+ desc: maximum age of dirty pages in object cache (seconds)
+ fmt_desc: Set the maximum age in seconds of dirty data in the object cache
+ before writeback.
+ default: 5
+ services:
+ - mds_client
+ flags:
+ - runtime
+ with_legacy: true
+- name: client_oc_max_objects
+ type: int
+ level: advanced
+ desc: maximum number of objects in cache
+ fmt_desc: Set the maximum number of objects in the object cache.
+ default: 1000
+ services:
+ - mds_client
+ flags:
+ - runtime
+ with_legacy: true
+# check if MDS reply contains wanted caps
+- name: client_debug_getattr_caps
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds_client
+ with_legacy: true
+# always read synchronously (go to osds)
+- name: client_debug_force_sync_read
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds_client
+ fmt_desc: If set to ``true``, clients read data directly from OSDs instead
+ of using a local page cache.
+ with_legacy: true
+- name: client_debug_inject_tick_delay
+ type: secs
+ level: dev
+ default: 0
+ services:
+ - mds_client
+- name: client_max_inline_size
+ type: size
+ level: dev
+ default: 4_K
+ services:
+ - mds_client
+ fmt_desc: Set the maximum size of inlined data stored in a file inode rather
+ than in a separate data object in RADOS. This setting only applies if the
+ ``inline_data`` flag is set on the MDS map.
+ with_legacy: true
+# synthetic client bug for testing
+- name: client_inject_release_failure
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds_client
+ with_legacy: true
+# synthetic client bug for testing
+- name: client_inject_fixed_oldest_tid
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds_client
+ with_legacy: true
+- name: client_metadata
+ type: str
+ level: advanced
+ desc: metadata key=value comma-delimited pairs appended to session metadata
+ fmt_desc: Comma-delimited strings for client metadata sent to each MDS, in addition
+ to the automatically generated version, host name, and other metadata.
+ services:
+ - mds_client
+ with_legacy: true
+- name: client_acl_type
+ type: str
+ level: advanced
+ desc: ACL type to enforce (none or "posix_acl")
+  fmt_desc: Set the ACL type. Currently, the only possible value is ``"posix_acl"`` to
+    enable POSIX ACL, or an empty string. This option only takes effect when
+    ``fuse_default_permissions`` is set to ``false``.
+ services:
+ - mds_client
+ with_legacy: true
+- name: client_permissions
+ type: bool
+ level: advanced
+ desc: client-enforced permission checking
+ fmt_desc: Check client permissions on all I/O operations.
+ default: true
+ services:
+ - mds_client
+ with_legacy: true
+- name: client_dirsize_rbytes
+ type: bool
+ level: advanced
+ desc: set the directory size as the number of file bytes recursively used
+ long_desc: This option enables a CephFS feature that stores the recursive directory
+    size (the bytes used by files in the directory and its descendants) in the st_size
+ field of the stat structure.
+ default: true
+ services:
+ - mds_client
+ with_legacy: true
+- name: client_force_lazyio
+ type: bool
+ level: advanced
+ default: false
+ services:
+ - mds_client
+- name: fuse_use_invalidate_cb
+ type: bool
+ level: advanced
+ desc: use fuse 2.8+ invalidate callback to keep page cache consistent
+ default: true
+ services:
+ - mds_client
+- name: fuse_disable_pagecache
+ type: bool
+ level: advanced
+ desc: disable page caching in the kernel for this FUSE mount
+ fmt_desc: If set to ``true``, kernel page cache is disabled for ``ceph-fuse``
+ mounts. When multiple clients read/write to a file at the same
+ time, readers may get stale data from page cache. Due to
+ limitations of FUSE, ``ceph-fuse`` can't disable page cache dynamically.
+ default: false
+ services:
+ - mds_client
+- name: fuse_allow_other
+ type: bool
+ level: advanced
+ desc: pass allow_other to FUSE on mount
+ default: true
+ services:
+ - mds_client
+- name: fuse_default_permissions
+ type: bool
+ level: advanced
+  desc: pass default_permissions to FUSE on mount
+  fmt_desc: When set to ``false``, the ``ceph-fuse`` utility does its own
+    permissions checking, instead of relying on the permissions enforcement in
+    FUSE. Set to ``false`` together with the ``client acl type=posix_acl``
+    option to enable POSIX ACL.
+ default: false
+ services:
+ - mds_client
+ flags:
+ - startup
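+# Illustrative ceph.conf sketch: enabling POSIX ACL enforcement in ceph-fuse
+# requires combining this option with client_acl_type, e.g.:
+#   [client]
+#   client_acl_type = posix_acl
+#   fuse_default_permissions = false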
+- name: fuse_splice_read
+ type: bool
+ level: advanced
+ desc: enable splice read to reduce the memory copies
+ default: true
+ services:
+ - mds_client
+- name: fuse_splice_write
+ type: bool
+ level: advanced
+ desc: enable splice write to reduce the memory copies
+ default: true
+ services:
+ - mds_client
+- name: fuse_splice_move
+ type: bool
+ level: advanced
+ desc: enable splice move to reduce the memory copies
+ default: true
+ services:
+ - mds_client
+- name: fuse_big_writes
+ type: bool
+ level: advanced
+ desc: big_writes is deprecated in libfuse 3.0.0
+ default: true
+ services:
+ - mds_client
+- name: fuse_max_write
+ type: size
+ level: advanced
+ desc: set the maximum number of bytes in a single write operation
+  long_desc: Set the maximum number of bytes in a single write operation that may
+    pass atomically through FUSE. The FUSE default of 128kB is selected by setting
+    this option to 0.
+ fmt_desc: Set the maximum number of bytes in a single write operation. A value of
+ 0 indicates no change; the FUSE default of 128 kbytes remains in force.
+ default: 0
+ services:
+ - mds_client
+- name: fuse_atomic_o_trunc
+ type: bool
+ level: advanced
+ desc: pass atomic_o_trunc flag to FUSE on mount
+ default: true
+ services:
+ - mds_client
+- name: fuse_debug
+ type: bool
+ level: advanced
+  desc: enable debugging for libfuse
+ default: false
+ services:
+ - mds_client
+ flags:
+ - no_mon_update
+ - startup
+- name: fuse_multithreaded
+ type: bool
+ level: advanced
+ desc: allow parallel processing through FUSE library
+ default: true
+ services:
+ - mds_client
+- name: fuse_require_active_mds
+ type: bool
+ level: advanced
+ desc: require active MDSs in the file system when mounting
+ default: true
+ services:
+ - mds_client
+- name: fuse_syncfs_on_mksnap
+ type: bool
+ level: advanced
+ desc: synchronize all local metadata/file changes after snapshot
+ default: true
+ services:
+ - mds_client
+- name: fuse_set_user_groups
+ type: bool
+ level: advanced
+  desc: have ceph-fuse consider supplementary groups for permission checks
+ default: true
+ services:
+ - mds_client
+# the client should try to use dentry invalidation instead of remounting, on kernels
+# where it believes that will work
+- name: client_try_dentry_invalidate
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds_client
+ with_legacy: true
+- name: client_max_retries_on_remount_failure
+ type: uint
+ level: advanced
+  desc: number of consecutive failed remount attempts to invalidate the kernel dcache
+    after which the client aborts
+ default: 5
+ services:
+ - mds_client
+- name: client_die_on_failed_remount
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds_client
+- name: client_die_on_failed_dentry_invalidate
+ type: bool
+ level: advanced
+ desc: kill the client when no dentry invalidation options are available
+ long_desc: The CephFS client requires a mechanism to invalidate dentries in the
+ caller (e.g. the kernel for ceph-fuse) when capabilities must be recalled. If
+ the client cannot do this then the MDS cache cannot shrink which can cause the
+ MDS to fail.
+ default: true
+ services:
+ - mds_client
+- name: client_check_pool_perm
+ type: bool
+ level: advanced
+ desc: confirm access to inode's data pool/namespace described in file layout
+ default: true
+ services:
+ - mds_client
+ with_legacy: true
+- name: client_use_faked_inos
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds_client
+ flags:
+ - startup
+ - no_mon_update
+ with_legacy: true
+- name: client_fs
+ type: str
+ level: advanced
+ desc: CephFS file system name to mount
+ long_desc: Use this with ceph-fuse, or with any process that uses libcephfs. Programs
+ using libcephfs may also pass the filesystem name into mount(), which will override
+ this setting. If no filesystem name is given in mount() or this setting, the default
+ filesystem will be mounted (usually the first created).
+ services:
+ - mds_client
+ flags:
+ - startup
+- name: client_mds_namespace
+ type: str
+ level: dev
+ services:
+ - mds_client
+ flags:
+ - startup
+- name: fake_statfs_for_testing
+ type: int
+ level: dev
+ desc: Set a value for kb and compute kb_used from total of num_bytes
+ default: 0
+ services:
+ - mds_client
+ with_legacy: true
+# XXX: mon
+- name: debug_allow_any_pool_priority
+ type: bool
+ level: dev
+ desc: Allow any pool priority to be set to test conversion to new range
+ default: false
+ services:
+ - mds_client
+ with_legacy: true
+- name: client_asio_thread_count
+ type: uint
+ level: advanced
+ desc: Size of thread pool for ASIO completions
+ default: 2
+ tags:
+ - client
+ services:
+ - mds_client
+ min: 1
+- name: client_shutdown_timeout
+ type: secs
+ level: advanced
+ desc: timeout for shutting down CephFS
+ long_desc: Timeout for shutting down CephFS via unmount or shutdown.
+ default: 30
+ tags:
+ - client
+ services:
+ - mds_client
+ min: 0
+ flags:
+ - runtime
+- name: client_collect_and_send_global_metrics
+ type: bool
+ level: advanced
+  desc: enable and force collecting and sending global metrics to the MDS
+  long_desc: Be careful with this option; when connecting to some old Ceph clusters
+    it may crash the MDS daemons while upgrading.
+ default: false
+ tags:
+ - client
+ services:
+ - mds_client
+ flags:
+ - runtime
+- name: client_quota
+ type: bool
+ level: advanced
+ desc: Enable quota enforcement
+ long_desc: Enable quota_bytes and quota_files enforcement for the client.
+ default: true
+ services:
+ - mds_client
+ flags:
+ - runtime
diff --git a/src/common/options/mds.yaml.in b/src/common/options/mds.yaml.in
new file mode 100644
index 000000000..6eb0702fc
--- /dev/null
+++ b/src/common/options/mds.yaml.in
@@ -0,0 +1,1536 @@
+# -*- mode: YAML -*-
+---
+
+options:
+- name: mds_alternate_name_max
+ type: size
+ level: advanced
+ desc: set the maximum length of alternate names for dentries
+ default: 8_K
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_fscrypt_last_block_max_size
+ type: size
+ level: advanced
+  desc: maximum size of the last block, without the header, sent along with a truncate
+    request when fscrypt is enabled
+ default: 4_K
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_valgrind_exit
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_standby_replay_damaged
+ type: bool
+ level: dev
+ default: false
+ flags:
+ - runtime
+- name: mds_numa_node
+ type: int
+ level: advanced
+  desc: set the MDS's CPU affinity to a NUMA node (-1 for none)
+ default: -1
+ services:
+ - mds
+ flags:
+ - startup
+- name: mds_data
+ type: str
+ level: advanced
+ desc: path to MDS data and keyring
+ default: /var/lib/ceph/mds/$cluster-$id
+ services:
+ - mds
+ flags:
+ - no_mon_update
+ with_legacy: true
+- name: mds_join_fs
+ type: str
+ level: basic
+ desc: file system MDS prefers to join
+ long_desc: This setting indicates which file system name the MDS should prefer to
+ join (affinity). The monitors will try to have the MDS cluster safely reach a
+ state where all MDS have strong affinity, even via failovers to a standby.
+ services:
+ - mds
+ flags:
+ - runtime
+# max xattr kv pairs size for each dir/file
+- name: mds_max_xattr_pairs_size
+ type: size
+ level: advanced
+ desc: maximum aggregate size of extended attributes on a file
+ default: 64_K
+ services:
+ - mds
+ with_legacy: true
+- name: mds_cache_trim_interval
+ type: secs
+ level: advanced
+ desc: interval in seconds between cache trimming
+ default: 1
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_cache_release_free_interval
+ type: secs
+ level: dev
+ desc: interval in seconds between heap releases
+ default: 10
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_cache_memory_limit
+ type: size
+ level: basic
+ desc: target maximum memory usage of MDS cache
+ long_desc: This sets a target maximum memory usage of the MDS cache and is the primary
+ tunable to limit the MDS memory usage. The MDS will try to stay under a reservation
+ of this limit (by default 95%; 1 - mds_cache_reservation) by trimming unused metadata
+ in its cache and recalling cached items in the client caches. It is possible for
+ the MDS to exceed this limit due to slow recall from clients. The mds_health_cache_threshold
+ (150%) sets a cache full threshold for when the MDS signals a cluster health warning.
+ default: 4_G
+ services:
+ - mds
+ flags:
+ - runtime
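+# Worked example (using the defaults here and in the entries below): with a
+# 4 GiB limit and a 0.05 reservation, the MDS tries to stay under ~3.8 GiB
+# (95% of the limit); a health warning fires at 1.5x the limit, i.e. 6 GiB.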
+- name: mds_cache_reservation
+ type: float
+ level: advanced
+ desc: amount of memory to reserve for future cached objects
+ fmt_desc: The cache reservation (memory or inodes) for the MDS cache to maintain.
+ Once the MDS begins dipping into its reservation, it will recall
+ client state until its cache size shrinks to restore the
+ reservation.
+ default: 0.05
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_health_cache_threshold
+ type: float
+ level: advanced
+ desc: threshold for cache size to generate health warning
+ default: 1.5
+ services:
+ - mds
+- name: mds_cache_mid
+ type: float
+ level: advanced
+ desc: midpoint for MDS cache LRU
+ fmt_desc: The insertion point for new items in the cache LRU
+ (from the top).
+ default: 0.7
+ services:
+ - mds
+- name: mds_cache_trim_decay_rate
+ type: float
+ level: advanced
+ desc: decay rate for trimming MDS cache throttle
+ default: 1
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_cache_trim_threshold
+ type: size
+ level: advanced
+ desc: threshold for number of dentries that can be trimmed
+ default: 256_K
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_max_file_recover
+ type: uint
+ level: advanced
+ desc: maximum number of files to recover file sizes in parallel
+ default: 32
+ services:
+ - mds
+ with_legacy: true
+- name: mds_dir_max_commit_size
+ type: int
+ level: advanced
+ desc: maximum size in megabytes for a RADOS write to a directory
+ fmt_desc: The maximum size of a directory update before Ceph breaks it into
+ smaller transactions (MB).
+ default: 10
+ services:
+ - mds
+ with_legacy: true
+- name: mds_dir_keys_per_op
+ type: int
+ level: advanced
+ desc: number of directory entries to read in one RADOS operation
+ default: 16384
+ services:
+ - mds
+ with_legacy: true
+- name: mds_decay_halflife
+ type: float
+ level: advanced
+ desc: rate of decay for temperature counters on each directory for balancing
+ default: 5
+ services:
+ - mds
+ with_legacy: true
+- name: mds_beacon_interval
+ type: float
+ level: advanced
+ desc: interval in seconds between MDS beacon messages sent to monitors
+ default: 4
+ services:
+ - mds
+ with_legacy: true
+- name: mds_beacon_grace
+ type: float
+ level: advanced
+ desc: tolerance in seconds for missed MDS beacons to monitors
+ fmt_desc: The interval without beacons before Ceph declares an MDS laggy
+    (and possibly replaces it).
+ default: 15
+ services:
+ - mds
+ with_legacy: true
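+# Relationship note: with the 4-second beacon interval and the 15-second grace
+# above, an MDS is declared laggy after roughly three to four consecutive
+# missed beacons.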
+- name: mds_heartbeat_reset_grace
+ type: uint
+ level: advanced
+  desc: the basic unit of tolerance, in loop iterations, that a loop running while
+    holding the mds_lock may complete before it must reset the heartbeat
+ default: 1000
+ services:
+ - mds
+- name: mds_heartbeat_grace
+ type: float
+ level: advanced
+ desc: tolerance in seconds for MDS internal heartbeat
+ default: 15
+ services:
+ - mds
+- name: mds_enforce_unique_name
+ type: bool
+ level: advanced
+ desc: require MDS name is unique in the cluster
+ default: true
+ services:
+ - mds
+ with_legacy: true
+# whether to blocklist clients whose sessions are dropped due to timeout
+- name: mds_session_blocklist_on_timeout
+ type: bool
+ level: advanced
+ desc: blocklist clients whose sessions have become stale
+ default: true
+ services:
+ - mds
+ with_legacy: true
+# whether to blocklist clients whose sessions are dropped via admin commands
+- name: mds_session_blocklist_on_evict
+ type: bool
+ level: advanced
+ desc: blocklist clients that have been evicted
+ default: true
+ services:
+ - mds
+ with_legacy: true
+# how many sessions should I try to load/store in a single OMAP operation?
+- name: mds_sessionmap_keys_per_op
+ type: uint
+ level: advanced
+ desc: number of omap keys to read from the SessionMap in one operation
+ default: 1_K
+ services:
+ - mds
+ with_legacy: true
+- name: mds_recall_max_caps
+ type: size
+ level: advanced
+ desc: maximum number of caps to recall from client session in single recall
+ default: 30000
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_recall_max_decay_rate
+ type: float
+ level: advanced
+ desc: decay rate for throttle on recalled caps on a session
+ default: 1.5
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_recall_max_decay_threshold
+ type: size
+ level: advanced
+ desc: decay threshold for throttle on recalled caps on a session
+ default: 128_K
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_recall_global_max_decay_threshold
+ type: size
+ level: advanced
+ desc: decay threshold for throttle on recalled caps globally
+ default: 128_K
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_recall_warning_threshold
+ type: size
+ level: advanced
+ desc: decay threshold for warning on slow session cap recall
+ default: 256_K
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_recall_warning_decay_rate
+ type: float
+ level: advanced
+ desc: decay rate for warning on slow session cap recall
+ default: 60
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_session_cache_liveness_decay_rate
+ type: float
+ level: advanced
+ desc: decay rate for session liveness leading to preemptive cap recall
+ long_desc: This determines how long a session needs to be quiescent before the MDS
+ begins preemptively recalling capabilities. The default of 5 minutes will cause
+ 10 halvings of the decay counter after 1 hour, or 1/1024. The default magnitude
+    of 10 (2^10 or 1024) is chosen so that the MDS considers a previously chatty session
+ (approximately) to be quiescent after 1 hour.
+ default: 5_min
+ services:
+ - mds
+ see_also:
+ - mds_session_cache_liveness_magnitude
+ flags:
+ - runtime
+- name: mds_session_cache_liveness_magnitude
+ type: size
+ level: advanced
+ desc: decay magnitude for preemptively recalling caps on quiet client
+ long_desc: This is the order of magnitude difference (in base 2) of the internal
+ liveness decay counter and the number of capabilities the session holds. When
+ this difference occurs, the MDS treats the session as quiescent and begins recalling
+ capabilities.
+ default: 10
+ services:
+ - mds
+ see_also:
+ - mds_session_cache_liveness_decay_rate
+ flags:
+ - runtime
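+# Worked example (hypothetical session): a session holding 65536 caps is
+# treated as quiescent once its liveness decay counter falls below
+# 65536 / 2^10 = 64, at which point the MDS begins preemptive cap recall.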
+- name: mds_session_cap_acquisition_decay_rate
+ type: float
+ level: advanced
+ desc: decay rate for session readdir caps leading to readdir throttle
+ long_desc: The half-life for the session cap acquisition counter of caps
+ acquired by readdir. This is used for throttling readdir requests from
+ clients.
+ default: 30
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_session_cap_acquisition_throttle
+ type: uint
+ level: advanced
+ desc: threshold at which the cap acquisition decay counter throttles
+ default: 100000
+ services:
+ - mds
+- name: mds_session_max_caps_throttle_ratio
+ type: float
+ level: advanced
+  desc: ratio of mds_max_caps_per_client that a client must exceed before readdir may
+    be throttled by the cap acquisition throttle
+ default: 1.1
+ services:
+ - mds
+- name: mds_cap_acquisition_throttle_retry_request_timeout
+ type: float
+ level: advanced
+ desc: timeout in seconds after which a client request is retried due to cap acquisition
+ throttling
+ default: 0.5
+ services:
+ - mds
+# detecting freeze tree deadlock
+- name: mds_freeze_tree_timeout
+ type: float
+ level: dev
+ default: 30
+ services:
+ - mds
+ with_legacy: true
+# collapse N-client health metrics to a single 'many'
+- name: mds_health_summarize_threshold
+ type: int
+ level: advanced
+ desc: threshold of number of clients to summarize late client recall
+ default: 10
+ services:
+ - mds
+ with_legacy: true
+# seconds to wait for clients during mds restart
+# make it (mdsmap.session_timeout - mds_beacon_grace)
+- name: mds_reconnect_timeout
+ type: float
+ level: advanced
+ desc: timeout in seconds to wait for clients to reconnect during MDS reconnect recovery
+ state
+ default: 45
+ services:
+ - mds
+ with_legacy: true
+- name: mds_deny_all_reconnect
+ type: bool
+ level: advanced
+ desc: flag to deny all client reconnects during failover
+ default: false
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_dir_prefetch
+ type: bool
+ level: advanced
+ desc: flag to prefetch entire dir
+ default: true
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_tick_interval
+ type: float
+ level: advanced
+ desc: time in seconds between upkeep tasks
+ fmt_desc: How frequently the MDS performs internal periodic tasks.
+ default: 5
+ services:
+ - mds
+ with_legacy: true
+# try to avoid propagating more often than this
+- name: mds_dirstat_min_interval
+ type: float
+ level: dev
+ default: 1
+ services:
+ - mds
+ fmt_desc: The minimum interval (in seconds) between propagations of recursive
+ stats up the tree; propagation is avoided more often than this.
+ with_legacy: true
+# how quickly dirstat changes propagate up the hierarchy
+- name: mds_scatter_nudge_interval
+ type: float
+ level: advanced
+ desc: minimum interval between scatter lock updates
+ fmt_desc: How quickly dirstat changes propagate up.
+ default: 5
+ services:
+ - mds
+ with_legacy: true
+- name: mds_client_prealloc_inos
+ type: int
+ level: advanced
+ desc: number of unused inodes to pre-allocate to clients for file creation
+ fmt_desc: The number of inode numbers to preallocate per client session.
+ default: 1000
+ services:
+ - mds
+ with_legacy: true
+- name: mds_client_delegate_inos_pct
+ type: uint
+ level: advanced
+ desc: percentage of preallocated inos to delegate to client
+ default: 50
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_early_reply
+ type: bool
+ level: advanced
+ desc: additional reply to clients that metadata requests are complete but not yet
+ durable
+ fmt_desc: Determines whether the MDS should allow clients to see request
+ results before they commit to the journal.
+ default: true
+ services:
+ - mds
+ with_legacy: true
+- name: mds_replay_unsafe_with_closed_session
+ type: bool
+ level: advanced
+ desc: complete all replayed requests when the MDS restarts, regardless of whether
+ the session is closed or not
+ default: false
+ services:
+ - mds
+ flags:
+ - startup
+- name: mds_default_dir_hash
+ type: int
+ level: advanced
+ desc: hash function to select directory fragment for dentry name
+ fmt_desc: The function to use for hashing files across directory fragments.
+ # CEPH_STR_HASH_RJENKINS
+ default: 2
+ services:
+ - mds
+ with_legacy: true
+- name: mds_log_pause
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds
+ with_legacy: true
+- name: mds_log_skip_corrupt_events
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds
+ fmt_desc: Determines whether the MDS should try to skip corrupt journal
+ events during journal replay.
+ with_legacy: true
+- name: mds_log_max_events
+ type: int
+ level: advanced
+ desc: maximum number of events in the MDS journal (-1 is unlimited)
+ fmt_desc: The maximum events in the journal before we initiate trimming.
+ Set to ``-1`` to disable limits.
+ default: -1
+ services:
+ - mds
+ with_legacy: true
+- name: mds_log_events_per_segment
+ type: int
+ level: advanced
+ desc: maximum number of events in an MDS journal segment
+ default: 1024
+ services:
+ - mds
+ with_legacy: true
+# segment size for mds log, default to default file_layout_t
+- name: mds_log_segment_size
+ type: size
+ level: advanced
+ desc: size in bytes of each MDS log segment
+ default: 0
+ services:
+ - mds
+ with_legacy: true
+- name: mds_log_max_segments
+ type: uint
+ level: advanced
+ desc: maximum number of segments which may be untrimmed
+ fmt_desc: The maximum number of segments (objects) in the journal before
+ we initiate trimming. Set to ``-1`` to disable limits.
+ default: 128
+ services:
+ - mds
+ with_legacy: true
+- name: mds_log_warn_factor
+ type: float
+ level: advanced
+ desc: trigger MDS_HEALTH_TRIM warning when the mds log is longer than mds_log_max_segments
+ * mds_log_warn_factor
+ default: 2
+ services:
+ - mds
+ min: 1
+ flags:
+ - runtime
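+# Worked example of the trim warning above (illustrative Python):
+#
+#   max_segments, warn_factor = 128, 2.0   # mds_log_max_segments / _warn_factor
+#   num_segments = 300                     # untrimmed journal segments
+#   warn = num_segments > max_segments * warn_factor   # True -> MDS_HEALTH_TRIM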
+- name: mds_bal_export_pin
+ type: bool
+ level: advanced
+ desc: allow setting directory export pins to particular ranks
+ default: true
+ services:
+ - mds
+ with_legacy: true
+- name: mds_export_ephemeral_random
+ type: bool
+ level: advanced
+ desc: allow ephemeral random pinning of the loaded subtrees
+ long_desc: probabilistically pin the loaded directory inode and the subtree beneath
+ it to an MDS based on the consistent hash of the inode number. The higher this
+ value, the more likely it is that loaded subtrees will be pinned
+ default: true
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_export_ephemeral_random_max
+ type: float
+ level: advanced
+ desc: the maximum percent permitted for random ephemeral pin policy
+ default: 0.01
+ services:
+ - mds
+ see_also:
+ - mds_export_ephemeral_random
+ min: 0
+ max: 1
+ flags:
+ - runtime
+- name: mds_export_ephemeral_distributed
+ type: bool
+ level: advanced
+ desc: allow ephemeral distributed pinning of the loaded subtrees
+ long_desc: 'pin the immediate child directories of the loaded directory inode based
+ on the consistent hash of the child''s inode number. '
+ default: true
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_export_ephemeral_distributed_factor
+ type: float
+ level: advanced
+ desc: multiple of max_mds for splitting and distributing directory
+ default: 2
+ services:
+ - mds
+ min: 1
+ max: 100
+ flags:
+ - runtime
+- name: mds_bal_sample_interval
+ type: float
+ level: advanced
+ desc: interval in seconds between balancer ticks
+ fmt_desc: Determines how frequently to sample directory temperature
+ (for fragmentation decisions).
+ default: 3
+ services:
+ - mds
+ with_legacy: true
+- name: mds_bal_replicate_threshold
+ type: float
+ level: advanced
+ desc: hot popularity threshold to replicate a subtree
+ fmt_desc: The minimum temperature before Ceph attempts to replicate
+ metadata to other nodes.
+ default: 8000
+ services:
+ - mds
+ with_legacy: true
+- name: mds_bal_unreplicate_threshold
+ type: float
+ level: advanced
+ desc: cold popularity threshold to merge subtrees
+ fmt_desc: The minimum temperature before Ceph stops replicating
+ metadata to other nodes.
+ default: 0
+ services:
+ - mds
+ with_legacy: true
+- name: mds_bal_split_size
+ type: int
+ level: advanced
+ desc: minimum size of directory fragment before splitting
+ fmt_desc: The maximum directory size before the MDS will split a directory
+ fragment into smaller bits.
+ default: 10000
+ services:
+ - mds
+ with_legacy: true
+- name: mds_bal_split_rd
+ type: float
+ level: advanced
+ desc: hot read popularity threshold for splitting a directory fragment
+ fmt_desc: The maximum directory read temperature before Ceph splits
+ a directory fragment.
+ default: 25000
+ services:
+ - mds
+ with_legacy: true
+- name: mds_bal_split_wr
+ type: float
+ level: advanced
+ desc: hot write popularity threshold for splitting a directory fragment
+ fmt_desc: The maximum directory write temperature before Ceph splits
+ a directory fragment.
+ default: 10000
+ services:
+ - mds
+ with_legacy: true
+- name: mds_bal_split_bits
+ type: int
+ level: advanced
+ desc: power of two child fragments for a fragment on split
+ fmt_desc: The number of bits by which to split a directory fragment.
+ default: 3
+ services:
+ - mds
+ min: 1
+ max: 24
+ with_legacy: true
+- name: mds_bal_merge_size
+ type: int
+ level: advanced
+ desc: size of fragments where merging should occur
+ fmt_desc: The minimum directory size before Ceph tries to merge
+ adjacent directory fragments.
+ default: 50
+ services:
+ - mds
+ with_legacy: true
+- name: mds_bal_interval
+ type: int
+ level: advanced
+ desc: interval between MDS balancer cycles
+ fmt_desc: The frequency (in seconds) of workload exchanges between MDSs.
+ default: 10
+ services:
+ - mds
+- name: mds_bal_fragment_interval
+ type: int
+ level: advanced
+ desc: delay in seconds before interrupting client IO to perform splits
+ fmt_desc: The delay (in seconds) between a fragment being eligible for split
+ or merge and executing the fragmentation change.
+ default: 5
+ services:
+ - mds
+# order of magnitude higher than split size
+- name: mds_bal_fragment_size_max
+ type: int
+ level: advanced
+ desc: maximum size of a directory fragment before new create/link operations fail
+ fmt_desc: The maximum size of a fragment before any new entries
+ are rejected with ENOSPC.
+ default: 100000
+ services:
+ - mds
+ with_legacy: true
+# multiple of size_max that triggers immediate split
+- name: mds_bal_fragment_fast_factor
+ type: float
+ level: advanced
+ desc: ratio of mds_bal_split_size at which fast fragment splitting occurs
+ fmt_desc: The ratio by which frags may exceed the split size before
+ a split is executed immediately (skipping the fragment interval)
+ default: 1.5
+ services:
+ - mds
+ with_legacy: true
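+# Putting the fragmentation thresholds above together (illustrative Python
+# sketch restating the documented thresholds, not the balancer's own code):
+#
+#   split_size, fast_factor = 10000, 1.5  # mds_bal_split_size / _fast_factor
+#   merge_size, size_max = 50, 100000     # mds_bal_merge_size / _fragment_size_max
+#   entries = 16000                       # entries in one directory fragment
+#   if entries > size_max:
+#       action = "reject new entries with ENOSPC"
+#   elif entries > split_size * fast_factor:
+#       action = "split immediately"
+#   elif entries > split_size:
+#       action = "split after mds_bal_fragment_interval seconds"
+#   elif entries < merge_size:
+#       action = "merge with sibling fragments"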
+- name: mds_bal_fragment_dirs
+ type: bool
+ level: advanced
+ desc: enable directory fragmentation
+ long_desc: Directory fragmentation is a standard feature of CephFS that allows sharding
+ directories across multiple objects for performance and stability. Additionally,
+ this allows fragments to be distributed across multiple active MDSs to increase
+ throughput. Disabling (new) fragmentation should only be done in exceptional circumstances
+ and may lead to performance issues.
+ default: true
+ services:
+ - mds
+- name: mds_bal_idle_threshold
+ type: float
+ level: advanced
+ desc: idle metadata popularity threshold before rebalancing
+ fmt_desc: The minimum temperature before Ceph migrates a subtree
+ back to its parent.
+ default: 0
+ services:
+ - mds
+ with_legacy: true
+- name: mds_bal_max
+ type: int
+ level: dev
+ default: -1
+ services:
+ - mds
+ fmt_desc: The number of iterations to run balancer before Ceph stops.
+ (used for testing purposes only)
+ with_legacy: true
+- name: mds_bal_max_until
+ type: int
+ level: dev
+ default: -1
+ services:
+ - mds
+ fmt_desc: The number of seconds to run balancer before Ceph stops.
+ (used for testing purposes only)
+ with_legacy: true
+- name: mds_bal_mode
+ type: int
+ level: dev
+ default: 0
+ services:
+ - mds
+ fmt_desc: |
+ The method for calculating MDS load.
+
+ - ``0`` = Hybrid.
+ - ``1`` = Request rate and latency.
+ - ``2`` = CPU load.
+ with_legacy: true
+# must be this much above average before we export anything
+- name: mds_bal_min_rebalance
+ type: float
+ level: dev
+ desc: amount overloaded over internal target before balancer begins offloading
+ fmt_desc: The minimum subtree temperature before Ceph migrates.
+ default: 0.1
+ services:
+ - mds
+ with_legacy: true
+# if we need less than this, we don't do anything
+- name: mds_bal_min_start
+ type: float
+ level: dev
+ default: 0.2
+ services:
+ - mds
+ fmt_desc: The minimum subtree temperature before Ceph searches a subtree.
+ with_legacy: true
+# take within this range of what we need
+- name: mds_bal_need_min
+ type: float
+ level: dev
+ default: 0.8
+ services:
+ - mds
+ fmt_desc: The minimum fraction of target subtree size to accept.
+ with_legacy: true
+- name: mds_bal_need_max
+ type: float
+ level: dev
+ default: 1.2
+ services:
+ - mds
+ fmt_desc: The maximum fraction of target subtree size to accept.
+ with_legacy: true
+# any sub bigger than this taken in full
+- name: mds_bal_midchunk
+ type: float
+ level: dev
+ default: 0.3
+ services:
+ - mds
+ fmt_desc: Ceph will migrate any subtree that is larger than this fraction
+ of the target subtree size.
+ with_legacy: true
+# never take anything smaller than this
+- name: mds_bal_minchunk
+ type: float
+ level: dev
+ default: 0.001
+ services:
+ - mds
+ fmt_desc: Ceph will ignore any subtree that is smaller than this fraction
+ of the target subtree size.
+ with_legacy: true
+# target decay half-life in MDSMap (2x larger is approx. 2x slower)
+- name: mds_bal_target_decay
+ type: float
+ level: advanced
+ desc: rate of decay for export targets communicated to clients
+ default: 10
+ services:
+ - mds
+ with_legacy: true
+- name: mds_oft_prefetch_dirfrags
+ type: bool
+ level: advanced
+ desc: prefetch dirfrags recorded in open file table on startup
+ default: false
+ services:
+ - mds
+ flags:
+ - startup
+# time to wait before starting replay again
+- name: mds_replay_interval
+ type: float
+ level: advanced
+ desc: time in seconds between replay of updates to journal by standby replay MDS
+ fmt_desc: The journal poll interval when in standby-replay mode.
+ ("hot standby")
+ default: 1
+ services:
+ - mds
+ with_legacy: true
+- name: mds_shutdown_check
+ type: int
+ level: dev
+ default: 0
+ services:
+ - mds
+ fmt_desc: The interval for polling the cache during MDS shutdown.
+ with_legacy: true
+- name: mds_thrash_exports
+ type: int
+ level: dev
+ default: 0
+ services:
+ - mds
+ fmt_desc: Ceph will randomly export subtrees between nodes (testing only).
+ with_legacy: true
+- name: mds_thrash_fragments
+ type: int
+ level: dev
+ default: 0
+ services:
+ - mds
+ fmt_desc: Ceph will randomly fragment or merge directories.
+ with_legacy: true
+- name: mds_dump_cache_on_map
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds
+ fmt_desc: Ceph will dump the MDS cache contents to a file on each MDSMap.
+ with_legacy: true
+- name: mds_dump_cache_after_rejoin
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds
+ fmt_desc: Ceph will dump MDS cache contents to a file after
+ rejoining the cache (during recovery).
+ with_legacy: true
+- name: mds_verify_scatter
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds
+ fmt_desc: Ceph will assert that various scatter/gather invariants
+ are ``true`` (developers only).
+ with_legacy: true
+- name: mds_debug_scatterstat
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds
+ fmt_desc: Ceph will assert that various recursive stat invariants
+ are ``true`` (for developers only).
+ with_legacy: true
+- name: mds_debug_frag
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds
+ fmt_desc: Ceph will verify directory fragmentation invariants
+ when convenient (developers only).
+ with_legacy: true
+- name: mds_debug_auth_pins
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds
+ fmt_desc: Debug auth pin invariants (for developers only).
+ with_legacy: true
+- name: mds_debug_subtrees
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds
+ fmt_desc: Debug subtree invariants (for developers only).
+ with_legacy: true
+- name: mds_abort_on_newly_corrupt_dentry
+ type: bool
+ level: advanced
+ default: true
+ services:
+ - mds
+ fmt_desc: MDS will abort if a dentry is detected to be newly corrupted.
+- name: mds_go_bad_corrupt_dentry
+ type: bool
+ level: advanced
+ default: true
+ services:
+ - mds
+ fmt_desc: MDS will mark a corrupt dentry as bad and isolate it
+ flags:
+ - runtime
+- name: mds_inject_rename_corrupt_dentry_first
+ type: float
+ level: dev
+ default: 0.0
+ services:
+ - mds
+ fmt_desc: probabilistically inject corrupt CDentry::first at rename
+ flags:
+ - runtime
+- name: mds_inject_journal_corrupt_dentry_first
+ type: float
+ level: dev
+ default: 0.0
+ services:
+ - mds
+ fmt_desc: probabilistically inject corrupt CDentry::first at journal load
+ flags:
+ - runtime
+- name: mds_kill_mdstable_at
+ type: int
+ level: dev
+ default: 0
+ services:
+ - mds
+ fmt_desc: Ceph will inject MDS failure in MDSTable code
+ (for developers only).
+ with_legacy: true
+- name: mds_max_export_size
+ type: size
+ level: dev
+ default: 20_M
+ services:
+ - mds
+- name: mds_kill_export_at
+ type: int
+ level: dev
+ default: 0
+ services:
+ - mds
+ fmt_desc: Ceph will inject MDS failure in the subtree export code
+ (for developers only).
+ with_legacy: true
+- name: mds_kill_import_at
+ type: int
+ level: dev
+ default: 0
+ services:
+ - mds
+ fmt_desc: Ceph will inject MDS failure in the subtree import code
+ (for developers only).
+ with_legacy: true
+- name: mds_kill_link_at
+ type: int
+ level: dev
+ default: 0
+ services:
+ - mds
+ fmt_desc: Ceph will inject MDS failure in hard link code
+ (for developers only).
+ with_legacy: true
+- name: mds_kill_rename_at
+ type: int
+ level: dev
+ default: 0
+ services:
+ - mds
+ fmt_desc: Ceph will inject MDS failure in the rename code
+ (for developers only).
+ with_legacy: true
+- name: mds_kill_openc_at
+ type: int
+ level: dev
+ default: 0
+ services:
+ - mds
+ with_legacy: true
+# XXX
+- name: mds_kill_journal_at
+ type: int
+ level: dev
+ default: 0
+ services:
+ - mds
+- name: mds_kill_journal_expire_at
+ type: int
+ level: dev
+ default: 0
+ services:
+ - mds
+ with_legacy: true
+- name: mds_kill_journal_replay_at
+ type: int
+ level: dev
+ default: 0
+ services:
+ - mds
+ with_legacy: true
+- name: mds_journal_format
+ type: uint
+ level: dev
+ default: 1
+ services:
+ - mds
+ with_legacy: true
+- name: mds_kill_create_at
+ type: int
+ level: dev
+ default: 0
+ services:
+ - mds
+ with_legacy: true
+- name: mds_inject_health_dummy
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds
+- name: mds_kill_skip_replaying_inotable
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds
+ fmt_desc: Ceph will skip replaying the inotable when replaying the journal; the
+ primary MDS will crash, while the replacing MDS won't.
+ (for testing only).
+ with_legacy: true
+- name: mds_inject_skip_replaying_inotable
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds
+ fmt_desc: Ceph will skip replaying the inotable when replaying the journal; the
+ primary MDS will crash, while the replacing MDS won't.
+ (for testing only).
+ with_legacy: true
+# fraction [0-1] of MDS modify replies for which sending the client a trace is skipped
+- name: mds_inject_traceless_reply_probability
+ type: float
+ level: dev
+ default: 0
+ services:
+ - mds
+ with_legacy: true
+- name: mds_wipe_sessions
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds
+ fmt_desc: Ceph will delete all client sessions on startup
+ (for testing only).
+ with_legacy: true
+- name: mds_wipe_ino_prealloc
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds
+ fmt_desc: Ceph will delete ino preallocation metadata on startup
+ (for testing only).
+ with_legacy: true
+- name: mds_skip_ino
+ type: int
+ level: dev
+ default: 0
+ services:
+ - mds
+ fmt_desc: The number of inode numbers to skip on startup
+ (for testing only).
+ with_legacy: true
+- name: mds_enable_op_tracker
+ type: bool
+ level: advanced
+ desc: track remote operation progression and statistics
+ default: true
+ services:
+ - mds
+ with_legacy: true
+# Max number of completed ops to track
+- name: mds_op_history_size
+ type: uint
+ level: advanced
+ desc: maximum size for list of historical operations
+ default: 20
+ services:
+ - mds
+ with_legacy: true
+# Oldest completed op to track
+- name: mds_op_history_duration
+ type: uint
+ level: advanced
+ desc: expiration time in seconds of historical operations
+ default: 600
+ services:
+ - mds
+ with_legacy: true
+# how many seconds old makes an op complaint-worthy
+- name: mds_op_complaint_time
+ type: float
+ level: advanced
+ desc: time in seconds to consider an operation blocked after no updates
+ default: 30
+ services:
+ - mds
+ with_legacy: true
+# how many op log messages to show in one go
+- name: mds_op_log_threshold
+ type: int
+ level: dev
+ default: 5
+ services:
+ - mds
+ with_legacy: true
+- name: mds_snap_min_uid
+ type: uint
+ level: advanced
+ desc: minimum uid of client to perform snapshots
+ default: 0
+ services:
+ - mds
+ with_legacy: true
+- name: mds_snap_max_uid
+ type: uint
+ level: advanced
+ desc: maximum uid of client to perform snapshots
+ default: 4294967294
+ services:
+ - mds
+ with_legacy: true
+- name: mds_snap_rstat
+ type: bool
+ level: advanced
+ desc: enable nested rstat for snapshots
+ default: false
+ services:
+ - mds
+ with_legacy: true
+- name: mds_verify_backtrace
+ type: uint
+ level: dev
+ default: 1
+ services:
+ - mds
+ with_legacy: true
+# detect clients which aren't trimming completed requests
+- name: mds_max_completed_flushes
+ type: uint
+ level: dev
+ default: 100000
+ services:
+ - mds
+ with_legacy: true
+- name: mds_max_completed_requests
+ type: uint
+ level: dev
+ default: 100000
+ services:
+ - mds
+ with_legacy: true
+- name: mds_action_on_write_error
+ type: uint
+ level: advanced
+ desc: action to take when MDS cannot write to RADOS (0:ignore, 1:read-only, 2:suicide)
+ default: 1
+ services:
+ - mds
+ with_legacy: true
+- name: mds_mon_shutdown_timeout
+ type: float
+ level: advanced
+ desc: time to wait for mon to receive damaged MDS rank notification
+ default: 5
+ services:
+ - mds
+ with_legacy: true
+# Maximum number of concurrent stray files to purge
+- name: mds_max_purge_files
+ type: uint
+ level: advanced
+ desc: maximum number of deleted files to purge in parallel
+ default: 64
+ services:
+ - mds
+ with_legacy: true
+# Maximum number of concurrent RADOS ops to issue in purging
+- name: mds_max_purge_ops
+ type: uint
+ level: advanced
+ desc: maximum number of purge operations performed in parallel
+ default: 8_K
+ services:
+ - mds
+ with_legacy: true
+# Maximum number of concurrent RADOS ops to issue in purging, scaled by PG count
+- name: mds_max_purge_ops_per_pg
+ type: float
+ level: advanced
+ desc: number of parallel purge operations performed per PG
+ default: 0.5
+ services:
+ - mds
+ with_legacy: true
+- name: mds_purge_queue_busy_flush_period
+ type: float
+ level: dev
+ default: 1
+ services:
+ - mds
+ with_legacy: true
+- name: mds_root_ino_uid
+ type: int
+ level: advanced
+ desc: default uid for new root directory
+ default: 0
+ services:
+ - mds
+ with_legacy: true
+- name: mds_root_ino_gid
+ type: int
+ level: advanced
+ desc: default gid for new root directory
+ default: 0
+ services:
+ - mds
+ with_legacy: true
+- name: mds_max_scrub_ops_in_progress
+ type: int
+ level: advanced
+ desc: maximum number of scrub operations performed in parallel
+ default: 5
+ services:
+ - mds
+ with_legacy: true
+- name: mds_forward_all_requests_to_auth
+ type: bool
+ level: advanced
+ desc: always process op on auth mds
+ default: false
+ services:
+ - mds
+ flags:
+ - runtime
+# Maximum number of damaged frags/dentries before whole MDS rank goes damaged
+- name: mds_damage_table_max_entries
+ type: int
+ level: advanced
+ desc: maximum number of damage table entries
+ default: 10000
+ services:
+ - mds
+ with_legacy: true
+# Maximum increment for client writable range, counted by number of objects
+- name: mds_client_writeable_range_max_inc_objs
+ type: uint
+ level: advanced
+ desc: maximum number of objects in writeable range of a file for a client
+ default: 1_K
+ services:
+ - mds
+ with_legacy: true
+- name: mds_min_caps_per_client
+ type: uint
+ level: advanced
+ desc: minimum number of capabilities a client may hold
+ default: 100
+ services:
+ - mds
+- name: mds_min_caps_working_set
+ type: uint
+ level: advanced
+ desc: number of capabilities a client may hold without cache pressure warnings generated
+ default: 10000
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_max_caps_per_client
+ type: uint
+ level: advanced
+ desc: maximum number of capabilities a client may hold
+ default: 1_M
+ services:
+ - mds
+- name: mds_hack_allow_loading_invalid_metadata
+ type: bool
+ level: advanced
+ desc: INTENTIONALLY CAUSE DATA LOSS by bypassing checks for invalid metadata on disk.
+ Allows testing repair tools.
+ default: false
+ services:
+ - mds
+- name: mds_defer_session_stale
+ type: bool
+ level: dev
+ default: true
+ services:
+ - mds
+- name: mds_inject_migrator_session_race
+ type: bool
+ level: dev
+ default: false
+ services:
+ - mds
+- name: mds_request_load_average_decay_rate
+ type: float
+ level: advanced
+ desc: rate of decay in seconds for calculating request load average
+ default: 1_min
+ services:
+ - mds
+- name: mds_cap_revoke_eviction_timeout
+ type: float
+ level: advanced
+ desc: number of seconds after which clients which have not responded to cap revoke
+ messages by the MDS are evicted.
+ default: 0
+ services:
+ - mds
+- name: mds_dump_cache_threshold_formatter
+ type: size
+ level: dev
+ desc: threshold for cache usage to disallow "dump cache" operation to formatter
+ long_desc: Disallow MDS from dumping caches to formatter via "dump cache" command
+ if cache usage exceeds this threshold.
+ default: 1_G
+ services:
+ - mds
+- name: mds_dump_cache_threshold_file
+ type: size
+ level: dev
+ desc: threshold for cache usage to disallow "dump cache" operation to file
+ long_desc: Disallow MDS from dumping caches to file via "dump cache" command if
+ cache usage exceeds this threshold.
+ default: 0
+ services:
+ - mds
+- name: mds_task_status_update_interval
+ type: float
+ level: dev
+ desc: task status update interval to manager
+ long_desc: interval (in seconds) for sending mds task status to ceph manager
+ default: 2
+ services:
+ - mds
+- name: mds_max_snaps_per_dir
+ type: uint
+ level: advanced
+ desc: max snapshots per directory
+ long_desc: maximum number of snapshots that can be created per directory
+ default: 100
+ services:
+ - mds
+ min: 0
+ max: 4_K
+ flags:
+ - runtime
+- name: mds_asio_thread_count
+ type: uint
+ level: advanced
+ desc: Size of thread pool for ASIO completions
+ default: 2
+ tags:
+ - mds
+ services:
+ - mds
+ min: 1
+- name: mds_ping_grace
+ type: secs
+ level: advanced
+ desc: timeout after which an MDS is considered laggy by rank 0 MDS.
+ long_desc: timeout for replying to a ping message sent by rank 0, after which an
+ active MDS is considered laggy (delayed metrics) by rank 0.
+ default: 15
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_ping_interval
+ type: secs
+ level: advanced
+ desc: interval in seconds for sending ping messages to active MDSs.
+ long_desc: interval in seconds for rank 0 to send ping messages to all active MDSs.
+ default: 5
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_metrics_update_interval
+ type: secs
+ level: advanced
+ desc: interval in seconds for metrics data update.
+ long_desc: interval in seconds after which active MDSs send client metrics data
+ to rank 0.
+ default: 2
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_dir_max_entries
+ type: uint
+ level: advanced
+ desc: maximum number of entries per directory before new create/link operations fail
+ long_desc: The maximum number of entries before any new entries
+ are rejected with ENOSPC.
+ default: 0
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_sleep_rank_change
+ type: float
+ level: dev
+ default: 0.0
+ flags:
+ - runtime
+- name: mds_connect_bootstrapping
+ type: bool
+ level: dev
+ default: false
+ flags:
+ - runtime
+- name: mds_symlink_recovery
+ type: bool
+ level: advanced
+ desc: Stores the symlink target on the first data object of the symlink file.
+ Allows recovery of symlinks using recovery tools.
+ default: true
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_extraordinary_events_dump_interval
+ type: secs
+ level: advanced
+ desc: Interval in seconds for dumping the recent in-memory logs when there is an extraordinary event.
+ long_desc: Interval in seconds for dumping the recent in-memory logs when there is an extraordinary
+ event. The default is ``0`` (disabled). To enable this option, the debug_mds log level
+ must be ``< 10`` and the gather level ``>= 10``.
+ default: 0
+ min: 0
+ max: 60
+ services:
+ - mds
+ flags:
+ - runtime
+- name: mds_session_metadata_threshold
+ type: size
+ level: advanced
+ desc: Evict non-advancing client-tid sessions exceeding the config size.
+ long_desc: Evict clients which are not advancing their request tids, which causes a large buildup of session metadata (`completed_requests`) in the MDS and can cause the MDS to go read-only once the RADOS operation exceeds the size threshold. This config is the maximum size (in bytes) to which a session's (encoded) metadata can grow.
+ default: 16_M
+ services:
+ - mds
+ flags:
+ - runtime
diff --git a/src/common/options/mgr.yaml.in b/src/common/options/mgr.yaml.in
new file mode 100644
index 000000000..7d7b68035
--- /dev/null
+++ b/src/common/options/mgr.yaml.in
@@ -0,0 +1,362 @@
+# -*- mode: YAML -*-
+---
+
+options:
+- name: mgr_data
+ type: str
+ level: advanced
+ desc: Filesystem path to the ceph-mgr data directory, used to contain keyring.
+ fmt_desc: Path to load daemon data (such as keyring)
+ default: /var/lib/ceph/mgr/$cluster-$id
+ services:
+ - mgr
+ flags:
+ - no_mon_update
+- name: mgr_pool
+ type: bool
+ level: dev
+ desc: Allow use/creation of .mgr pool.
+ default: true
+ services:
+ - mgr
+ flags:
+ - startup
+- name: mgr_stats_period
+ type: int
+ level: basic
+ desc: Period in seconds of OSD/MDS stats reports to manager
+ long_desc: Use this setting to control the granularity of time series data collection
+ from daemons. Adjust upwards if the manager CPU load is too high, or if you simply
+ do not require the most up to date performance counter data.
+ default: 5
+ services:
+ - mgr
+ - common
+- name: mgr_client_bytes
+ type: size
+ level: dev
+ default: 128_M
+ services:
+ - mgr
+- name: mgr_client_messages
+ type: uint
+ level: dev
+ default: 512
+ services:
+ - mgr
+- name: mgr_osd_bytes
+ type: size
+ level: dev
+ default: 512_M
+ services:
+ - mgr
+- name: mgr_osd_messages
+ type: uint
+ level: dev
+ default: 8_K
+ services:
+ - mgr
+- name: mgr_mds_bytes
+ type: size
+ level: dev
+ default: 128_M
+ services:
+ - mgr
+- name: mgr_mds_messages
+ type: uint
+ level: dev
+ default: 128
+ services:
+ - mgr
+- name: mgr_mon_bytes
+ type: size
+ level: dev
+ default: 128_M
+ services:
+ - mgr
+- name: mgr_mon_messages
+ type: uint
+ level: dev
+ default: 128
+ services:
+ - mgr
+- name: mgr_service_beacon_grace
+ type: float
+ level: advanced
+ desc: Period in seconds from last beacon to manager dropping state about a monitored
+ service (RGW, rbd-mirror etc)
+ default: 1_min
+ services:
+ - mgr
+- name: mgr_debug_aggressive_pg_num_changes
+ type: bool
+ level: dev
+ desc: Bypass most throttling and safety checks in pg[p]_num controller
+ default: false
+ services:
+ - mgr
+- name: mgr_max_pg_num_change
+ type: int
+ level: advanced
+ desc: maximum change in pg_num
+ default: 128
+ services:
+ - mgr
+ with_legacy: true
+- name: mgr_module_path
+ type: str
+ level: advanced
+ desc: Filesystem path to manager modules.
+ fmt_desc: Path to load modules from
+ default: @CEPH_INSTALL_DATADIR@/mgr
+ services:
+ - mgr
+- name: mgr_standby_modules
+ type: bool
+ default: true
+ level: advanced
+ desc: Start modules in standby (redirect) mode when mgr is standby
+ long_desc: By default, the standby modules will answer incoming requests with a
+ HTTP redirect to the active manager, allowing users to point their browser at any
+ mgr node and find their way to an active mgr. However, this mode is problematic
+ when using a load balancer because (1) the redirect locations are usually private
+ IPs and (2) the load balancer can't identify which mgr is the right one to send
+ traffic to. If a load balancer is being used, set this to false.
+- name: mgr_disabled_modules
+ type: str
+ level: advanced
+ desc: List of manager modules that are never loaded
+ long_desc: A comma-delimited list of module names. This list is read by the manager
+ when it starts. By default, the manager loads all modules found in the specified
+ 'mgr_module_path' and starts the enabled ones as instructed. The modules in this
+ list are not loaded at all.
+ default: @mgr_disabled_modules@
+ services:
+ - mgr
+ see_also:
+ - mgr_module_path
+ flags:
+ - startup
+- name: mgr_initial_modules
+ type: str
+ level: basic
+ desc: List of manager modules to enable when the cluster is first started
+ long_desc: This list of module names is read by the monitor when the cluster is
+ first started after installation, to populate the list of enabled manager modules. Subsequent
+ updates are done using the 'mgr module [enable|disable]' commands. List may be
+ comma or space separated.
+ default: restful iostat nfs
+ services:
+ - mon
+ - common
+ flags:
+ - no_mon_update
+ - cluster_create
+- name: cephadm_path
+ type: str
+ level: advanced
+ desc: Path to cephadm utility
+ default: /usr/sbin/cephadm
+ services:
+ - mgr
+- name: mon_delta_reset_interval
+ type: float
+ level: advanced
+ desc: window duration for rate calculations in 'ceph status'
+ fmt_desc: Seconds of inactivity before we reset the PG delta to 0. We keep
+ track of the delta of the used space of each pool, which makes it
+ easier, for example, to follow the progress of recovery or the
+ performance of a cache tier. If no activity is reported for a
+ certain pool, we simply reset the history of deltas for that pool.
+ default: 10
+ services:
+ - mgr
+ with_legacy: true
+- name: mon_stat_smooth_intervals
+ type: uint
+ level: advanced
+ desc: number of PGMaps stats over which we calc the average read/write throughput
+ of the whole cluster
+ fmt_desc: Ceph will smooth statistics over the last ``N`` PG maps.
+ default: 6
+ services:
+ - mgr
+ min: 1
+- name: mon_pool_quota_warn_threshold
+ type: int
+ level: advanced
+ desc: percent of quota at which to issue warnings
+ default: 0
+ services:
+ - mgr
+- name: mon_pool_quota_crit_threshold
+ type: int
+ level: advanced
+ desc: percent of quota at which to issue errors
+ default: 0
+ services:
+ - mgr
+- name: mon_cache_target_full_warn_ratio
+ type: float
+ level: advanced
+ desc: issue CACHE_POOL_NEAR_FULL health warning when cache pool utilization exceeds
+ this ratio of usable space
+ fmt_desc: Position between pool's ``cache_target_full`` and ``target_max_objects``
+ where we start warning
+ default: 0.66
+ services:
+ - mgr
+ flags:
+ - no_mon_update
+ - cluster_create
+ with_legacy: true
+- name: mon_pg_check_down_all_threshold
+ type: float
+ level: advanced
+ desc: threshold of down osds after which we check all pgs
+ fmt_desc: Percentage threshold of ``down`` OSDs above which we check all PGs
+ for stale ones.
+ default: 0.5
+ services:
+ - mgr
+ with_legacy: true
+- name: mon_pg_stuck_threshold
+ type: int
+ level: advanced
+ desc: number of seconds after which pgs can be considered stuck inactive, unclean,
+ etc
+ long_desc: see doc/control.rst under dump_stuck for more info
+ fmt_desc: Number of seconds after which PGs can be considered as
+ being stuck.
+ default: 1_min
+ services:
+ - mgr
+- name: mon_pg_warn_min_per_osd
+ type: uint
+ level: advanced
+ desc: minimum number of PGs per (in) OSD below which we warn the admin
+ fmt_desc: Raise ``HEALTH_WARN`` if the average number
+ of PGs per ``in`` OSD is under this number. A non-positive number
+ disables this.
+ default: 0
+ services:
+ - mgr
+- name: mon_pg_warn_max_object_skew
+ type: float
+ level: advanced
+ desc: maximum skew between a pool's average objects per PG and the overall average before warning
+ fmt_desc: Raise ``HEALTH_WARN`` if the average RADOS object count per PG
+ of any pool is greater than ``mon_pg_warn_max_object_skew`` times
+ the average RADOS object count per PG of all pools. Zero or a non-positive
+ number disables this. Note that this option applies to ``ceph-mgr`` daemons.
+ default: 10
+ services:
+ - mgr
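+# The skew check above, restated (illustrative Python; taking "average of all
+# pools" as total objects / total PGs is an assumption of this sketch):
+#
+#   pools = {f"p{i}": (1000, 32) for i in range(19)}  # name -> (objects, pg_num)
+#   pools["hot"] = (400000, 32)
+#   overall = (sum(o for o, _ in pools.values())
+#              / sum(pg for _, pg in pools.values()))  # ~655 objects per PG
+#   skew = 10.0                                        # this option's default
+#   warn = [p for p, (o, pg) in pools.items() if o / pg > overall * skew]
+#   # -> ['hot']: 12500 objects/PG against a ~6547 objects/PG ceiling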
+- name: mon_pg_warn_min_objects
+ type: int
+ level: advanced
+ desc: 'do not warn below this object #'
+ fmt_desc: Do not warn if the total number of RADOS objects in cluster is below
+ this number
+ default: 10000
+ services:
+ - mgr
+- name: mon_pg_warn_min_pool_objects
+ type: int
+ level: advanced
+ desc: 'do not warn on pools below this object #'
+ fmt_desc: Do not warn on pools whose RADOS object count is below this number
+ default: 1000
+ services:
+ - mgr
+- name: mon_warn_on_misplaced
+ type: bool
+ level: advanced
+ desc: Issue a health warning if there are misplaced objects
+ default: false
+ services:
+ - mgr
+ with_legacy: true
+- name: mon_warn_on_pool_no_app
+ type: bool
+ level: dev
+ desc: issue POOL_APP_NOT_ENABLED health warning if a pool has no application enabled
+ default: true
+ services:
+ - mgr
+- name: mon_warn_on_too_few_osds
+ type: bool
+ level: advanced
+ desc: Issue a health warning if there are fewer OSDs than osd_pool_default_size
+ default: true
+ services:
+ - mgr
+- name: mon_target_pg_per_osd
+ type: uint
+ level: advanced
+ desc: Automated PG management creates this many PGs per OSD
+ long_desc: When creating pools, the automated PG management logic will attempt to
+ reach this target. In some circumstances, it may exceed this target, up to the
+ ``mon_max_pg_per_osd`` limit. Conversely, a lower number of PGs per OSD may be
+ created if the cluster is not yet fully utilized
+ default: 100
+ min: 1
+# min pgs per osd for reweight-by-pg command
+- name: mon_reweight_min_pgs_per_osd
+ type: uint
+ level: advanced
+ default: 10
+ services:
+ - mgr
+ with_legacy: true
+# min bytes per osd for reweight-by-utilization command
+- name: mon_reweight_min_bytes_per_osd
+ type: size
+ level: advanced
+ default: 100_M
+ services:
+ - mgr
+ with_legacy: true
+# max osds to change per reweight-by-* command
+- name: mon_reweight_max_osds
+ type: int
+ level: advanced
+ default: 4
+ services:
+ - mgr
+ with_legacy: true
+- name: mon_reweight_max_change
+ type: float
+ level: advanced
+ default: 0.05
+ services:
+ - mgr
+ with_legacy: true
+- name: mgr_stats_threshold
+ type: int
+ level: advanced
+ desc: Lowest perfcounter priority collected by mgr
+ long_desc: Daemons only send perf counter data to the manager daemon if the counter
+ has a priority higher than this.
+ default: 5
+ min: 0
+ max: 11
+- name: mgr_tick_period
+ type: secs
+ level: advanced
+ desc: Period in seconds of beacon messages to monitor
+ fmt_desc: How many seconds between mgr beacons to monitors, and other
+ periodic checks.
+ default: 2
+ services:
+ - mgr
+ - mon
+- name: mon_osd_err_op_age_ratio
+ type: float
+ level: advanced
+ desc: issue REQUEST_STUCK health error if OSD ops are older than this age (seconds)
+ default: 128
+ services:
+ - mgr
+ with_legacy: true
diff --git a/src/common/options/mon.yaml.in b/src/common/options/mon.yaml.in
new file mode 100644
index 000000000..1cd655ad4
--- /dev/null
+++ b/src/common/options/mon.yaml.in
@@ -0,0 +1,1340 @@
+# -*- mode: YAML -*-
+---
+
+options:
+- name: osd_crush_update_weight_set
+ type: bool
+ level: advanced
+ desc: update CRUSH weight-set weights when updating weights
+ long_desc: If this setting is true, we will update the weight-set weights when adjusting
+ an item's weight, effectively making changes take effect immediately, and discarding
+ any previous optimization in the weight-set value. Setting this value to false
+ will leave it to the balancer to (slowly, presumably) adjust weights to approach
+ the new target value.
+ default: true
+ with_legacy: true
+- name: osd_pool_erasure_code_stripe_unit
+ type: size
+ level: advanced
+ desc: the amount of data (in bytes) in a data chunk, per stripe
+ fmt_desc: Sets the default size, in bytes, of a chunk of an object
+ stripe for erasure coded pools. Every object of size S
+ will be stored as N stripes, with each data chunk
+ receiving ``stripe unit`` bytes. Each stripe of ``N *
+ stripe unit`` bytes will be encoded/decoded
+ individually. This option is overridden by the
+ ``stripe_unit`` setting in an erasure code profile.
+ default: 4_K
+ services:
+ - mon
+- name: osd_pool_default_crimson
+ type: bool
+ level: advanced
+ desc: Create pools by default with FLAG_CRIMSON
+ default: false
+ services:
+ - mon
+ flags:
+ - runtime
+- name: mon_max_pool_pg_num
+ type: uint
+ level: advanced
+ default: 64_K
+ fmt_desc: The maximum number of placement groups per pool.
+- name: mon_mgr_digest_period
+ type: int
+ level: dev
+ desc: Period in seconds between monitor-to-manager health/status updates
+ default: 5
+ services:
+ - mon
+- name: mon_down_mkfs_grace
+ type: secs
+ level: advanced
+ desc: Period in seconds that the cluster may have a mon down after cluster creation
+ default: 1_min
+ services:
+ - mon
+- name: mon_mgr_beacon_grace
+ type: secs
+ level: advanced
+ desc: Period in seconds from last beacon to monitor marking a manager daemon as
+ failed
+ default: 30
+ services:
+ - mon
+- name: mon_mgr_inactive_grace
+ type: int
+ level: advanced
+ desc: Period in seconds after cluster creation during which cluster may have no
+ active manager
+ long_desc: This grace period enables the cluster to come up cleanly without raising
+ spurious health check failures about managers that aren't online yet
+ default: 1_min
+ services:
+ - mon
+- name: mon_mgr_mkfs_grace
+ type: int
+ level: advanced
+ desc: Period in seconds that the cluster may have no active manager before this
+ is reported as an ERR rather than a WARN
+ default: 2_min
+ services:
+ - mon
+- name: mon_mgr_proxy_client_bytes_ratio
+ type: float
+ level: dev
+ desc: ratio of mon_client_bytes that can be consumed by proxied mgr commands before
+ we error out to client
+ default: 0.3
+ services:
+ - mon
+- name: mon_cluster_log_to_stderr
+ type: bool
+ level: advanced
+ desc: Make monitor send cluster log messages to stderr (prefixed by channel)
+ default: false
+ services:
+ - mon
+ see_also:
+ - log_stderr_prefix
+ flags:
+ - runtime
+ with_legacy: true
+- name: mon_cluster_log_to_syslog
+ type: str
+ level: advanced
+ desc: Make monitor send cluster log messages to syslog
+ fmt_desc: Determines if the cluster log should be output to the syslog.
+ default: default=false
+ services:
+ - mon
+ flags:
+ - runtime
+ with_legacy: true
+- name: mon_cluster_log_to_syslog_level
+ type: str
+ level: advanced
+ desc: Syslog level for cluster log messages
+ default: info
+ services:
+ - mon
+ see_also:
+ - mon_cluster_log_to_syslog
+ flags:
+ - runtime
+ with_legacy: true
+- name: mon_cluster_log_to_syslog_facility
+ type: str
+ level: advanced
+ desc: Syslog facility for cluster log messages
+ default: daemon
+ services:
+ - mon
+ see_also:
+ - mon_cluster_log_to_syslog
+ flags:
+ - runtime
+ with_legacy: true
+- name: mon_cluster_log_to_file
+ type: bool
+ level: advanced
+ desc: Make monitor send cluster log messages to file
+ default: true
+ services:
+ - mon
+ see_also:
+ - mon_cluster_log_file
+ flags:
+ - runtime
+ with_legacy: true
+- name: mon_cluster_log_file
+ type: str
+ level: advanced
+ desc: File(s) to write cluster log to
+ long_desc: This can either be a simple file name to receive all messages, or a list
+ of key/value pairs where the key is the log channel and the value is the filename,
+ which may include $cluster and $channel metavariables
+ fmt_desc: |
+ The locations of the cluster's log files. There are two channels in
+ Ceph: ``cluster`` and ``audit``. This option represents a mapping
+ from channels to log files, where the log entries of that
+ channel are sent to. The ``default`` entry is a fallback
+ mapping for channels not explicitly specified. So, the following
+ default setting will send cluster log to ``$cluster.log``, and
+ send audit log to ``$cluster.audit.log``, where ``$cluster`` will
+ be replaced with the actual cluster name.
+ default: default=/var/log/ceph/$cluster.$channel.log cluster=/var/log/ceph/$cluster.log
+ services:
+ - mon
+ see_also:
+ - mon_cluster_log_to_file
+ flags:
+ - runtime
+ with_legacy: true
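+# How the default mapping above resolves per channel (illustrative Python; the
+# monitor's own parser is not shown here):
+#
+#   raw = ("default=/var/log/ceph/$cluster.$channel.log "
+#          "cluster=/var/log/ceph/$cluster.log")
+#   mapping = dict(kv.split("=", 1) for kv in raw.split())
+#   def logfile(channel: str, cluster: str = "ceph") -> str:
+#       path = mapping.get(channel, mapping["default"])
+#       return path.replace("$cluster", cluster).replace("$channel", channel)
+#   logfile("audit")    # -> '/var/log/ceph/ceph.audit.log' (falls back to default)
+#   logfile("cluster")  # -> '/var/log/ceph/ceph.log'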
+- name: mon_cluster_log_file_level
+ type: str
+ level: advanced
+ desc: Lowest level to include in cluster log file
+ default: debug
+ services:
+ - mon
+ see_also:
+ - mon_cluster_log_file
+ flags:
+ - runtime
+ with_legacy: true
+- name: mon_cluster_log_to_graylog
+ type: str
+ level: advanced
+ desc: Make monitor send cluster log to graylog
+ default: 'false'
+ services:
+ - mon
+ flags:
+ - runtime
+ with_legacy: true
+- name: mon_cluster_log_to_graylog_host
+ type: str
+ level: advanced
+ desc: Graylog host for cluster log messages
+ default: 127.0.0.1
+ services:
+ - mon
+ see_also:
+ - mon_cluster_log_to_graylog
+ flags:
+ - runtime
+ with_legacy: true
+- name: mon_cluster_log_to_graylog_port
+ type: str
+ level: advanced
+ desc: Graylog port for cluster log messages
+ default: '12201'
+ services:
+ - mon
+ see_also:
+ - mon_cluster_log_to_graylog
+ flags:
+ - runtime
+ with_legacy: true
+- name: mon_cluster_log_to_journald
+ type: str
+ level: advanced
+ desc: Make monitor send cluster log to journald
+ default: 'false'
+ services:
+ - mon
+ flags:
+ - runtime
+- name: mon_log_max
+ type: uint
+ level: advanced
+ desc: number of recent cluster log messages to retain
+ default: 10000
+ services:
+ - mon
+ with_legacy: true
+- name: mon_log_max_summary
+ type: uint
+ level: advanced
+ desc: number of recent cluster log messages to dedup against
+ default: 50
+ services:
+ - mon
+ with_legacy: true
+- name: mon_log_full_interval
+ type: uint
+ level: advanced
+ desc: how many epochs before we encode a full copy of recent log keys
+ default: 50
+ services:
+ - mon
+ with_legacy: true
+- name: mon_max_log_entries_per_event
+ type: int
+ level: advanced
+ desc: max cluster log entries per paxos event
+ fmt_desc: The maximum number of log entries per event.
+ default: 4096
+ services:
+ - mon
+ with_legacy: true
+- name: mon_health_to_clog
+ type: bool
+ level: advanced
+ desc: log monitor health to cluster log
+ fmt_desc: Enable sending a health summary to the cluster log periodically.
+ default: true
+ services:
+ - mon
+ with_legacy: true
+- name: mon_health_to_clog_interval
+ type: int
+ level: advanced
+ desc: frequency to log monitor health to cluster log
+ fmt_desc: How often (in seconds) the monitor sends a health summary to the cluster
+ log (a non-positive number disables it). Monitors will always
+ send a summary to the cluster log whether or not it differs from
+ the previous summary.
+ default: 10_min
+ services:
+ - mon
+ see_also:
+ - mon_health_to_clog
+ with_legacy: true
+- name: mon_health_to_clog_tick_interval
+ type: float
+ level: dev
+ fmt_desc: How often (in seconds) the monitor sends a health summary to the cluster
+ log (a non-positive number disables it). If the current health summary
+ is empty or identical to the last one, the monitor will not send it
+ to the cluster log.
+ default: 1_min
+ services:
+ - mon
+ with_legacy: true
+- name: mon_health_detail_to_clog
+ type: bool
+ level: dev
+ desc: log health detail to cluster log
+ default: true
+ with_legacy: true
+- name: mon_warn_on_filestore_osds
+ type: bool
+ level: dev
+ desc: log health warn for filestore OSDs
+ default: true
+ with_legacy: true
+- name: mon_health_max_detail
+ type: uint
+ level: advanced
+ desc: max detailed pgs to report in health detail
+ default: 50
+ services:
+ - mon
+- name: mon_health_log_update_period
+ type: int
+ level: dev
+ desc: minimum time in seconds between log messages about each health check
+ default: 5
+ services:
+ - mon
+ min: 0
+- name: mon_data_avail_crit
+ type: int
+ level: advanced
+ desc: issue MON_DISK_CRIT health error when mon available space is below this percentage
+ fmt_desc: Raise ``HEALTH_ERR`` status when the filesystem that houses a
+ monitor's data store reports that its available capacity is
+ less than or equal to this percentage.
+ default: 5
+ services:
+ - mon
+ with_legacy: true
+- name: mon_data_avail_warn
+ type: int
+ level: advanced
+ desc: issue MON_DISK_LOW health warning when mon available space is below this percentage
+ fmt_desc: Raise ``HEALTH_WARN`` status when the filesystem that houses a
+ monitor's data store reports that its available capacity is
+ less than or equal to this percentage.
+ default: 30
+ services:
+ - mon
+ with_legacy: true
+- name: mon_data_size_warn
+ type: size
+ level: advanced
+ desc: issue MON_DISK_BIG health warning when mon database is above this size
+ fmt_desc: Raise ``HEALTH_WARN`` status when a monitor's data
+ store grows to be larger than this size, 15GB by default.
+ default: 15_G
+ services:
+ - mon
+ with_legacy: true
+- name: mon_daemon_bytes
+ type: size
+ level: advanced
+ desc: max bytes of outstanding mon messages mon will read off the network
+ fmt_desc: The message memory cap for metadata server and OSD messages (in bytes).
+ default: 400_M
+ services:
+ - mon
+ with_legacy: true
+- name: mon_election_timeout
+ type: float
+ level: advanced
+ desc: maximum time for a mon election (seconds)
+ fmt_desc: On election proposer, maximum waiting time for all ACKs in seconds.
+ default: 5
+ services:
+ - mon
+ with_legacy: true
+- name: mon_election_default_strategy
+ type: uint
+ level: advanced
+ desc: The election strategy to set when constructing the first monmap.
+ default: 1
+ min: 1
+ max: 3
+- name: mon_lease
+ type: float
+ level: advanced
+ desc: lease interval between quorum monitors (seconds)
+ long_desc: This setting controls how sensitive your mon quorum is to intermittent
+ network issues or other failures.
+ fmt_desc: The length (in seconds) of the lease on the monitor's versions.
+ default: 5
+ services:
+ - mon
+ with_legacy: true
+- name: mon_lease_renew_interval_factor
+ type: float
+ level: advanced
+ desc: multiple of mon_lease for the lease renewal interval
+ long_desc: Leases must be renewed before they time out. A smaller value means frequent
+ renewals, while a value close to 1 makes a lease expiration more likely.
+ fmt_desc: |
+ ``mon_lease`` \* ``mon_lease_renew_interval_factor`` will be the
+ interval for the Leader to renew the other monitors' leases. The
+ factor should be less than ``1.0``.
+ default: 0.6
+ services:
+ - mon
+ see_also:
+ - mon_lease
+ min: 0
+ max: 0.9999999
+ with_legacy: true
+- name: mon_lease_ack_timeout_factor
+ type: float
+ level: advanced
+ desc: multiple of mon_lease for the lease ack interval before calling a new election
+ fmt_desc: The Leader will wait ``mon_lease`` \* ``mon_lease_ack_timeout_factor``
+ for the Providers to acknowledge the lease extension.
+ default: 2
+ services:
+ - mon
+ see_also:
+ - mon_lease
+ min: 1.0001
+ max: 100
+ with_legacy: true
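+# Derived lease timings with the defaults above (plain arithmetic):
+#
+#   mon_lease = 5.0
+#   renew_interval = mon_lease * 0.6  # mon_lease_renew_interval_factor -> 3 s
+#   ack_timeout = mon_lease * 2.0     # mon_lease_ack_timeout_factor    -> 10 s
+#   # the leader renews leases every 3 s; if acks are missing for 10 s,
+#   # it calls a new election.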
+- name: mon_accept_timeout_factor
+ type: float
+ level: advanced
+ desc: multiple of mon_lease for follower mons to accept proposed state changes before
+ calling a new election
+ fmt_desc: The Leader will wait ``mon_lease`` \* ``mon_accept_timeout_factor``
+ for the Requester(s) to accept a Paxos update. It is also used
+ during the Paxos recovery phase for similar purposes.
+ default: 2
+ services:
+ - mon
+ see_also:
+ - mon_lease
+ with_legacy: true
+- name: mon_elector_ping_timeout
+ type: float
+ level: advanced
+ desc: The time after which a ping 'times out' and a connection is considered down
+ default: 2
+ services:
+ - mon
+ see_also:
+ - mon_elector_ping_divisor
+- name: mon_elector_ping_divisor
+ type: uint
+ level: advanced
+ desc: We will send a ping up to this many times per timeout period
+ default: 2
+ services:
+ - mon
+ see_also:
+ - mon_elector_ping_timeout
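+# Relationship between the two elector ping options (reading "this many times
+# per timeout period" as timeout / divisor is an inference from the
+# descriptions above, not a quote of the implementation):
+#
+#   ping_timeout = 2.0                       # mon_elector_ping_timeout
+#   divisor = 2                              # mon_elector_ping_divisor
+#   ping_interval = ping_timeout / divisor   # -> send a ping every 1.0 s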
+- name: mon_con_tracker_persist_interval
+ type: uint
+ level: advanced
+ desc: how many updates the ConnectionTracker takes before it persists to disk
+ default: 10
+ services:
+ - mon
+ min: 1
+ max: 100000
+- name: mon_con_tracker_score_halflife
+ type: uint
+ level: advanced
+ desc: The 'halflife' used when updating/calculating peer connection scores
+ default: 43200
+ services:
+ - mon
+ min: 60
+- name: mon_elector_ignore_propose_margin
+ type: float
+ level: advanced
+ desc: The difference in connection score allowed before a peon stops ignoring out-of-quorum
+ PROPOSEs
+ default: 0.0005
+ services:
+ - mon
+- name: mon_warn_on_cache_pools_without_hit_sets
+ type: bool
+ level: advanced
+ desc: issue CACHE_POOL_NO_HIT_SET health warning for cache pools that do not have
+ hit sets configured
+ fmt_desc: Raise ``HEALTH_WARN`` when a cache pool does not have the ``hit_set_type``
+ value configured. See :ref:`hit_set_type <hit_set_type>` for more details.
+ default: true
+ services:
+ - mon
+ with_legacy: true
+- name: mon_warn_on_pool_pg_num_not_power_of_two
+ type: bool
+ level: dev
+ desc: issue POOL_PG_NUM_NOT_POWER_OF_TWO warning if pool has a non-power-of-two
+ pg_num value
+ default: true
+ services:
+ - mon
+- name: mon_allow_pool_size_one
+ type: bool
+ level: advanced
+ desc: allow configuring pool with no replicas
+ default: false
+ services:
+ - mon
+- name: mon_warn_on_crush_straw_calc_version_zero
+ type: bool
+ level: advanced
+ desc: issue OLD_CRUSH_STRAW_CALC_VERSION health warning if the CRUSH map's straw_calc_version
+ is zero
+ fmt_desc: Raise ``HEALTH_WARN`` when the CRUSH ``straw_calc_version`` is zero. See
+ :ref:`CRUSH map tunables <crush-map-tunables>` for details.
+ default: true
+ services:
+ - mon
+ with_legacy: true
+- name: mon_warn_on_pool_no_redundancy
+ type: bool
+ level: advanced
+ desc: Issue a health warning if any pool is configured with no replicas
+ fmt_desc: Raise ``HEALTH_WARN`` if any pool is configured with no replicas.
+ default: true
+ services:
+ - mon
+ see_also:
+ - osd_pool_default_size
+ - osd_pool_default_min_size
+- name: mon_warn_on_osd_down_out_interval_zero
+ type: bool
+ level: advanced
+ desc: issue OSD_NO_DOWN_OUT_INTERVAL health warning if mon_osd_down_out_interval
+ is zero
+ long_desc: Having mon_osd_down_out_interval set to 0 means that down OSDs are not
+ marked out automatically and the cluster does not heal itself without administrator
+ intervention.
+ fmt_desc: Raise ``HEALTH_WARN`` when ``mon_osd_down_out_interval`` is zero. Having this
+ option set to zero on the leader acts much like the ``noout`` flag. Because it is
+ hard to diagnose a cluster that behaves as if ``noout`` were set when the flag is
+ not actually set, we report a warning in this case.
+ default: true
+ services:
+ - mon
+ see_also:
+ - mon_osd_down_out_interval
+ with_legacy: true
+- name: mon_warn_on_legacy_crush_tunables
+ type: bool
+ level: advanced
+ desc: issue OLD_CRUSH_TUNABLES health warning if CRUSH tunables are older than mon_crush_min_required_version
+ fmt_desc: Raise ``HEALTH_WARN`` when CRUSH tunables are too old (older than ``mon_crush_min_required_version``)
+ default: true
+ services:
+ - mon
+ see_also:
+ - mon_crush_min_required_version
+ with_legacy: true
+- name: mon_crush_min_required_version
+ type: str
+ level: advanced
+ desc: minimum ceph release to use for mon_warn_on_legacy_crush_tunables
+ fmt_desc: The minimum tunable profile required by the cluster. See
+ :ref:`CRUSH map tunables <crush-map-tunables>` for details.
+ default: hammer
+ services:
+ - mon
+ see_also:
+ - mon_warn_on_legacy_crush_tunables
+ with_legacy: true
+- name: mon_warn_on_degraded_stretch_mode
+ type: bool
+ level: advanced
+ desc: Issue a health warning if we are in degraded stretch mode
+ default: true
+ services:
+ - mon
+- name: mon_stretch_cluster_recovery_ratio
+ type: float
+ level: advanced
+ desc: the ratio of up OSDs at which a degraded stretch cluster enters recovery
+ default: 0.6
+ services:
+ - mon
+ min: 0.51
+ max: 1
+- name: mon_stretch_recovery_min_wait
+ type: float
+ level: advanced
+ desc: how long the monitors wait before considering fully-healthy PGs as evidence
+ the stretch mode is repaired
+ default: 15
+ services:
+ - mon
+ min: 1
+- name: mon_stretch_pool_size
+ type: uint
+ level: dev
+ default: 4
+ services:
+ - mon
+ min: 3
+ max: 6
+- name: mon_stretch_pool_min_size
+ type: uint
+ level: dev
+ default: 2
+ services:
+ - mon
+ min: 2
+ max: 4
+- name: mon_clock_drift_allowed
+ type: float
+ level: advanced
+ desc: allowed clock drift (in seconds) between mons before issuing a health warning
+ default: 0.05
+ services:
+ - mon
+ with_legacy: true
+# exponential backoff for clock drift warnings
+- name: mon_clock_drift_warn_backoff
+ type: float
+ level: advanced
+ desc: exponential backoff factor for logging clock drift warnings in the cluster
+ log
+ default: 5
+ services:
+ - mon
+ with_legacy: true
+# on leader, timecheck (clock drift check) interval (seconds)
+- name: mon_timecheck_interval
+ type: float
+ level: advanced
+ desc: frequency of clock synchronization checks between monitors (seconds)
+ fmt_desc: The time check interval (clock drift check) in seconds
+ for the Leader.
+ default: 5_min
+ services:
+ - mon
+ with_legacy: true
+# on leader, timecheck (clock drift check) interval when in presence of a skew (seconds)
+- name: mon_timecheck_skew_interval
+ type: float
+ level: advanced
+ desc: frequency of clock synchronization (re)checks between monitors while clocks
+ are believed to be skewed (seconds)
+ fmt_desc: The time check interval (clock drift check) in seconds when in
+ presence of a skew in seconds for the Leader.
+ default: 30
+ services:
+ - mon
+ see_also:
+ - mon_timecheck_interval
+ with_legacy: true
+# how often (in commits) to stash a full copy of the PaxosService state
+- name: paxos_stash_full_interval
+ type: int
+ level: advanced
+ default: 25
+ services:
+ - mon
+ fmt_desc: How often (in commits) to stash a full copy of the PaxosService state.
+ Currently this setting only affects ``mds``, ``mon``, ``auth`` and ``mgr``
+ PaxosServices.
+ with_legacy: true
+# max paxos iterations before we must first sync the monitor stores
+- name: paxos_max_join_drift
+ type: int
+ level: advanced
+ default: 10
+ services:
+ - mon
+ fmt_desc: The maximum Paxos iterations before we must first sync the
+ monitor data stores. When a monitor finds that its peer is too
+ far ahead of it, it will first sync with data stores before moving
+ on.
+ with_legacy: true
+# gather updates for this long before proposing a map update
+- name: paxos_propose_interval
+ type: float
+ level: advanced
+ default: 1
+ services:
+ - mon
+ fmt_desc: Gather updates for this time interval before proposing
+ a map update.
+ with_legacy: true
+# min time to gather updates for after period of inactivity
+- name: paxos_min_wait
+ type: float
+ level: advanced
+ default: 0.05
+ services:
+ - mon
+ fmt_desc: The minimum amount of time to gather updates after a period of
+ inactivity.
+ with_legacy: true
+# minimum number of paxos states to keep around
+- name: paxos_min
+ type: int
+ level: advanced
+ default: 500
+ services:
+ - mon
+ fmt_desc: The minimum number of Paxos states to keep around
+ with_legacy: true
+# number of extra proposals tolerated before trimming
+- name: paxos_trim_min
+ type: int
+ level: advanced
+ default: 250
+ services:
+ - mon
+ fmt_desc: Number of extra proposals tolerated before trimming
+ with_legacy: true
+# maximum number of versions to trim during a single proposal (0 disables it)
+- name: paxos_trim_max
+ type: int
+ level: advanced
+ default: 500
+ services:
+ - mon
+ fmt_desc: The maximum number of extra proposals to trim at a time
+ with_legacy: true
+# minimum number of versions to trigger a trim (0 disables it)
+- name: paxos_service_trim_min
+ type: uint
+ level: advanced
+ default: 250
+ services:
+ - mon
+ fmt_desc: The minimum number of versions to trigger a trim (0 disables it)
+ with_legacy: true
+# maximum number of versions to trim during a single proposal (0 disables it)
+- name: paxos_service_trim_max
+ type: uint
+ level: advanced
+ default: 500
+ services:
+ - mon
+ fmt_desc: The maximum number of versions to trim during a single proposal (0 disables it)
+ with_legacy: true
+- name: paxos_service_trim_max_multiplier
+ type: uint
+ level: advanced
+ desc: factor by which paxos_service_trim_max will be multiplied to get a new upper
+ bound when trim sizes are high (0 disables it)
+ default: 20
+ services:
+ - mon
+ min: 0
+ flags:
+ - runtime
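+# Worked example (assuming the multiplier simply scales the cap): with
+# paxos_service_trim_max = 500 and paxos_service_trim_max_multiplier = 20, up
+# to 500 * 20 = 10000 versions may be trimmed in one proposal when the trim
+# backlog is large; a multiplier of 0 keeps the plain cap of 500.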
+- name: paxos_kill_at
+ type: int
+ level: dev
+ default: 0
+ services:
+ - mon
+ with_legacy: true
+- name: mon_auth_validate_all_caps
+ type: bool
+ level: advanced
+ desc: Whether to parse non-monitor capabilities set by the 'ceph auth ...' commands.
+ Disabling this saves CPU on the monitor, but allows invalid capabilities to be
+ set and rejected only later, when they are used.
+ default: true
+ services:
+ - mon
+ flags:
+ - runtime
+# force mon to trim mdsmaps to this point (dangerous)
+- name: mon_mds_force_trim_to
+ type: int
+ level: dev
+ desc: force mons to trim mdsmaps/fsmaps up to this epoch
+ fmt_desc: Force monitor to trim mdsmaps up to but not including this FSMap
+ epoch. A value of 0 (the default) disables this behavior. This command is
+ potentially dangerous, use with care.
+ default: 0
+ services:
+ - mon
+ with_legacy: true
+- name: mds_beacon_mon_down_grace
+ type: secs
+ level: advanced
+ desc: tolerance in seconds for missed MDS beacons to monitors
+ fmt_desc: The interval without beacons before Ceph declares an MDS laggy
+ when a monitor is down.
+ default: 1_min
+# skip safety assertions on FSMap (in case of bugs where we want to continue anyway)
+- name: mon_mds_skip_sanity
+ type: bool
+ level: advanced
+ desc: skip sanity checks on fsmap/mdsmap
+ fmt_desc: Skip safety assertions on FSMap (in case of bugs where we want to
+ continue anyway). Monitor terminates if the FSMap sanity check
+ fails, but we can disable it by enabling this option.
+ default: false
+ services:
+ - mon
+ with_legacy: true
+- name: mon_mds_blocklist_interval
+ type: float
+ level: dev
+ desc: Duration in seconds that blocklist entries for MDS daemons remain in the OSD
+ map
+ fmt_desc: The blocklist duration for failed MDSs in the OSD map. Note,
+ this controls how long failed MDS daemons will stay in the
+ OSDMap blocklist. It has no effect on how long something is
+ blocklisted when the administrator blocklists it manually. For
+ example, ``ceph osd blocklist add`` will still use the default
+ blocklist time.
+ default: 1_day
+ services:
+ - mon
+ min: 1_hr
+ flags:
+ - runtime
+- name: mon_mgr_blocklist_interval
+ type: float
+ level: dev
+ desc: Duration in seconds that blocklist entries for mgr daemons remain in the OSD
+ map
+ default: 1_day
+ services:
+ - mon
+ min: 1_hr
+ flags:
+ - runtime
+- name: mon_osd_laggy_halflife
+ type: int
+ level: advanced
+ desc: halflife of OSD 'lagginess' factor
+ fmt_desc: The number of seconds over which laggy estimates decay.
+ default: 1_hr
+ services:
+ - mon
+ with_legacy: true
+- name: mon_osd_laggy_weight
+ type: float
+ level: advanced
+ desc: how heavily to weight OSD marking itself back up in overall laggy_probability
+ long_desc: 1.0 means that an OSD marking itself back up (because it was marked down
+ but not actually dead) means a 100% laggy_probability; 0.0 effectively disables
+ tracking of laggy_probability.
+ fmt_desc: The weight for new samples in laggy estimation decay.
+ default: 0.3
+ services:
+ - mon
+ min: 0
+ max: 1
+ with_legacy: true
+- name: mon_osd_laggy_max_interval
+ type: int
+ level: advanced
+ desc: cap value for period for OSD to be marked for laggy_interval calculation
+ fmt_desc: Maximum value of ``laggy_interval`` in laggy estimations (in seconds).
+ Monitor uses an adaptive approach to evaluate the ``laggy_interval`` of
+ a certain OSD. This value will be used to calculate the grace time for
+ that OSD.
+ default: 5_min
+ services:
+ - mon
+ with_legacy: true
+- name: mon_osd_adjust_heartbeat_grace
+ type: bool
+ level: advanced
+ desc: increase OSD heartbeat grace if peers appear to be laggy
+ long_desc: If an OSD is marked down but then marks itself back up, it implies it
+ wasn't actually down but was unable to respond to heartbeats. If this option
+ is true, we can use the laggy_probability and laggy_interval values calculated
+ to model this situation to increase the heartbeat grace period for this OSD so
+ that it isn't marked down again. laggy_probability is an estimated probability
+ that the given OSD is down because it is laggy (not actually down), and laggy_interval
+ is an estimate of how long it stays down when it is laggy.
+ fmt_desc: If set to ``true``, Ceph will scale based on laggy estimations.
+ default: true
+ services:
+ - mon
+ see_also:
+ - mon_osd_laggy_halflife
+ - mon_osd_laggy_weight
+ - mon_osd_laggy_max_interval
+ with_legacy: true
+- name: mon_osd_adjust_down_out_interval
+ type: bool
+ level: advanced
+ desc: increase the mon_osd_down_out_interval if an OSD appears to be laggy
+ fmt_desc: If set to ``true``, Ceph will scale based on laggy estimations.
+ default: true
+ services:
+ - mon
+ see_also:
+ - mon_osd_adjust_heartbeat_grace
+ with_legacy: true
+- name: mon_osd_auto_mark_in
+ type: bool
+ level: advanced
+ desc: mark any OSD that comes up 'in'
+ fmt_desc: Ceph will mark any booting Ceph OSD Daemons as ``in``
+ the Ceph Storage Cluster.
+ default: false
+ services:
+ - mon
+ with_legacy: true
+- name: mon_osd_auto_mark_auto_out_in
+ type: bool
+ level: advanced
+ desc: mark any OSD that comes up that was automatically marked 'out' back 'in'
+ fmt_desc: Ceph will mark booting Ceph OSD Daemons that were automatically
+ marked ``out`` of the Ceph Storage Cluster back ``in`` the cluster.
+ default: true
+ services:
+ - mon
+ see_also:
+ - mon_osd_down_out_interval
+ with_legacy: true
+- name: mon_osd_auto_mark_new_in
+ type: bool
+ level: advanced
+ desc: mark any new OSD that comes up 'in'
+ fmt_desc: Ceph will mark booting new Ceph OSD Daemons as ``in`` the
+ Ceph Storage Cluster.
+ default: true
+ services:
+ - mon
+ with_legacy: true
+- name: mon_osd_destroyed_out_interval
+ type: int
+ level: advanced
+ desc: mark any OSD 'out' that has been 'destroy'ed for this long (seconds)
+ default: 10_min
+ services:
+ - mon
+ with_legacy: true
+- name: mon_osd_down_out_interval
+ type: int
+ level: advanced
+ desc: mark any OSD 'out' that has been 'down' for this long (seconds)
+ fmt_desc: The number of seconds Ceph waits before marking a Ceph OSD Daemon
+ ``down`` and ``out`` if it doesn't respond.
+ default: 10_min
+ services:
+ - mon
+ with_legacy: true
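+# Hedged usage sketch: this interval can be changed at runtime with the
+# standard config interface, e.g. ceph config set mon mon_osd_down_out_interval 600;
+# setting it to zero on the leader acts much like the noout flag, which is why
+# a health warning (described above) covers that case.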
+- name: mon_osd_down_out_subtree_limit
+ type: str
+ level: advanced
+ desc: do not automatically mark OSDs 'out' if an entire subtree of this size is
+ down
+ fmt_desc: The smallest :term:`CRUSH` unit type that Ceph will **not**
+ automatically mark out. For instance, if set to ``host`` and if
+ all OSDs of a host are down, Ceph will not automatically mark out
+ these OSDs.
+ default: rack
+ services:
+ - mon
+ see_also:
+ - mon_osd_down_out_interval
+ flags:
+ - runtime
+- name: mon_osd_min_up_ratio
+ type: float
+ level: advanced
+ desc: do not automatically mark OSDs 'down' if fewer than this many OSDs are 'up'
+ fmt_desc: The minimum ratio of ``up`` Ceph OSD Daemons before Ceph will
+ mark Ceph OSD Daemons ``down``.
+ default: 0.3
+ services:
+ - mon
+ see_also:
+ - mon_osd_down_out_interval
+ with_legacy: true
+- name: mon_osd_min_in_ratio
+ type: float
+ level: advanced
+ desc: do not automatically mark OSDs 'out' if fewer than this many OSDs are 'in'
+ fmt_desc: The minimum ratio of ``in`` Ceph OSD Daemons before Ceph will
+ mark Ceph OSD Daemons ``out``.
+ default: 0.75
+ services:
+ - mon
+ see_also:
+ - mon_osd_down_out_interval
+ with_legacy: true
+- name: mon_osd_warn_op_age
+ type: float
+ level: advanced
+ desc: issue REQUEST_SLOW health warning if OSD ops are slower than this age (seconds)
+ default: 32
+ services:
+ - mgr
+ with_legacy: true
+- name: mon_osd_warn_num_repaired
+ type: uint
+ level: advanced
+ desc: issue OSD_TOO_MANY_REPAIRS health warning if an OSD has more than this many
+ read repairs
+ default: 10
+ services:
+ - mon
+- name: mon_osd_prime_pg_temp
+ type: bool
+ level: dev
+ desc: minimize peering work by priming pg_temp values after a map change
+ fmt_desc: Enables or disables priming the PGMap with the previous OSDs when an ``out``
+ OSD comes back into the cluster. With the ``true`` setting, clients
+ will continue to use the previous OSDs until the newly ``in`` OSDs for
+ a PG have peered.
+ default: true
+ services:
+ - mon
+ with_legacy: true
+- name: mon_osd_prime_pg_temp_max_time
+ type: float
+ level: dev
+ desc: maximum time to spend precalculating PG mappings on map change (seconds)
+ fmt_desc: How much time in seconds the monitor should spend trying to prime the
+ PGMap when an out OSD comes back into the cluster.
+ default: 0.5
+ services:
+ - mon
+ with_legacy: true
+- name: mon_osd_prime_pg_temp_max_estimate
+ type: float
+ level: advanced
+ desc: calculate all PG mappings if estimated fraction of PGs that change is above
+ this amount
+ fmt_desc: Maximum estimate of time spent on each PG before we prime all PGs
+ in parallel.
+ default: 0.25
+ services:
+ - mon
+ with_legacy: true
+- name: mon_osd_blocklist_default_expire
+ type: float
+ level: advanced
+ desc: Duration in seconds that blocklist entries for clients remain in the OSD map
+ default: 1_hr
+ services:
+ - mon
+ with_legacy: true
+- name: mon_osd_crush_smoke_test
+ type: bool
+ level: advanced
+ desc: perform a smoke test on any new CRUSH map before accepting changes
+ default: true
+ services:
+ - mon
+ with_legacy: true
+- name: mon_smart_report_timeout
+ type: uint
+ level: advanced
+ desc: Timeout (in seconds) for smartctl to run
+ default: 5
+ services:
+ - mon
+- name: mon_warn_on_older_version
+ type: bool
+ level: advanced
+ desc: issue DAEMON_OLD_VERSION health warning if daemons are not all running the
+ same version
+ default: true
+ services:
+ - mon
+- name: mon_warn_older_version_delay
+ type: secs
+ level: advanced
+ desc: issue DAEMON_OLD_VERSION health warning after this amount of time has elapsed
+ default: 7_day
+ services:
+ - mon
+- name: mon_data
+ type: str
+ level: advanced
+ desc: path to mon database
+ fmt_desc: The monitor's data location.
+ default: /var/lib/ceph/mon/$cluster-$id
+ services:
+ - mon
+ flags:
+ - no_mon_update
+ with_legacy: true
+- name: mon_rocksdb_options
+ type: str
+ level: advanced
+ default: write_buffer_size=33554432,compression=kNoCompression,level_compaction_dynamic_level_bytes=true
+ with_legacy: true
+- name: mon_enable_op_tracker
+ type: bool
+ level: advanced
+ desc: enable/disable MON op tracking
+ default: true
+ services:
+ - mon
+# compact leveldb on ceph-mon start
+- name: mon_compact_on_start
+ type: bool
+ level: advanced
+ default: false
+ services:
+ - mon
+ fmt_desc: Compact the database used as Ceph Monitor store on
+ ``ceph-mon`` start. A manual compaction helps to shrink the
+ monitor database and improve its performance if the regular
+ compaction fails to work.
+ with_legacy: true
+# trigger leveldb compaction on bootstrap
+- name: mon_compact_on_bootstrap
+ type: bool
+ level: advanced
+ default: false
+ services:
+ - mon
+ fmt_desc: Compact the database used as Ceph Monitor store
+ on bootstrap. Monitors probe each other to establish
+ a quorum after bootstrap. If a monitor times out before joining the
+ quorum, it will start over and bootstrap again.
+ with_legacy: true
+# compact (a prefix) when we trim old states
+- name: mon_compact_on_trim
+ type: bool
+ level: advanced
+ default: true
+ services:
+ - mon
+ fmt_desc: Compact a certain prefix (including paxos) when we trim its old states.
+ with_legacy: true
+- name: mon_op_complaint_time
+ type: secs
+ level: advanced
+ desc: time after which to consider a monitor operation blocked after no updates
+ default: 30
+ services:
+ - mon
+- name: mon_op_log_threshold
+ type: int
+ level: advanced
+ desc: max number of slow ops to display
+ default: 5
+ services:
+ - mon
+- name: mon_op_history_size
+ type: uint
+ level: advanced
+ desc: max number of completed ops to track
+ default: 20
+ services:
+ - mon
+- name: mon_op_history_duration
+ type: secs
+ level: advanced
+ desc: expiration time in seconds of historical MON OPS
+ default: 10_min
+ services:
+ - mon
+- name: mon_op_history_slow_op_size
+ type: uint
+ level: advanced
+ desc: max number of slow historical MON OPS to keep
+ default: 20
+ services:
+ - mon
+- name: mon_op_history_slow_op_threshold
+ type: secs
+ level: advanced
+ desc: duration of an op to be considered as a historical slow op
+ default: 10
+ services:
+ - mon
+- name: mon_osdmap_full_prune_enabled
+ type: bool
+ level: advanced
+ desc: enables pruning full osdmap versions when we go over a given number of maps
+ default: true
+ services:
+ - mon
+ see_also:
+ - mon_osdmap_full_prune_min
+ - mon_osdmap_full_prune_interval
+ - mon_osdmap_full_prune_txsize
+- name: mon_osdmap_full_prune_min
+ type: uint
+ level: advanced
+ desc: minimum number of versions in the store to trigger full map pruning
+ default: 10000
+ services:
+ - mon
+ see_also:
+ - mon_osdmap_full_prune_enabled
+ - mon_osdmap_full_prune_interval
+ - mon_osdmap_full_prune_txsize
+- name: mon_osdmap_full_prune_interval
+ type: uint
+ level: advanced
+ desc: interval between maps that will not be pruned; maps in the middle will be
+ pruned.
+ default: 10
+ services:
+ - mon
+ see_also:
+ - mon_osdmap_full_prune_enabled
+ - mon_osdmap_full_prune_interval
+ - mon_osdmap_full_prune_txsize
+- name: mon_osdmap_full_prune_txsize
+ type: uint
+ level: advanced
+ desc: number of maps we will prune per iteration
+ default: 100
+ services:
+ - mon
+ see_also:
+ - mon_osdmap_full_prune_enabled
+ - mon_osdmap_full_prune_interval
+ - mon_osdmap_full_prune_txsize
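+# Illustrative reading of the pruning knobs above: once more than
+# mon_osdmap_full_prune_min (10000) full maps are stored, the mon keeps one
+# full map every mon_osdmap_full_prune_interval (10) epochs and prunes the
+# maps in between, removing up to mon_osdmap_full_prune_txsize (100) maps per
+# transaction.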
+- name: mon_osd_cache_size
+ type: int
+ level: advanced
+ desc: maximum number of OSDMaps to cache in memory
+ fmt_desc: The size of osdmaps cache, not to rely on underlying store's cache
+ default: 500
+ services:
+ - mon
+ with_legacy: true
+- name: mon_osd_cache_size_min
+ type: size
+ level: advanced
+ desc: The minimum amount of bytes to be kept mapped in memory for osd monitor caches.
+ fmt_desc: The minimum amount of bytes to be kept mapped in memory for osd
+ monitor caches.
+ default: 128_M
+ services:
+ - mon
+ with_legacy: true
+- name: mon_osd_mapping_pgs_per_chunk
+ type: int
+ level: dev
+ desc: granularity of PG placement calculation background work
+ fmt_desc: We calculate the mapping from placement group to OSDs in chunks.
+ This option specifies the number of placement groups per chunk.
+ default: 4096
+ services:
+ - mon
+ with_legacy: true
+- name: mon_clean_pg_upmaps_per_chunk
+ type: uint
+ level: dev
+ desc: granularity of PG upmap validation background work
+ default: 256
+ services:
+ - mon
+ with_legacy: true
+- name: mon_osd_max_creating_pgs
+ type: int
+ level: advanced
+ desc: maximum number of PGs the mon will create at once
+ default: 1024
+ services:
+ - mon
+ with_legacy: true
+- name: mon_osd_max_initial_pgs
+ type: int
+ level: advanced
+ desc: maximum number of PGs a pool will be created with
+ long_desc: If the user specifies more PGs than this, the cluster will subsequently
+ split PGs after the pool is created in order to reach the target.
+ default: 1024
+ services:
+ - mon
+- name: mon_memory_target
+ type: size
+ level: basic
+ desc: The amount of bytes pertaining to osd monitor caches and kv cache to be kept
+ mapped in memory with cache auto-tuning enabled
+ fmt_desc: The amount of bytes pertaining to OSD monitor caches and KV cache
+ to be kept mapped in memory with cache auto-tuning enabled.
+ default: 2_G
+ services:
+ - mon
+ flags:
+ - runtime
+ with_legacy: true
+- name: mon_memory_autotune
+ type: bool
+ level: basic
+ desc: Autotune the cache memory being used for osd monitors and kv database
+ fmt_desc: Autotune the cache memory used for OSD monitors and KV
+ database.
+ default: true
+ services:
+ - mon
+ flags:
+ - runtime
+ with_legacy: true
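+# Hedged usage sketch: with autotuning enabled, the overall budget is steered
+# via mon_memory_target, e.g. ceph config set mon mon_memory_target 4G
+# (runtime-adjustable per the flags above); the exact split between the osdmap
+# cache and the KV cache is handled internally.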
+- name: mon_cpu_threads
+ type: int
+ level: advanced
+ desc: worker threads for CPU intensive background work
+ fmt_desc: Number of threads for performing CPU intensive work on monitor.
+ default: 4
+ services:
+ - mon
+ with_legacy: true
+- name: mon_tick_interval
+ type: int
+ level: advanced
+ desc: interval for internal mon background checks
+ fmt_desc: A monitor's tick interval in seconds.
+ default: 5
+ services:
+ - mon
+ with_legacy: true
+- name: mon_session_timeout
+ type: int
+ level: advanced
+ desc: close inactive mon client connections after this many seconds
+ fmt_desc: The monitor will terminate inactive sessions that stay idle beyond
+ this time limit.
+ default: 5_min
+ services:
+ - mon
+ with_legacy: true
+- name: mon_subscribe_interval
+ type: float
+ level: dev
+ desc: subscribe interval for pre-jewel clients
+ fmt_desc: The refresh interval (in seconds) for subscriptions. The
+ subscription mechanism enables obtaining cluster maps
+ and log information.
+ default: 1_day
+ services:
+ - mon
+ with_legacy: true
+- name: mon_use_min_delay_socket
+ type: bool
+ level: advanced
+ default: false
+ desc: use a min-delay socket to prioritize packets between mons
+ with_legacy: true
+ see_also:
+ - osd_heartbeat_use_min_delay_socket
diff --git a/src/common/options/osd.yaml.in b/src/common/options/osd.yaml.in
new file mode 100644
index 000000000..7291ce11d
--- /dev/null
+++ b/src/common/options/osd.yaml.in
@@ -0,0 +1,1415 @@
+# -*- mode: YAML -*-
+---
+
+options:
+- name: osd_numa_prefer_iface
+ type: bool
+ level: advanced
+ desc: prefer IP on network interface on same numa node as storage
+ default: true
+ see_also:
+ - osd_numa_auto_affinity
+ flags:
+ - startup
+- name: osd_numa_auto_affinity
+ type: bool
+ level: advanced
+ desc: automatically set affinity to numa node when storage and network match
+ default: true
+ flags:
+ - startup
+- name: osd_numa_node
+ type: int
+ level: advanced
+ desc: set affinity to a numa node (-1 for none)
+ default: -1
+ see_also:
+ - osd_numa_auto_affinity
+ flags:
+ - startup
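+# Hedged usage sketch: to pin a single OSD to NUMA node 0 one would set, e.g.,
+# ceph config set osd.0 osd_numa_node 0; as a 'startup' option it only takes
+# effect on the next daemon restart.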
+- name: set_keepcaps
+ type: bool
+ level: advanced
+ desc: set the keepcaps flag before changing UID, preserving the permitted capability set
+ long_desc: When ceph switches from root to the ceph uid, all capabilities in all sets are erased. If
+ a component that is capability-aware needs a specific capability, the keepcaps flag maintains
+ the permitted capability set, allowing the capabilities in the effective set to be activated as needed.
+ default: false
+ flags:
+ - startup
+- name: osd_smart_report_timeout
+ type: uint
+ level: advanced
+ desc: Timeout (in seconds) for smartctl to run
+ default: 5
+# verify backend can support configured max object name length
+- name: osd_check_max_object_name_len_on_startup
+ type: bool
+ level: dev
+ default: true
+ with_legacy: true
+- name: osd_max_backfills
+ type: uint
+ level: advanced
+ desc: Maximum number of concurrent local and remote backfills or recoveries per
+ OSD
+ long_desc: There can be osd_max_backfills local reservations AND the same remote
+ reservations per OSD. So a value of 1 lets this OSD participate as 1 PG primary
+ in recovery and 1 shard of another recovering PG.
+ fmt_desc: The maximum number of backfills allowed to or from a single OSD.
+ Note that this is applied separately for read and write operations.
+ default: 1
+ flags:
+ - runtime
+ with_legacy: true
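+# Worked example from the long_desc above: with osd_max_backfills = 1 an OSD
+# can hold one local and one remote reservation at a time, i.e. recover as
+# primary of one PG while serving as a shard of one other recovering PG; a
+# runtime override such as ceph config set osd osd_max_backfills 2 raises both
+# caps to 2.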
+# Minimum recovery priority (255 = max, smaller = lower)
+- name: osd_min_recovery_priority
+ type: int
+ level: advanced
+ desc: Minimum priority below which recovery is not performed
+ long_desc: The purpose here is to prevent the cluster from doing *any* lower priority
+ work (e.g., rebalancing) below this threshold and focus solely on higher priority
+ work (e.g., replicating degraded objects).
+ default: 0
+ with_legacy: true
+- name: osd_backfill_retry_interval
+ type: float
+ level: advanced
+ desc: how frequently to retry backfill reservations after being denied (e.g., due
+ to a full OSD)
+ fmt_desc: The number of seconds to wait before retrying backfill requests.
+ default: 30
+ with_legacy: true
+- name: osd_recovery_retry_interval
+ type: float
+ level: advanced
+ desc: how frequently to retry recovery reservations after being denied (e.g., due
+ to a full OSD)
+ default: 30
+ with_legacy: true
+- name: osd_recovery_sleep
+ type: float
+ level: advanced
+ desc: Time in seconds to sleep before next recovery or backfill op. This setting
+ overrides _ssd, _hdd, and _hybrid if non-zero.
+ fmt_desc: Time in seconds to sleep before the next recovery or backfill op.
+ Increasing this value will slow down recovery operation while
+ client operations will be less impacted.
+ default: 0
+ flags:
+ - runtime
+ with_legacy: true
+- name: osd_recovery_sleep_hdd
+ type: float
+ level: advanced
+ desc: Time in seconds to sleep before next recovery or backfill op for HDDs
+ fmt_desc: Time in seconds to sleep before next recovery or backfill op
+ for HDDs.
+ default: 0.1
+ flags:
+ - runtime
+ with_legacy: true
+- name: osd_recovery_sleep_ssd
+ type: float
+ level: advanced
+ desc: Time in seconds to sleep before next recovery or backfill op for SSDs
+ fmt_desc: Time in seconds to sleep before the next recovery or backfill op
+ for SSDs.
+ default: 0
+ see_also:
+ - osd_recovery_sleep
+ flags:
+ - runtime
+ with_legacy: true
+- name: osd_recovery_sleep_hybrid
+ type: float
+ level: advanced
+ desc: Time in seconds to sleep before next recovery or backfill op when data is
+ on HDD and journal is on SSD
+ fmt_desc: Time in seconds to sleep before the next recovery or backfill op
+ when OSD data is on HDD and OSD journal / WAL+DB is on SSD.
+ default: 0.025
+ see_also:
+ - osd_recovery_sleep
+ flags:
+ - runtime
+- name: osd_snap_trim_sleep
+ type: float
+ level: advanced
+ desc: Time in seconds to sleep before next snap trim. This setting overrides _ssd,
+ _hdd, and _hybrid if non-zero.
+ fmt_desc: Time in seconds to sleep before next snap trim op.
+ Increasing this value will slow down snap trimming.
+ This option overrides backend specific variants.
+ default: 0
+ flags:
+ - runtime
+ with_legacy: true
+- name: osd_snap_trim_sleep_hdd
+ type: float
+ level: advanced
+ desc: Time in seconds to sleep before next snap trim for HDDs
+ default: 5
+ flags:
+ - runtime
+- name: osd_snap_trim_sleep_ssd
+ type: float
+ level: advanced
+ desc: Time in seconds to sleep before next snap trim for SSDs
+ fmt_desc: Time in seconds to sleep before next snap trim op
+ for SSD OSDs (including NVMe).
+ default: 0
+ flags:
+ - runtime
+- name: osd_snap_trim_sleep_hybrid
+ type: float
+ level: advanced
+ desc: Time in seconds to sleep before next snap trim when data is on HDD and journal
+ is on SSD
+ fmt_desc: Time in seconds to sleep before next snap trim op
+ when OSD data is on an HDD and the OSD journal or WAL+DB is on an SSD.
+ default: 2
+ flags:
+ - runtime
+- name: osd_scrub_invalid_stats
+ type: bool
+ level: advanced
+ default: true
+ with_legacy: true
+- name: osd_max_scrubs
+ type: int
+ level: advanced
+ desc: Maximum concurrent scrubs on a single OSD
+ fmt_desc: The maximum number of simultaneous scrub operations for
+ a Ceph OSD Daemon.
+ default: 1
+ with_legacy: true
+- name: osd_scrub_during_recovery
+ type: bool
+ level: advanced
+ desc: Allow scrubbing when PGs on the OSD are undergoing recovery
+ fmt_desc: Allow scrub during recovery. Setting this to ``false`` will disable
+ scheduling new scrubs (and deep-scrubs) while there is active recovery.
+ Already running scrubs will be continued. This might be useful to reduce
+ load on busy clusters.
+ default: false
+ with_legacy: true
+- name: osd_repair_during_recovery
+ type: bool
+ level: advanced
+ desc: Allow requested repairing when PGs on the OSD are undergoing recovery
+ default: false
+ with_legacy: true
+- name: osd_scrub_begin_hour
+ type: int
+ level: advanced
+ desc: Restrict scrubbing to this hour of the day or later
+ long_desc: Use osd_scrub_begin_hour=0 and osd_scrub_end_hour=0 for the entire day.
+ fmt_desc: This restricts scrubbing to this hour of the day or later.
+ Use ``osd_scrub_begin_hour = 0`` and ``osd_scrub_end_hour = 0``
+ to allow scrubbing the entire day. Along with ``osd_scrub_end_hour``, they
+ define a time window in which scrubs can happen. But a scrub will be performed
+ no matter whether the time window allows or not, as long as the placement
+ group's scrub interval exceeds ``osd_scrub_max_interval``.
+ default: 0
+ see_also:
+ - osd_scrub_end_hour
+ min: 0
+ max: 23
+ with_legacy: true
+- name: osd_scrub_end_hour
+ type: int
+ level: advanced
+ desc: Restrict scrubbing to hours of the day earlier than this
+ long_desc: Use osd_scrub_begin_hour=0 and osd_scrub_end_hour=0 for the entire day.
+ fmt_desc: This restricts scrubbing to hours of the day earlier than this.
+ Use ``osd_scrub_begin_hour = 0`` and ``osd_scrub_end_hour = 0`` to allow scrubbing
+ for the entire day. Along with ``osd_scrub_begin_hour``, they define a time
+ window in which scrubs can happen. But a scrub will be performed
+ no matter whether the time window allows or not, as long as the placement
+ group's scrub interval exceeds ``osd_scrub_max_interval``.
+ default: 0
+ see_also:
+ - osd_scrub_begin_hour
+ min: 0
+ max: 23
+ with_legacy: true
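+# Hedged example: to confine new scrub starts to an overnight window one might
+# set osd_scrub_begin_hour = 23 and osd_scrub_end_hour = 6 (assuming the
+# window is allowed to wrap past midnight); scrubs overdue per
+# osd_scrub_max_interval still run regardless of the window.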
+- name: osd_scrub_begin_week_day
+ type: int
+ level: advanced
+ desc: Restrict scrubbing to this day of the week or later
+ long_desc: 0 = Sunday, 1 = Monday, etc. Use osd_scrub_begin_week_day=0 osd_scrub_end_week_day=0
+ for the entire week.
+ fmt_desc: This restricts scrubbing to this day of the week or later.
+ 0 = Sunday, 1 = Monday, etc. Use ``osd_scrub_begin_week_day = 0``
+ and ``osd_scrub_end_week_day = 0`` to allow scrubbing for the entire week.
+ Along with ``osd_scrub_end_week_day``, they define a time window in which
+ scrubs can happen. But a scrub will be performed
+ no matter whether the time window allows or not, when the PG's
+ scrub interval exceeds ``osd_scrub_max_interval``.
+ default: 0
+ see_also:
+ - osd_scrub_end_week_day
+ min: 0
+ max: 6
+ with_legacy: true
+- name: osd_scrub_end_week_day
+ type: int
+ level: advanced
+ desc: Restrict scrubbing to days of the week earlier than this
+ long_desc: 0 = Sunday, 1 = Monday, etc. Use osd_scrub_begin_week_day=0 osd_scrub_end_week_day=0
+ for the entire week.
+ fmt_desc: This restricts scrubbing to days of the week earlier than this.
+ 0 = Sunday, 1 = Monday, etc. Use ``osd_scrub_begin_week_day = 0``
+ and ``osd_scrub_end_week_day = 0`` to allow scrubbing for the entire week.
+ Along with ``osd_scrub_begin_week_day``, they define a time
+ window in which scrubs can happen. But a scrub will be performed
+ no matter whether the time window allows or not, as long as the placement
+ group's scrub interval exceeds ``osd_scrub_max_interval``.
+ default: 0
+ see_also:
+ - osd_scrub_begin_week_day
+ min: 0
+ max: 6
+ with_legacy: true
+- name: osd_scrub_load_threshold
+ type: float
+ level: advanced
+ desc: Allow scrubbing when system load divided by number of CPUs is below this value
+ fmt_desc: The normalized maximum load. Ceph will not scrub when the system load
+ (as defined by ``getloadavg() / number of online CPUs``) is higher than this number.
+ Default is ``0.5``.
+ default: 0.5
+ with_legacy: true
+# if load is low
+- name: osd_scrub_min_interval
+ type: float
+ level: advanced
+ desc: Scrub each PG no more often than this interval
+ fmt_desc: The minimal interval in seconds for scrubbing the Ceph OSD Daemon
+ when the Ceph Storage Cluster load is low.
+ default: 1_day
+ see_also:
+ - osd_scrub_max_interval
+ with_legacy: true
+# regardless of load
+- name: osd_scrub_max_interval
+ type: float
+ level: advanced
+ desc: Scrub each PG no less often than this interval
+ fmt_desc: The maximum interval in seconds for scrubbing the Ceph OSD Daemon
+ irrespective of cluster load.
+ default: 7_day
+ see_also:
+ - osd_scrub_min_interval
+ with_legacy: true
+# randomize the scheduled scrub in the span of [min,min*(1+randomize_ratio))
+- name: osd_scrub_interval_randomize_ratio
+ type: float
+ level: advanced
+ desc: Ratio of scrub interval to randomly vary
+ long_desc: This prevents a scrub 'stampede' by randomly varying the scrub intervals
+ so that they are soon uniformly distributed over the week
+ fmt_desc: Add a random delay to ``osd_scrub_min_interval`` when scheduling
+ the next scrub job for a PG. The delay is a random
+ value less than ``osd_scrub_min_interval`` \*
+ ``osd_scrub_interval_randomize_ratio``. The default setting
+ spreads scrubs throughout the allowed time
+ window of ``[1, 1.5]`` \* ``osd_scrub_min_interval``.
+ default: 0.5
+ see_also:
+ - osd_scrub_min_interval
+ with_legacy: true
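+# Worked example from the fmt_desc above: with osd_scrub_min_interval = 1_day
+# and a ratio of 0.5, each PG's next scrub is scheduled at a random point in
+# the [1.0, 1.5] day window, spreading scrub load instead of clustering it.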
+# the probability to back off the scheduled scrub
+- name: osd_scrub_backoff_ratio
+ type: float
+ level: dev
+ desc: Backoff ratio for scheduling scrubs
+ long_desc: This is the percentage of ticks that do NOT schedule scrubs; 66% means
+ that 1 out of 3 ticks will schedule scrubs
+ default: 0.66
+ with_legacy: true
+- name: osd_scrub_chunk_min
+ type: int
+ level: advanced
+ desc: Minimum number of objects to deep-scrub in a single chunk
+ fmt_desc: The minimal number of object store chunks to scrub during a single operation.
+ Ceph blocks writes to a single chunk during scrub.
+ default: 5
+ see_also:
+ - osd_scrub_chunk_max
+ with_legacy: true
+- name: osd_scrub_chunk_max
+ type: int
+ level: advanced
+ desc: Maximum number of objects to deep-scrub in a single chunk
+ fmt_desc: The maximum number of object store chunks to scrub during a single operation.
+ default: 25
+ see_also:
+ - osd_scrub_chunk_min
+ with_legacy: true
+- name: osd_shallow_scrub_chunk_min
+ type: int
+ level: advanced
+ desc: Minimum number of objects to scrub in a single chunk
+ fmt_desc: The minimum number of object store chunks to scrub during a single operation.
+ Not applicable to deep scrubs.
+ Ceph blocks writes to a single chunk during scrub.
+ default: 50
+ see_also:
+ - osd_shallow_scrub_chunk_max
+ - osd_scrub_chunk_min
+ with_legacy: true
+- name: osd_shallow_scrub_chunk_max
+ type: int
+ level: advanced
+ desc: Maximum number of objects to scrub in a single chunk
+ fmt_desc: The maximum number of object store chunks to scrub during a single operation.
+ Not applicable to deep scrubs.
+ default: 100
+ see_also:
+ - osd_shallow_scrub_chunk_min
+ - osd_scrub_chunk_max
+ with_legacy: true
+# sleep between [deep]scrub ops
+- name: osd_scrub_sleep
+ type: float
+ level: advanced
+ desc: Duration to inject a delay during scrubbing
+ fmt_desc: Time to sleep before scrubbing the next group of chunks. Increasing this value will slow
+ down the overall rate of scrubbing so that client operations will be less impacted.
+ default: 0
+ flags:
+ - runtime
+ with_legacy: true
+# more sleep between [deep]scrub ops
+- name: osd_scrub_extended_sleep
+ type: float
+ level: advanced
+ desc: Duration to inject a delay during scrubbing out of scrubbing hours
+ default: 0
+ see_also:
+ - osd_scrub_begin_hour
+ - osd_scrub_end_hour
+ - osd_scrub_begin_week_day
+ - osd_scrub_end_week_day
+ with_legacy: true
+# whether auto-repair inconsistencies upon deep-scrubbing
+- name: osd_scrub_auto_repair
+ type: bool
+ level: advanced
+ desc: Automatically repair damaged objects detected during scrub
+ fmt_desc: Setting this to ``true`` will enable automatic PG repair when errors
+ are found by scrubs or deep-scrubs. However, if more than
+ ``osd_scrub_auto_repair_num_errors`` errors are found a repair is NOT performed.
+ default: false
+ with_legacy: true
+# only auto-repair when number of errors is below this threshold
+- name: osd_scrub_auto_repair_num_errors
+ type: uint
+ level: advanced
+ desc: Maximum number of detected errors to automatically repair
+ fmt_desc: Auto repair will not occur if more than this many errors are found.
+ default: 5
+ see_also:
+ - osd_scrub_auto_repair
+ with_legacy: true
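+# Hedged usage sketch: enabling auto-repair, e.g.
+# ceph config set osd osd_scrub_auto_repair true, lets scrubs repair damaged
+# objects automatically, but only when at most
+# osd_scrub_auto_repair_num_errors (5) errors are found; above that threshold
+# no repair is attempted.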
+- name: osd_scrub_max_preemptions
+ type: uint
+ level: advanced
+ desc: Set the maximum number of times we will preempt a deep scrub due to a client
+ operation before blocking client IO to complete the scrub
+ default: 5
+ min: 0
+ max: 30
+- name: osd_deep_scrub_interval
+ type: float
+ level: advanced
+ desc: Deep scrub each PG (i.e., verify data checksums) at least this often
+ fmt_desc: The interval for "deep" scrubbing (fully reading all data). The
+ ``osd_scrub_load_threshold`` does not affect this setting.
+ default: 7_day
+ with_legacy: true
+- name: osd_deep_scrub_randomize_ratio
+ type: float
+ level: advanced
+ desc: Scrubs will randomly become deep scrubs at this rate (0.15 -> 15% of scrubs
+ are deep)
+ long_desc: This prevents a deep scrub 'stampede' by spreading deep scrubs so they
+ are uniformly distributed over the week
+ default: 0.15
+ with_legacy: true
+- name: osd_deep_scrub_stride
+ type: size
+ level: advanced
+ desc: Number of bytes to read from an object at a time during deep scrub
+ fmt_desc: Read size when doing a deep scrub.
+ default: 512_K
+ with_legacy: true
+- name: osd_deep_scrub_keys
+ type: int
+ level: advanced
+ desc: Number of keys to read from an object at a time during deep scrub
+ default: 1024
+ with_legacy: true
+# objects must be this old (seconds) before we update the whole-object digest on scrub
+- name: osd_deep_scrub_update_digest_min_age
+ type: int
+ level: advanced
+ desc: Update overall object digest only if object was last modified longer ago than
+ this
+ default: 2_hr
+ with_legacy: true
+- name: osd_deep_scrub_large_omap_object_key_threshold
+ type: uint
+ level: advanced
+ desc: Warn when we encounter an object with more omap keys than this
+ default: 200000
+ services:
+ - osd
+ - mds
+ see_also:
+ - osd_deep_scrub_large_omap_object_value_sum_threshold
+ with_legacy: true
+- name: osd_deep_scrub_large_omap_object_value_sum_threshold
+ type: size
+ level: advanced
+ desc: Warn when we encounter an object with more omap key bytes than this
+ default: 1_G
+ services:
+ - osd
+ see_also:
+ - osd_deep_scrub_large_omap_object_key_threshold
+ with_legacy: true
+# when scrubbing blocks on a locked object
+- name: osd_blocked_scrub_grace_period
+ type: int
+ level: advanced
+ desc: Time (seconds) before issuing a cluster-log warning
+ long_desc: Waiting too long for an object in the scrubbed chunk to be unlocked.
+ default: 120
+ with_legacy: true
+# timely updates to the 'pg dump' output, esp. re scrub scheduling
+- name: osd_stats_update_period_scrubbing
+ type: int
+ level: advanced
+ desc: Stats update period (seconds) when scrubbing
+ long_desc: A PG actively scrubbing (or blocked while scrubbing) publishes its
+ stats (inc. scrub/block duration) every this many seconds.
+ default: 15
+ with_legacy: false
+- name: osd_stats_update_period_not_scrubbing
+ type: int
+ level: advanced
+ desc: Stats update period (seconds) when not scrubbing
+ long_desc: A PG we are a primary of, publishes its
+ stats (inc. scrub/block duration) every this many seconds.
+ default: 120
+ with_legacy: false
+# when replicas are slow to respond to scrub resource reservations
+# Note: disable by using a very large value
+- name: osd_scrub_slow_reservation_response
+ type: millisecs
+ level: advanced
+ desc: Duration before issuing a cluster-log warning
+ long_desc: Waiting too long for a replica to respond (after at least half of the
+ replicas have responded).
+ default: 2200
+ min: 500
+ see_also:
+ - osd_scrub_reservation_timeout
+ with_legacy: false
+# when a replica does not respond to scrub resource request
+# Note: disable by using a very large value
+- name: osd_scrub_reservation_timeout
+ type: millisecs
+ level: advanced
+ desc: Duration before aborting the scrub session
+ long_desc: Waiting too long for some replicas to respond to
+ scrub reservation requests.
+ default: 5000
+ min: 2000
+ see_also:
+ - osd_scrub_slow_reservation_response
+ with_legacy: false
+# where rados plugins are stored
+- name: osd_class_dir
+ type: str
+ level: advanced
+ default: @CMAKE_INSTALL_LIBDIR@/rados-classes
+ fmt_desc: The class path for RADOS class plug-ins.
+ with_legacy: true
+- name: osd_open_classes_on_start
+ type: bool
+ level: advanced
+ default: true
+ with_legacy: true
+# list of object classes allowed to be loaded (allow all: *)
+- name: osd_class_load_list
+ type: str
+ level: advanced
+ default: cephfs hello journal lock log numops otp rbd refcount rgw rgw_gc timeindex
+ user version cas cmpomap queue 2pc_queue fifo
+ with_legacy: true
+# list of object classes with default execute perm (allow all: *)
+- name: osd_class_default_list
+ type: str
+ level: advanced
+ default: cephfs hello journal lock log numops otp rbd refcount rgw rgw_gc timeindex
+ user version cas cmpomap queue 2pc_queue fifo
+ with_legacy: true
+- name: osd_agent_max_ops
+ type: int
+ level: advanced
+ desc: maximum concurrent tiering operations for tiering agent
+ fmt_desc: The maximum number of simultaneous flushing ops per tiering agent
+ in the high speed mode.
+ default: 4
+ with_legacy: true
+- name: osd_agent_max_low_ops
+ type: int
+ level: advanced
+ desc: maximum concurrent low-priority tiering operations for tiering agent
+ fmt_desc: The maximum number of simultaneous flushing ops per tiering agent
+ in the low speed mode.
+ default: 2
+ with_legacy: true
+- name: osd_agent_min_evict_effort
+ type: float
+ level: advanced
+ desc: minimum effort to expend evicting clean objects
+ default: 0.1
+ min: 0
+ max: 0.99
+ with_legacy: true
+- name: osd_agent_quantize_effort
+ type: float
+ level: advanced
+ desc: size of quantize unit for eviction effort
+ default: 0.1
+ with_legacy: true
+- name: osd_agent_delay_time
+ type: float
+ level: advanced
+ desc: how long agent should sleep if it has no work to do
+ default: 5
+ with_legacy: true
+# decay atime and hist histograms after how many objects go by
+- name: osd_agent_hist_halflife
+ type: int
+ level: advanced
+ desc: halflife of agent atime and temp histograms
+ default: 1000
+ with_legacy: true
+# decay atime and hist histograms after how many objects go by
+- name: osd_agent_slop
+ type: float
+ level: advanced
+ desc: slop factor to avoid switching tiering flush and eviction mode
+ default: 0.02
+ with_legacy: true
+- name: osd_find_best_info_ignore_history_les
+ type: bool
+ level: dev
+ desc: ignore last_epoch_started value when peering AND PROBABLY LOSE DATA
+ long_desc: THIS IS AN EXTREMELY DANGEROUS OPTION THAT SHOULD ONLY BE USED AT THE
+ DIRECTION OF A DEVELOPER. It makes peering ignore the last_epoch_started value,
+ which can allow an OSD to believe it has an authoritative view of a PG's
+ contents even when that view is in fact old and stale, typically leading to
+ data loss (by believing a stale PG is up to date).
+ default: false
+ with_legacy: true
+- name: osd_uuid
+ type: uuid
+ level: advanced
+ desc: uuid label for a new OSD
+ fmt_desc: The universally unique identifier (UUID) for the Ceph OSD Daemon.
+ note: The ``osd_uuid`` applies to a single Ceph OSD Daemon. The ``fsid``
+ applies to the entire cluster.
+ flags:
+ - create
+ with_legacy: true
+- name: osd_data
+ type: str
+ level: advanced
+ desc: path to OSD data
+ fmt_desc: The path to the OSDs data. You must create the directory when
+ deploying Ceph. You should mount a drive for OSD data at this
+ mount point. We do not recommend changing the default.
+ default: /var/lib/ceph/osd/$cluster-$id
+ flags:
+ - no_mon_update
+ with_legacy: true
+- name: osd_journal
+ type: str
+ level: advanced
+ desc: path to OSD journal (when FileStore backend is in use)
+ fmt_desc: The path to the OSD's journal. This may be a path to a file or a
+ block device (such as a partition of an SSD). If it is a file,
+ you must create the directory to contain it. We recommend using a
+ separate fast device when the ``osd_data`` drive is an HDD.
+ default: /var/lib/ceph/osd/$cluster-$id/journal
+ flags:
+ - no_mon_update
+ with_legacy: true
+- name: osd_journal_size
+ type: size
+ level: advanced
+ desc: size of FileStore journal (in MiB)
+ fmt_desc: The size of the journal in megabytes.
+ default: 5_K
+ flags:
+ - create
+ with_legacy: true
+- name: osd_journal_flush_on_shutdown
+ type: bool
+ level: advanced
+ desc: flush FileStore journal contents during clean OSD shutdown
+ default: true
+ with_legacy: true
+- name: osd_compact_on_start
+ type: bool
+ level: advanced
+ desc: compact OSD's object store's OMAP on start
+ default: false
+# flags for specific control purpose during osd mount() process.
+# e.g., can be 1 to skip over replaying journal
+# or 2 to skip over mounting omap or 3 to skip over both.
+# This might be helpful in case the journal is totally corrupted
+# and we still want to bring the osd daemon back normally, etc.
+- name: osd_os_flags
+ type: uint
+ level: dev
+ desc: flags to skip filestore omap or journal initialization
+ default: 0
+- name: osd_max_write_size
+ type: size
+ level: advanced
+ desc: Maximum size of a RADOS write operation in megabytes
+ long_desc: This setting prevents clients from doing very large writes to RADOS. If
+ you set this to a value below what clients expect, they will receive an error
+ when attempting to write to the cluster.
+ fmt_desc: The maximum size of a write in megabytes.
+ default: 90
+ min: 4
+ with_legacy: true
+- name: osd_max_pgls
+ type: uint
+ level: advanced
+ desc: maximum number of results when listing objects in a pool
+ fmt_desc: The maximum number of objects to return per object-listing request. A client
+ requesting a large number can tie up the Ceph OSD Daemon.
+ default: 1_K
+ with_legacy: true
+- name: osd_client_message_size_cap
+ type: size
+ level: advanced
+ desc: maximum memory to devote to in-flight client requests
+ long_desc: If this value is exceeded, the OSD will not read any new client data
+ off of the network until memory is freed.
+ fmt_desc: The largest client data message allowed in memory.
+ default: 500_M
+ with_legacy: true
+- name: osd_client_message_cap
+ type: uint
+ level: advanced
+ desc: maximum number of in-flight client requests
+ default: 256
+ with_legacy: true
+- name: osd_crush_update_on_start
+ type: bool
+ level: advanced
+ desc: update OSD CRUSH location on startup
+ default: true
+ with_legacy: true
+- name: osd_class_update_on_start
+ type: bool
+ level: advanced
+ desc: set OSD device class on startup
+ default: true
+ with_legacy: true
+- name: osd_crush_initial_weight
+ type: float
+ level: advanced
+ desc: if >= 0, initial CRUSH weight for newly created OSDs
+ long_desc: If this value is negative, the size of the OSD in TiB is used.
+ fmt_desc: The initial CRUSH weight for newly added OSDs. By default, the
+ initial CRUSH weight for a newly added OSD is set to its device size in
+ TB. See `Weighting Bucket Items`_ for details.
+ default: -1
+ with_legacy: true
+# Allows the "peered" state for recovery and backfill below min_size
+- name: osd_allow_recovery_below_min_size
+ type: bool
+ level: dev
+ desc: allow replicated pools to recover with < min_size active members
+ default: true
+ services:
+ - osd
+ with_legacy: true
+# cap on # of inc maps we send to peers, clients
+- name: osd_map_share_max_epochs
+ type: int
+ level: advanced
+ default: 40
+ with_legacy: true
+- name: osd_map_cache_size
+ type: int
+ level: advanced
+ default: 50
+ fmt_desc: The number of OSD maps to keep cached.
+ with_legacy: true
+- name: osd_pg_epoch_max_lag_factor
+ type: float
+ level: advanced
+ desc: Max multiple of the map cache that PGs can lag before we throttle map ingest
+ default: 2
+ see_also:
+ - osd_map_cache_size
+- name: osd_inject_bad_map_crc_probability
+ type: float
+ level: dev
+ default: 0
+ with_legacy: true
+- name: osd_inject_failure_on_pg_removal
+ type: bool
+ level: dev
+ default: false
+ with_legacy: true
+# shut down the OSD if its status flips more than max_markdown_count times within the most recent max_markdown_period seconds
+- name: osd_max_markdown_period
+ type: int
+ level: advanced
+ default: 10_min
+ with_legacy: true
+- name: osd_max_markdown_count
+ type: int
+ level: advanced
+ default: 5
+ with_legacy: true
+- name: osd_op_thread_timeout
+ type: int
+ level: advanced
+ default: 15
+ fmt_desc: The Ceph OSD Daemon operation thread timeout in seconds.
+ with_legacy: true
+- name: osd_op_thread_suicide_timeout
+ type: int
+ level: advanced
+ default: 150
+ with_legacy: true
+- name: osd_op_pq_max_tokens_per_priority
+ type: uint
+ level: advanced
+ default: 4_M
+ with_legacy: true
+- name: osd_op_pq_min_cost
+ type: size
+ level: advanced
+ default: 64_K
+ with_legacy: true
+# preserve clone_overlap during recovery/migration
+- name: osd_recover_clone_overlap
+ type: bool
+ level: advanced
+ default: true
+ fmt_desc: Preserves clone overlap during recovery. Should always be set
+ to ``true``.
+ with_legacy: true
+- name: osd_num_cache_shards
+ type: size
+ level: advanced
+ desc: The number of cache shards to use in the object store.
+ default: 32
+ flags:
+ - startup
+- name: osd_aggregated_slow_ops_logging
+ type: bool
+ level: advanced
+ desc: Allow OSD daemon to send an aggregated slow ops to the cluster log
+ fmt_desc: If set to ``true``, the OSD daemon will send slow ops information in
+ an aggregated format to the cluster log else sends every slow op to the
+ cluster log.
+ default: true
+ with_legacy: true
+- name: osd_op_num_threads_per_shard
+ type: int
+ level: advanced
+ default: 0
+ flags:
+ - startup
+ with_legacy: true
+- name: osd_op_num_threads_per_shard_hdd
+ type: int
+ level: advanced
+ default: 1
+ see_also:
+ - osd_op_num_threads_per_shard
+ flags:
+ - startup
+ with_legacy: true
+- name: osd_op_num_threads_per_shard_ssd
+ type: int
+ level: advanced
+ default: 2
+ see_also:
+ - osd_op_num_threads_per_shard
+ flags:
+ - startup
+ with_legacy: true
+- name: osd_op_num_shards
+ type: int
+ level: advanced
+ fmt_desc: The number of shards allocated for a given OSD. Each shard has its own processing queue.
+ PGs on the OSD are distributed evenly across the shards. This setting overrides _ssd and _hdd if
+ non-zero.
+ default: 0
+ flags:
+ - startup
+ with_legacy: true
+- name: osd_op_num_shards_hdd
+ type: int
+ level: advanced
+ fmt_desc: the number of shards allocated for a given OSD (for rotational media).
+ default: 5
+ see_also:
+ - osd_op_num_shards
+ flags:
+ - startup
+ with_legacy: true
+- name: osd_op_num_shards_ssd
+ type: int
+ level: advanced
+ fmt_desc: the number of shards allocated for a given OSD (for solid state media).
+ default: 8
+ see_also:
+ - osd_op_num_shards
+ flags:
+ - startup
+ with_legacy: true
+- name: osd_skip_data_digest
+ type: bool
+ level: dev
+ desc: Do not store full-object checksums if the backend (bluestore) does its own
+ checksums. Only usable with all BlueStore OSDs.
+ default: false
+# PrioritizedQueue (prio), Weighted Priority Queue (wpq ; default),
+# mclock_opclass, mclock_client, or debug_random. "mclock_opclass"
+# and "mclock_client" are based on the mClock/dmClock algorithm
+# (Gulati, et al. 2010). "mclock_opclass" prioritizes based on the
+# class the operation belongs to. "mclock_client" does the same but
+# also works to enforce fairness between clients. "debug_random"
+# chooses among all four with equal probability.
+- name: osd_op_queue
+ type: str
+ level: advanced
+ desc: which operation priority queue algorithm to use
+ long_desc: which operation priority queue algorithm to use
+ fmt_desc: This sets the type of queue to be used for prioritizing ops
+ within each OSD. Both queues feature a strict sub-queue which is
+ dequeued before the normal queue. The normal queue is different
+ between implementations. The WeightedPriorityQueue (``wpq``)
+ dequeues operations in relation to their priorities to prevent
+ starvation of any queue. WPQ should help in cases where a few OSDs
+ are more overloaded than others. The mClockQueue
+ (``mclock_scheduler``) prioritizes operations based on which class
+ they belong to (recovery, scrub, snaptrim, client op, osd subop).
+ See `QoS Based on mClock`_. Requires a restart.
+ default: mclock_scheduler
+ see_also:
+ - osd_op_queue_cut_off
+ enum_values:
+ - wpq
+ - mclock_scheduler
+ - debug_random
+ with_legacy: true
+# Min priority to go to strict queue. (low, high)
+- name: osd_op_queue_cut_off
+ type: str
+ level: advanced
+ desc: the threshold between high priority ops and low priority ops
+ long_desc: the threshold between high priority ops that use strict priority ordering
+ and low priority ops that use a fairness algorithm that may or may not incorporate
+ priority
+ fmt_desc: This selects which priority ops will be sent to the strict
+ queue versus the normal queue. The ``low`` setting sends all
+ replication ops and higher to the strict queue, while the ``high``
+ option sends only replication acknowledgment ops and higher to
+ the strict queue. Setting this to ``high`` should help when a few
+ OSDs in the cluster are very busy especially when combined with
+ ``wpq`` in the ``osd_op_queue`` setting. OSDs that are very busy
+ handling replication traffic could starve primary client traffic
+ on these OSDs without these settings. Requires a restart.
+ default: high
+ see_also:
+ - osd_op_queue
+ enum_values:
+ - low
+ - high
+ - debug_random
+ with_legacy: true
+- name: osd_mclock_scheduler_client_res
+ type: float
+ level: advanced
+ desc: IO proportion reserved for each client (default). The default value
+ of 0 specifies the lowest possible reservation. Any value greater than
+ 0 and up to 1.0 specifies the minimum IO proportion to reserve for each
+ client in terms of a fraction of the OSD's maximum IOPS capacity.
+ long_desc: Only considered for osd_op_queue = mclock_scheduler
+ fmt_desc: IO proportion reserved for each client (default).
+ default: 0
+ min: 0
+ max: 1.0
+ see_also:
+ - osd_op_queue
+- name: osd_mclock_scheduler_client_wgt
+ type: uint
+ level: advanced
+ desc: IO share for each client (default) over reservation
+ long_desc: Only considered for osd_op_queue = mclock_scheduler
+ fmt_desc: IO share for each client (default) over reservation.
+ default: 1
+ see_also:
+ - osd_op_queue
+- name: osd_mclock_scheduler_client_lim
+ type: float
+ level: advanced
+ desc: IO limit for each client (default) over reservation. The default
+ value of 0 specifies no limit enforcement, which means each client can
+ use the maximum possible IOPS capacity of the OSD. Any value greater
+ than 0 and up to 1.0 specifies the upper IO limit over reservation
+ that each client receives in terms of a fraction of the OSD's
+ maximum IOPS capacity.
+ long_desc: Only considered for osd_op_queue = mclock_scheduler
+ fmt_desc: IO limit for each client (default) over reservation.
+ default: 0
+ min: 0
+ max: 1.0
+ see_also:
+ - osd_op_queue
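+# Worked example (illustrative only) for the three client knobs above: on an
+# HDD OSD rated at osd_mclock_max_capacity_iops_hdd = 315, client_res = 0.2
+# would reserve about 0.2 * 315 = 63 IOPS and client_lim = 0.8 would cap the
+# client class near 0.8 * 315 = 252 IOPS, with client_wgt sharing whatever
+# capacity remains between the two bounds.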
+- name: osd_mclock_scheduler_background_recovery_res
+ type: float
+ level: advanced
+ desc: IO proportion reserved for background recovery (default). The
+ default value of 0 specifies the lowest possible reservation. Any value
+ greater than 0 and up to 1.0 specifies the minimum IO proportion to
+ reserve for background recovery operations in terms of a fraction of
+ the OSD's maximum IOPS capacity.
+ long_desc: Only considered for osd_op_queue = mclock_scheduler
+ fmt_desc: IO proportion reserved for background recovery (default).
+ default: 0
+ min: 0
+ max: 1.0
+ see_also:
+ - osd_op_queue
+- name: osd_mclock_scheduler_background_recovery_wgt
+ type: uint
+ level: advanced
+ desc: IO share for each background recovery over reservation
+ long_desc: Only considered for osd_op_queue = mclock_scheduler
+ fmt_desc: IO share for each background recovery over reservation.
+ default: 1
+ see_also:
+ - osd_op_queue
+- name: osd_mclock_scheduler_background_recovery_lim
+ type: float
+ level: advanced
+ desc: IO limit for background recovery over reservation. The default
+ value of 0 specifies no limit enforcement, which means background
+ recovery operation can use the maximum possible IOPS capacity of the
+ OSD. Any value greater than 0 and up to 1.0 specifies the upper IO
+ limit over reservation that background recovery operation receives in
+ terms of a fraction of the OSD's maximum IOPS capacity.
+ long_desc: Only considered for osd_op_queue = mclock_scheduler
+ fmt_desc: IO limit for background recovery over reservation.
+ default: 0
+ min: 0
+ max: 1.0
+ see_also:
+ - osd_op_queue
+- name: osd_mclock_scheduler_background_best_effort_res
+ type: float
+ level: advanced
+ desc: IO proportion reserved for background best_effort (default). The
+ default value of 0 specifies the lowest possible reservation. Any value
+ greater than 0 and up to 1.0 specifies the minimum IO proportion to
+ reserve for background best_effort operations in terms of a fraction
+ of the OSD's maximum IOPS capacity.
+ long_desc: Only considered for osd_op_queue = mclock_scheduler
+ fmt_desc: IO proportion reserved for background best_effort (default).
+ default: 0
+ min: 0
+ max: 1.0
+ see_also:
+ - osd_op_queue
+- name: osd_mclock_scheduler_background_best_effort_wgt
+ type: uint
+ level: advanced
+ desc: IO share for each background best_effort over reservation
+ long_desc: Only considered for osd_op_queue = mclock_scheduler
+ fmt_desc: IO share for each background best_effort over reservation.
+ default: 1
+ see_also:
+ - osd_op_queue
+- name: osd_mclock_scheduler_background_best_effort_lim
+ type: float
+ level: advanced
+ desc: IO limit for background best_effort over reservation. The default
+ value of 0 specifies no limit enforcement, which means background
+ best_effort operation can use the maximum possible IOPS capacity of the
+ OSD. Any value greater than 0 and up to 1.0 specifies the upper IO
+ limit over reservation that background best_effort operation receives
+ in terms of a fraction of the OSD's maximum IOPS capacity.
+ long_desc: Only considered for osd_op_queue = mclock_scheduler
+ fmt_desc: IO limit for background best_effort over reservation.
+ default: 0
+ min: 0
+ max: 1.0
+ see_also:
+ - osd_op_queue
+- name: osd_mclock_scheduler_anticipation_timeout
+ type: float
+ level: advanced
+ desc: mclock anticipation timeout in seconds
+ long_desc: the amount of time that mclock waits until the unused resource is forfeited
+ default: 0
+- name: osd_mclock_max_sequential_bandwidth_hdd
+ type: size
+ level: basic
+ desc: The maximum sequential bandwidth in bytes/second of the OSD (for
+ rotational media)
+ long_desc: This option specifies the maximum sequential bandwidth to consider
+ for an OSD whose underlying device type is rotational media. This is
+ considered by the mclock scheduler to derive the cost factor to be used in
+ QoS calculations. Only considered for osd_op_queue = mclock_scheduler
+ fmt_desc: The maximum sequential bandwidth in bytes/second to consider for the
+ OSD (for rotational media)
+ default: 150_M
+ flags:
+ - runtime
+- name: osd_mclock_max_sequential_bandwidth_ssd
+ type: size
+ level: basic
+ desc: The maximum sequential bandwidth in bytes/second of the OSD (for
+ solid state media)
+ long_desc: This option specifies the maximum sequential bandwidth to consider
+ for an OSD whose underlying device type is solid state media. This is
+ considered by the mclock scheduler to derive the cost factor to be used in
+ QoS calculations. Only considered for osd_op_queue = mclock_scheduler
+ fmt_desc: The maximum sequential bandwidth in bytes/second to consider for the
+ OSD (for solid state media)
+ default: 1200_M
+ flags:
+ - runtime
+- name: osd_mclock_max_capacity_iops_hdd
+ type: float
+ level: basic
+ desc: Max random write IOPS capacity (at 4 KiB block size) to consider per OSD
+ (for rotational media)
+ long_desc: This option specifies the max OSD random write IOPS capacity per
+ OSD. Contributes in QoS calculations when enabling a dmclock profile. Only
+ considered for osd_op_queue = mclock_scheduler
+ fmt_desc: Max random write IOPS capacity (at 4 KiB block size) to consider per
+ OSD (for rotational media)
+ default: 315
+ flags:
+ - runtime
+- name: osd_mclock_max_capacity_iops_ssd
+ type: float
+ level: basic
+ desc: Max random write IOPS capacity (at 4 KiB block size) to consider per OSD
+ (for solid state media)
+ long_desc: This option specifies the max OSD random write IOPS capacity per
+ OSD. Contributes in QoS calculations when enabling a dmclock profile. Only
+ considered for osd_op_queue = mclock_scheduler
+ fmt_desc: Max random write IOPS capacity (at 4 KiB block size) to consider per
+ OSD (for solid state media)
+ default: 21500
+ flags:
+ - runtime
+- name: osd_mclock_force_run_benchmark_on_init
+ type: bool
+ level: advanced
+ desc: Force run the OSD benchmark on OSD initialization/boot-up
+ long_desc: This option specifies whether the OSD benchmark must be run during
+ the OSD boot-up sequence even if historical data about the OSD iops capacity
+ is available in the MON config store. Enable this to refresh the OSD iops
+ capacity if the underlying device's performance characteristics have changed
+ significantly. Only considered for osd_op_queue = mclock_scheduler.
+ fmt_desc: Force run the OSD benchmark on OSD initialization/boot-up
+ default: false
+ see_also:
+ - osd_mclock_max_capacity_iops_hdd
+ - osd_mclock_max_capacity_iops_ssd
+ flags:
+ - startup
+- name: osd_mclock_skip_benchmark
+ type: bool
+ level: dev
+ desc: Skip the OSD benchmark on OSD initialization/boot-up
+ long_desc: This option specifies whether the OSD benchmark must be skipped during
+ the OSD boot-up sequence. Only considered for osd_op_queue = mclock_scheduler.
+ fmt_desc: Skip the OSD benchmark on OSD initialization/boot-up
+ default: false
+ see_also:
+ - osd_mclock_max_capacity_iops_hdd
+ - osd_mclock_max_capacity_iops_ssd
+ flags:
+ - runtime
+- name: osd_mclock_profile
+ type: str
+ level: advanced
+ desc: Which mclock profile to use
+ long_desc: This option specifies the mclock profile to enable - one among the set
+ of built-in profiles or a custom profile. Only considered for osd_op_queue = mclock_scheduler
+ fmt_desc: |
+ This sets the type of mclock profile to use for providing QoS
+ based on operations belonging to different classes (background
+ recovery, scrub, snaptrim, client op, osd subop). Once a built-in
+ profile is enabled, the lower level mclock resource control
+ parameters [*reservation, weight, limit*] and some Ceph
+ configuration parameters are set transparently. Note that the
+ above does not apply for the *custom* profile.
+ default: balanced
+ see_also:
+ - osd_op_queue
+ enum_values:
+ - balanced
+ - high_recovery_ops
+ - high_client_ops
+ - custom
+ flags:
+ - runtime
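+# Illustrative usage (a sketch using the generic Ceph config CLI, not part of
+# the option schema): switching profiles at runtime, e.g.
+#   ceph config set osd osd_mclock_profile high_client_ops
+# causes the built-in profile to transparently re-tune the mclock
+# [res, wgt, lim] options defined above.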
+- name: osd_mclock_override_recovery_settings
+ type: bool
+ level: advanced
+ desc: Setting this option enables the override of recovery/backfill limits
+ for the mClock scheduler.
+ long_desc: This option when set enables the override of the max recovery
+ active and the max backfills limits with mClock scheduler active. These
+ options are not modifiable when mClock scheduler is active. Any attempt
+ to modify these values without setting this option will reset the
+ recovery or backfill option back to its default value.
+ fmt_desc: Setting this option will enable the override of the
+ recovery/backfill limits for the mClock scheduler as defined by the
+ ``osd_recovery_max_active_hdd``, ``osd_recovery_max_active_ssd`` and
+ ``osd_max_backfills`` options.
+ default: false
+ see_also:
+ - osd_recovery_max_active_hdd
+ - osd_recovery_max_active_ssd
+ - osd_max_backfills
+ flags:
+ - runtime
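+# Illustrative sequence (a sketch, assuming the generic Ceph config CLI):
+# recovery/backfill limits can only be changed after enabling the override,
+# e.g.
+#   ceph config set osd osd_mclock_override_recovery_settings true
+#   ceph config set osd osd_max_backfills 3
+# without the first command, the second is reset back to its default as
+# described above.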
+- name: osd_mclock_iops_capacity_threshold_hdd
+ type: float
+ level: basic
+ desc: The threshold IOPS capacity (at 4 KiB block size) beyond which to ignore
+ the OSD bench results for an OSD (for rotational media)
+ long_desc: This option specifies the threshold IOPS capacity for an OSD under
+ which the OSD bench results can be considered for QoS calculations. Only
+ considered for osd_op_queue = mclock_scheduler
+ fmt_desc: The threshold IOPS capacity (at 4 KiB block size) beyond which to
+ ignore OSD bench results for an OSD (for rotational media)
+ default: 500
+ flags:
+ - runtime
+- name: osd_mclock_iops_capacity_threshold_ssd
+ type: float
+ level: basic
+ desc: The threshold IOPS capacity (at 4 KiB block size) beyond which to ignore
+ the OSD bench results for an OSD (for solid state media)
+ long_desc: This option specifies the threshold IOPS capacity for an OSD under
+ which the OSD bench results can be considered for QoS calculations. Only
+ considered for osd_op_queue = mclock_scheduler
+ fmt_desc: The threshold IOPS capacity (at 4 KiB block size) beyond which to
+ ignore OSD bench results for an OSD (for solid state media)
+ default: 80000
+ flags:
+ - runtime
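+# Worked example of the thresholds above: if the OSD bench at boot reports
+# 120000 4 KiB random-write IOPS for a solid state OSD (above the 80000
+# threshold), that result is ignored and the default
+# osd_mclock_max_capacity_iops_ssd value (21500) is used instead.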
+# Set to true for testing. Users should NOT set this.
+# If set to true even after reading enough shards to
+# decode the object, any error will be reported.
+- name: osd_read_ec_check_for_errors
+ type: bool
+ level: advanced
+ default: false
+ with_legacy: true
+- name: osd_recovery_delay_start
+ type: float
+ level: advanced
+ default: 0
+ fmt_desc: After peering completes, Ceph will delay for the specified number
+ of seconds before starting to recover RADOS objects.
+ with_legacy: true
+- name: osd_recovery_max_active
+ type: uint
+ level: advanced
+ desc: Number of simultaneous active recovery operations per OSD (overrides _ssd
+ and _hdd if non-zero)
+ fmt_desc: The number of active recovery requests per OSD at one time. More
+ requests will accelerate recovery, but the requests place an
+ increased load on the cluster.
+ note: This value is only used if it is non-zero. Normally it
+ is ``0``, which means that the ``hdd`` or ``ssd`` values
+ (below) are used, depending on the type of the primary
+ device backing the OSD.
+ default: 0
+ see_also:
+ - osd_recovery_max_active_hdd
+ - osd_recovery_max_active_ssd
+ flags:
+ - runtime
+ with_legacy: true
+- name: osd_recovery_max_active_hdd
+ type: uint
+ level: advanced
+ desc: Number of simultaneous active recovery operations per OSD (for rotational
+ devices)
+ fmt_desc: The number of active recovery requests per OSD at one time, if the
+ primary device is rotational.
+ default: 3
+ see_also:
+ - osd_recovery_max_active
+ - osd_recovery_max_active_ssd
+ flags:
+ - runtime
+ with_legacy: true
+- name: osd_recovery_max_active_ssd
+ type: uint
+ level: advanced
+ desc: Number of simultaneous active recovery operations per OSD (for non-rotational
+ solid state devices)
+ fmt_desc: The number of active recovery requests per OSD at one time, if the
+ primary device is non-rotational (i.e., an SSD).
+ default: 10
+ see_also:
+ - osd_recovery_max_active
+ - osd_recovery_max_active_hdd
+ flags:
+ - runtime
+ with_legacy: true
+- name: osd_recovery_max_single_start
+ type: uint
+ level: advanced
+ default: 1
+ fmt_desc: The maximum number of recovery operations per OSD that will be
+ newly started when an OSD is recovering.
+ with_legacy: true
+# max size of push chunk
+- name: osd_recovery_max_chunk
+ type: size
+ level: advanced
+ default: 8_M
+ fmt_desc: the maximum total size of data chunks a recovery op can carry.
+ with_legacy: true
+# max number of omap entries per chunk; 0 to disable limit
+- name: osd_recovery_max_omap_entries_per_chunk
+ type: uint
+ level: advanced
+ default: 8096
+ with_legacy: true
+# max size of a COPYFROM chunk
+- name: osd_copyfrom_max_chunk
+ type: size
+ level: advanced
+ default: 8_M
+ with_legacy: true
+# push cost per object
+- name: osd_push_per_object_cost
+ type: size
+ level: advanced
+ default: 1000
+ fmt_desc: the overhead for serving a push op
+ with_legacy: true
+# max size of push message
+- name: osd_max_push_cost
+ type: size
+ level: advanced
+ default: 8_M
+ with_legacy: true
+# max objects in single push op
+- name: osd_max_push_objects
+ type: uint
+ level: advanced
+ default: 10
+ with_legacy: true
+# Only use clone_overlap for recovery if there are fewer than
+# osd_recover_clone_overlap_limit entries in the overlap set
+- name: osd_recover_clone_overlap_limit
+ type: uint
+ level: advanced
+ default: 10
+ flags:
+ - runtime
+- name: osd_debug_feed_pullee
+ type: int
+ level: dev
+ desc: Feed a pullee, and force primary to pull a currently missing object from it
+ default: -1
+ with_legacy: true
+- name: osd_backfill_scan_min
+ type: int
+ level: advanced
+ default: 64
+ fmt_desc: The minimum number of objects per backfill scan.
+ with_legacy: true
+- name: osd_backfill_scan_max
+ type: int
+ level: advanced
+ default: 512
+ fmt_desc: The maximum number of objects per backfill scan.
+ with_legacy: true
+- name: osd_extblkdev_plugins
+ type: str
+ level: advanced
+ desc: extended block device plugins to load, providing compression feedback at runtime
+ default: vdo
+ flags:
+ - startup
+# minimum number of peers
+- name: osd_heartbeat_min_peers
+ type: int
+ level: advanced
+ default: 10
+ with_legacy: true
+- name: osd_delete_sleep
+ type: float
+ level: advanced
+ desc: Time in seconds to sleep before next removal transaction. This setting
+ overrides _ssd, _hdd, and _hybrid if non-zero.
+ fmt_desc: Time in seconds to sleep before the next removal transaction. This
+ throttles the PG deletion process.
+ default: 0
+ flags:
+ - runtime
+- name: osd_delete_sleep_hdd
+ type: float
+ level: advanced
+ desc: Time in seconds to sleep before next removal transaction for HDDs
+ default: 5
+ flags:
+ - runtime
+- name: osd_delete_sleep_ssd
+ type: float
+ level: advanced
+ desc: Time in seconds to sleep before next removal transaction for SSDs
+ default: 1
+ flags:
+ - runtime
+- name: osd_delete_sleep_hybrid
+ type: float
+ level: advanced
+ desc: Time in seconds to sleep before next removal transaction when OSD data is on HDD
+ and OSD journal or WAL+DB is on SSD
+ default: 1
+ flags:
+ - runtime
+- name: osd_rocksdb_iterator_bounds_enabled
+ desc: Whether omap iterator bounds are applied to rocksdb iterator ReadOptions
+ type: bool
+ level: dev
+ default: true
+ with_legacy: true
diff --git a/src/common/options/rbd-mirror.yaml.in b/src/common/options/rbd-mirror.yaml.in
new file mode 100644
index 000000000..93c059ff2
--- /dev/null
+++ b/src/common/options/rbd-mirror.yaml.in
@@ -0,0 +1,210 @@
+# -*- mode: YAML -*-
+---
+
+options:
+- name: rbd_mirror_journal_commit_age
+ type: float
+ level: advanced
+ desc: commit time interval, seconds
+ default: 5
+ services:
+ - rbd-mirror
+- name: rbd_mirror_journal_poll_age
+ type: float
+ level: advanced
+ desc: maximum age (in seconds) between successive journal polls
+ default: 5
+ services:
+ - rbd-mirror
+- name: rbd_mirror_sync_point_update_age
+ type: float
+ level: advanced
+ desc: number of seconds between each update of the image sync point object number
+ default: 30
+ services:
+ - rbd-mirror
+- name: rbd_mirror_concurrent_image_syncs
+ type: uint
+ level: advanced
+ desc: maximum number of image syncs in parallel
+ default: 5
+ services:
+ - rbd-mirror
+- name: rbd_mirror_pool_replayers_refresh_interval
+ type: uint
+ level: advanced
+ desc: interval to refresh peers in rbd-mirror daemon
+ default: 30
+ services:
+ - rbd-mirror
+- name: rbd_mirror_concurrent_image_deletions
+ type: uint
+ level: advanced
+ desc: maximum number of image deletions in parallel
+ default: 1
+ services:
+ - rbd-mirror
+ min: 1
+- name: rbd_mirror_delete_retry_interval
+ type: float
+ level: advanced
+ desc: interval to check and retry the failed deletion requests
+ default: 30
+ services:
+ - rbd-mirror
+- name: rbd_mirror_image_state_check_interval
+ type: uint
+ level: advanced
+ desc: interval to get images from pool watcher and set sources in replayer
+ default: 30
+ services:
+ - rbd-mirror
+ min: 1
+- name: rbd_mirror_leader_heartbeat_interval
+ type: uint
+ level: advanced
+ desc: interval (in seconds) between mirror leader heartbeats
+ default: 5
+ services:
+ - rbd-mirror
+ min: 1
+- name: rbd_mirror_leader_max_missed_heartbeats
+ type: uint
+ level: advanced
+ desc: number of missed heartbeats for non-lock owner to attempt to acquire lock
+ default: 2
+ services:
+ - rbd-mirror
+- name: rbd_mirror_leader_max_acquire_attempts_before_break
+ type: uint
+ level: advanced
+ desc: number of failed attempts to acquire lock after missing heartbeats before
+ breaking lock
+ default: 3
+ services:
+ - rbd-mirror
+- name: rbd_mirror_image_policy_type
+ type: str
+ level: advanced
+ desc: active/active policy type for mapping images to instances
+ default: simple
+ services:
+ - rbd-mirror
+ enum_values:
+ - none
+ - simple
+- name: rbd_mirror_image_policy_migration_throttle
+ type: uint
+ level: advanced
+ desc: number of seconds after which an image can be reshuffled (migrated) again
+ default: 300
+ services:
+ - rbd-mirror
+- name: rbd_mirror_image_policy_update_throttle_interval
+ type: float
+ level: advanced
+ desc: interval (in seconds) to throttle images for mirror daemon peer updates
+ default: 1
+ services:
+ - rbd-mirror
+ min: 1
+- name: rbd_mirror_image_policy_rebalance_timeout
+ type: float
+ level: advanced
+ desc: number of seconds the policy should be idle before triggering a reshuffle (rebalance)
+ of images
+ default: 0
+ services:
+ - rbd-mirror
+- name: rbd_mirror_perf_stats_prio
+ type: int
+ level: advanced
+ desc: Priority level for mirror daemon replication perf counters
+ long_desc: The daemon will send perf counter data to the manager daemon if the priority
+ is not lower than mgr_stats_threshold.
+ default: 5
+ services:
+ - rbd-mirror
+ min: 0
+ max: 11
+- name: rbd_mirror_image_perf_stats_prio
+ type: int
+ level: advanced
+ desc: Priority level for mirror daemon per-image replication perf counters
+ long_desc: The daemon will send per-image perf counter data to the manager daemon
+ if the priority is not lower than mgr_stats_threshold.
+ default: 5
+ services:
+ - rbd-mirror
+ min: 0
+ max: 11
+- name: rbd_mirror_memory_autotune
+ type: bool
+ level: dev
+ desc: Automatically tune the ratio of caches while respecting min values.
+ default: true
+ services:
+ - rbd-mirror
+ see_also:
+ - rbd_mirror_memory_target
+- name: rbd_mirror_memory_target
+ type: size
+ level: basic
+ desc: When tcmalloc and cache autotuning is enabled, try to keep this many bytes
+ mapped in memory.
+ default: 4_G
+ services:
+ - rbd-mirror
+ see_also:
+ - rbd_mirror_memory_autotune
+- name: rbd_mirror_memory_base
+ type: size
+ level: dev
+ desc: When tcmalloc and cache autotuning is enabled, estimate the minimum amount
+ of memory in bytes the rbd-mirror daemon will need.
+ default: 768_M
+ services:
+ - rbd-mirror
+ see_also:
+ - rbd_mirror_memory_autotune
+- name: rbd_mirror_memory_expected_fragmentation
+ type: float
+ level: dev
+ desc: When tcmalloc and cache autotuning is enabled, estimate the percent of memory
+ fragmentation.
+ default: 0.15
+ services:
+ - rbd-mirror
+ see_also:
+ - rbd_mirror_memory_autotune
+ min: 0
+ max: 1
+- name: rbd_mirror_memory_cache_min
+ type: size
+ level: dev
+ desc: When tcmalloc and cache autotuning is enabled, set the minimum amount of memory
+ used for cache.
+ default: 128_M
+ services:
+ - rbd-mirror
+ see_also:
+ - rbd_mirror_memory_autotune
+- name: rbd_mirror_memory_cache_resize_interval
+ type: float
+ level: dev
+ desc: When tcmalloc and cache autotuning is enabled, wait this many seconds between
+ resizing caches.
+ default: 5
+ services:
+ - rbd-mirror
+ see_also:
+ - rbd_mirror_memory_autotune
+- name: rbd_mirror_memory_cache_autotune_interval
+ type: float
+ level: dev
+ desc: The number of seconds to wait between rebalances when cache autotune is enabled.
+ default: 30
+ services:
+ - rbd-mirror
+ see_also:
+ - rbd_mirror_memory_autotune
diff --git a/src/common/options/rbd.yaml.in b/src/common/options/rbd.yaml.in
new file mode 100644
index 000000000..c2da27aaa
--- /dev/null
+++ b/src/common/options/rbd.yaml.in
@@ -0,0 +1,881 @@
+# -*- mode: YAML -*-
+---
+
+headers: |
+ #include <bit>
+ #include <regex>
+ // rbd feature and io operation validation
+ #include "include/stringify.h"
+ #include "common/strtol.h"
+ #include "librbd/Features.h"
+ #include "librbd/io/IoOperations.h"
+options:
+- name: rbd_default_pool
+ type: str
+ level: advanced
+ desc: default pool for storing new images
+ default: rbd
+ services:
+ - rbd
+ validator: |
+ [](std::string *value, std::string *error_message) {
+ std::regex pattern("^[^@/]+$");
+ if (!std::regex_match (*value, pattern)) {
+ *value = "rbd";
+ *error_message = "invalid RBD default pool, resetting to 'rbd'";
+ }
+ return 0;
+ }
+- name: rbd_default_data_pool
+ type: str
+ level: advanced
+ desc: default pool for storing data blocks for new images
+ services:
+ - rbd
+ validator: |
+ [](std::string *value, std::string *error_message) {
+ std::regex pattern("^[^@/]*$");
+ if (!std::regex_match (*value, pattern)) {
+ *value = "";
+ *error_message = "ignoring invalid RBD data pool";
+ }
+ return 0;
+ }
+- name: rbd_default_features
+ type: str
+ level: advanced
+ desc: default v2 image features for new images
+ long_desc: 'RBD features are only applicable for v2 images. This setting accepts
+ either an integer bitmask value or comma-delimited string of RBD feature names.
+ This setting is always internally stored as an integer bitmask value. The mapping
+ between feature bitmask value and feature name is as follows: +1 -> layering,
+ +2 -> striping, +4 -> exclusive-lock, +8 -> object-map, +16 -> fast-diff, +32
+ -> deep-flatten, +64 -> journaling, +128 -> data-pool'
+ default: layering,exclusive-lock,object-map,fast-diff,deep-flatten
+ services:
+ - rbd
+ flags:
+ - runtime
+ validator: |
+ [](std::string *value, std::string *error_message) {
+ std::stringstream ss;
+ uint64_t features = librbd::rbd_features_from_string(*value, &ss);
+ // Leave this in integer form to avoid breaking Cinder. Someday
+ // we would like to present this in string form instead...
+ *value = stringify(features);
+ if (ss.str().size()) {
+ return -EINVAL;
+ }
+ return 0;
+ }
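+# Worked example of the feature bitmask mapping above: the default string
+# "layering,exclusive-lock,object-map,fast-diff,deep-flatten" is stored as
+# 1 + 4 + 8 + 16 + 32 = 61, so configuring either the string or the integer
+# 61 is equivalent.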
+- name: rbd_op_threads
+ type: uint
+ level: advanced
+ desc: number of threads to utilize for internal processing
+ default: 1
+ services:
+ - rbd
+- name: rbd_op_thread_timeout
+ type: uint
+ level: advanced
+ desc: time in seconds for detecting a hung thread
+ default: 60
+ services:
+ - rbd
+- name: rbd_disable_zero_copy_writes
+ type: bool
+ level: advanced
+ desc: Disable the use of zero-copy writes to ensure unstable writes from clients
+ cannot cause a CRC mismatch
+ default: true
+ services:
+ - rbd
+- name: rbd_non_blocking_aio
+ type: bool
+ level: advanced
+ desc: process AIO ops from a dispatch thread to prevent blocking
+ default: true
+ services:
+ - rbd
+- name: rbd_cache
+ type: bool
+ level: advanced
+ desc: whether to enable caching (writeback unless rbd_cache_max_dirty is 0)
+ fmt_desc: Enable caching for RADOS Block Device (RBD).
+ default: true
+ services:
+ - rbd
+- name: rbd_cache_policy
+ type: str
+ level: advanced
+ desc: cache policy for handling writes.
+ fmt_desc: Select the caching policy for librbd.
+ default: writearound
+ services:
+ - rbd
+ enum_values:
+ - writethrough
+ - writeback
+ - writearound
+- name: rbd_cache_writethrough_until_flush
+ type: bool
+ level: advanced
+ desc: whether to make writeback caching writethrough until a flush is called, to
+ ensure the librbd user sends flushes so that writeback is safe
+ fmt_desc: Start out in ``writethrough`` mode, and switch to ``writeback``
+ after the first flush request is received. Enabling is a
+ conservative but safe strategy in case VMs running on RBD volumes
+ are too old to send flushes, like the ``virtio`` driver in Linux
+ kernels older than 2.6.32.
+ default: true
+ services:
+ - rbd
+- name: rbd_cache_size
+ type: size
+ level: advanced
+ desc: cache size in bytes
+ fmt_desc: The per-volume RBD client cache size in bytes.
+ default: 32_M
+ policies: write-back and write-through
+ services:
+ - rbd
+- name: rbd_cache_max_dirty
+ type: size
+ level: advanced
+ desc: dirty limit in bytes - set to 0 for write-through caching
+ fmt_desc: The ``dirty`` limit in bytes at which the cache triggers write-back.
+ If ``0``, uses write-through caching.
+ default: 24_M
+ constraint: Must be less than ``rbd_cache_size``.
+ policies: write-around and write-back
+ services:
+ - rbd
+- name: rbd_cache_target_dirty
+ type: size
+ level: advanced
+ desc: target dirty limit in bytes
+ fmt_desc: The ``dirty target`` before the cache begins writing data to the data
+ storage. Does not block writes to the cache.
+ default: 16_M
+ constraint: Must be less than ``rbd_cache_max_dirty``.
+ policies: write-back
+ services:
+ - rbd
+- name: rbd_cache_max_dirty_age
+ type: float
+ level: advanced
+ desc: seconds in cache before writeback starts
+ fmt_desc: The number of seconds dirty data is in the cache before writeback starts.
+ default: 1
+ policies: write-back
+ services:
+ - rbd
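+# A minimal client-side ceph.conf sketch tying the cache options above
+# together (values are illustrative only and honor the constraints
+# target_dirty < max_dirty < cache_size):
+#   [client]
+#       rbd_cache = true
+#       rbd_cache_policy = writeback
+#       rbd_cache_size = 64M
+#       rbd_cache_max_dirty = 48M
+#       rbd_cache_target_dirty = 32M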
+- name: rbd_cache_max_dirty_object
+ type: uint
+ level: advanced
+ desc: dirty limit for objects - set to 0 to auto-calculate from rbd_cache_size
+ default: 0
+ services:
+ - rbd
+- name: rbd_cache_block_writes_upfront
+ type: bool
+ level: advanced
+ desc: whether to block writes to the cache before the aio_write call completes
+ default: false
+ services:
+ - rbd
+- name: rbd_parent_cache_enabled
+ type: bool
+ level: advanced
+ desc: whether to enable rbd shared ro cache
+ default: false
+ services:
+ - rbd
+- name: rbd_concurrent_management_ops
+ type: uint
+ level: advanced
+ desc: how many operations can be in flight for a management operation like deleting
+ or resizing an image
+ default: 10
+ services:
+ - rbd
+ min: 1
+- name: rbd_balance_snap_reads
+ type: bool
+ level: advanced
+ desc: distribute snap read requests to random OSD
+ default: false
+ services:
+ - rbd
+ see_also:
+ - rbd_read_from_replica_policy
+- name: rbd_localize_snap_reads
+ type: bool
+ level: advanced
+ desc: localize snap read requests to closest OSD
+ default: false
+ services:
+ - rbd
+ see_also:
+ - rbd_read_from_replica_policy
+- name: rbd_balance_parent_reads
+ type: bool
+ level: advanced
+ desc: distribute parent read requests to random OSD
+ default: false
+ services:
+ - rbd
+ see_also:
+ - rbd_read_from_replica_policy
+- name: rbd_localize_parent_reads
+ type: bool
+ level: advanced
+ desc: localize parent requests to closest OSD
+ default: false
+ services:
+ - rbd
+ see_also:
+ - rbd_read_from_replica_policy
+- name: rbd_sparse_read_threshold_bytes
+ type: size
+ level: advanced
+ desc: threshold for issuing a sparse-read
+ long_desc: minimum number of sequential bytes to read against an object before issuing
+ a sparse-read request to the cluster. 0 implies it must be a full object read
+ to issue a sparse-read, 1 implies always use sparse-read, and any value larger
+ than the maximum object size will disable sparse-read for all requests
+ default: 64_K
+ services:
+ - rbd
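+# Worked example of the threshold above: with the default 64 KiB, a 32 KiB
+# read is issued as a regular read, while a 128 KiB read against an object
+# becomes a sparse-read; a value of 1 makes every read a sparse-read.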
+- name: rbd_readahead_trigger_requests
+ type: uint
+ level: advanced
+ desc: number of sequential requests necessary to trigger readahead
+ default: 10
+ services:
+ - rbd
+- name: rbd_readahead_max_bytes
+ type: size
+ level: advanced
+ desc: set to 0 to disable readahead
+ fmt_desc: Maximum size of a read-ahead request. If zero, read-ahead is disabled.
+ default: 512_K
+ services:
+ - rbd
+- name: rbd_readahead_disable_after_bytes
+ type: size
+ level: advanced
+ desc: how many bytes are read in total before readahead is disabled
+ fmt_desc: After this many bytes have been read from an RBD image, read-ahead
+ is disabled for that image until it is closed. This allows the
+ guest OS to take over read-ahead once it is booted. If zero,
+ read-ahead stays enabled.
+ default: 50_M
+ services:
+ - rbd
+- name: rbd_clone_copy_on_read
+ type: bool
+ level: advanced
+ desc: copy-up parent image blocks to clone upon read request
+ default: false
+ services:
+ - rbd
+- name: rbd_blocklist_on_break_lock
+ type: bool
+ level: advanced
+ desc: whether to blocklist clients whose lock was broken
+ default: true
+ services:
+ - rbd
+- name: rbd_blocklist_expire_seconds
+ type: uint
+ level: advanced
+ desc: number of seconds to blocklist - set to 0 for OSD default
+ default: 0
+ services:
+ - rbd
+- name: rbd_request_timed_out_seconds
+ type: uint
+ level: advanced
+ desc: number of seconds before maintenance request times out
+ default: 30
+ services:
+ - rbd
+- name: rbd_skip_partial_discard
+ type: bool
+ level: advanced
+ desc: skip discard (zero) of unaligned extents within an object
+ default: true
+ services:
+ - rbd
+- name: rbd_discard_granularity_bytes
+ type: uint
+ level: advanced
+ desc: minimum aligned size of discard operations
+ default: 64_K
+ services:
+ - rbd
+ min: 4_K
+ max: 32_M
+ validator: |
+ [](std::string *value, std::string *error_message) {
+ uint64_t f = strict_si_cast<uint64_t>(*value, error_message);
+ if (!error_message->empty()) {
+ return -EINVAL;
+ } else if (!std::has_single_bit(f)) {
+ *error_message = "value must be a power of two";
+ return -EINVAL;
+ }
+ return 0;
+ }
+- name: rbd_enable_alloc_hint
+ type: bool
+ level: advanced
+ desc: when writing an object, issue a hint to the OSD backend indicating the
+ expected object size
+ default: true
+ services:
+ - rbd
+- name: rbd_compression_hint
+ type: str
+ level: basic
+ desc: Compression hint to send to the OSDs during writes
+ fmt_desc: Hint to send to the OSDs on write operations. If set to
+ ``compressible`` and the OSD ``bluestore_compression_mode``
+ setting is ``passive``, the OSD will attempt to compress data.
+ If set to ``incompressible`` and the OSD compression setting
+ is ``aggressive``, the OSD will not attempt to compress data.
+ default: none
+ services:
+ - rbd
+ enum_values:
+ - none
+ - compressible
+ - incompressible
+ flags:
+ - runtime
+- name: rbd_read_from_replica_policy
+ type: str
+ level: basic
+ desc: Read replica policy sent to the OSDs during reads
+ fmt_desc: |
+ Policy for determining which OSD will receive read operations.
+ If set to ``default``, each PG's primary OSD will always be used
+ for read operations. If set to ``balance``, read operations will
+ be sent to a randomly selected OSD within the replica set. If set
+ to ``localize``, read operations will be sent to the closest OSD
+ as determined by the CRUSH map. Unlike ``rbd_balance_snap_reads``
+ and ``rbd_localize_snap_reads`` or ``rbd_balance_parent_reads`` and
+ ``rbd_localize_parent_reads``, it affects all read operations, not
+ just snap or parent. Note: this feature requires the cluster to
+ be configured with a minimum compatible OSD release of Octopus.
+ default: default
+ services:
+ - rbd
+ enum_values:
+ - default
+ - balance
+ - localize
+ flags:
+ - runtime
+- name: rbd_tracing
+ type: bool
+ level: advanced
+ desc: true if LTTng-UST tracepoints should be enabled
+ default: false
+ services:
+ - rbd
+- name: rbd_blkin_trace_all
+ type: bool
+ level: advanced
+ desc: create a blkin trace for all RBD requests
+ default: false
+ services:
+ - rbd
+- name: rbd_validate_pool
+ type: bool
+ level: advanced
+ desc: validate empty pools for RBD compatibility
+ default: true
+ services:
+ - rbd
+- name: rbd_validate_names
+ type: bool
+ level: advanced
+ desc: validate new image names for RBD compatibility
+ default: true
+ services:
+ - rbd
+- name: rbd_invalidate_object_map_on_timeout
+ type: bool
+ level: dev
+ desc: true if the object map should be invalidated when a load or update times out
+ default: true
+ services:
+ - rbd
+- name: rbd_auto_exclusive_lock_until_manual_request
+ type: bool
+ level: advanced
+ desc: automatically acquire/release exclusive lock until it is explicitly requested
+ default: true
+ services:
+ - rbd
+- name: rbd_move_to_trash_on_remove
+ type: bool
+ level: basic
+ desc: automatically move images to the trash when deleted
+ default: false
+ services:
+ - rbd
+- name: rbd_move_to_trash_on_remove_expire_seconds
+ type: uint
+ level: basic
+ desc: default number of seconds to protect deleted images in the trash
+ default: 0
+ services:
+ - rbd
+- name: rbd_move_parent_to_trash_on_remove
+ type: bool
+ level: basic
+ desc: move parent with clone format v2 children to the trash when deleted
+ default: false
+ services:
+ - rbd
+- name: rbd_mirroring_resync_after_disconnect
+ type: bool
+ level: advanced
+ desc: automatically start image resync after mirroring is disconnected due to being
+ laggy
+ default: false
+ services:
+ - rbd
+- name: rbd_mirroring_delete_delay
+ type: uint
+ level: advanced
+ desc: time-delay in seconds for rbd-mirror delete propagation
+ default: 0
+ services:
+ - rbd
+- name: rbd_mirroring_replay_delay
+ type: uint
+ level: advanced
+ desc: time-delay in seconds for rbd-mirror asynchronous replication
+ default: 0
+ services:
+ - rbd
+- name: rbd_mirroring_max_mirroring_snapshots
+ type: uint
+ level: advanced
+ desc: mirroring snapshots limit
+ default: 5
+ services:
+ - rbd
+ min: 3
+- name: rbd_default_format
+ type: uint
+ level: advanced
+ desc: default image format for new images
+ default: 2
+ services:
+ - rbd
+- name: rbd_default_order
+ type: uint
+ level: advanced
+ desc: default order (data block object size) for new images
+ long_desc: This configures the default object size for new images. The value is used as a
+ power of two, meaning ``default_object_size = 2 ^ rbd_default_order``. Configure a value
+ between 12 and 25 (inclusive), translating to a 4 KiB lower and a 32 MiB upper limit.
+ default: 22
+ services:
+ - rbd
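+# Worked example: the default order of 22 yields 2^22 = 4 MiB data objects
+# for new images; an order of 25 would yield the 32 MiB maximum.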
+- name: rbd_default_stripe_count
+ type: uint
+ level: advanced
+ desc: default stripe count for new images
+ default: 0
+ services:
+ - rbd
+- name: rbd_default_stripe_unit
+ type: size
+ level: advanced
+ desc: default stripe width for new images
+ default: 0
+ services:
+ - rbd
+- name: rbd_default_map_options
+ type: str
+ level: advanced
+ desc: default krbd map options
+ services:
+ - rbd
+- name: rbd_default_clone_format
+ type: str
+ level: advanced
+ desc: default internal format for handling clones
+ long_desc: This sets the internal format for tracking cloned images. The setting
+ of '1' requires attaching to protected snapshots that cannot be removed until
+ the clone is removed/flattened. The setting of '2' will allow clones to be attached
+ to any snapshot and permits removing in-use parent snapshots but requires Mimic
+ or later clients. The default setting of 'auto' will use the v2 format if the
+ cluster is configured to require mimic or later clients.
+ default: auto
+ services:
+ - rbd
+ enum_values:
+ - '1'
+ - '2'
+ - auto
+ flags:
+ - runtime
+- name: rbd_journal_order
+ type: uint
+ level: advanced
+ desc: default order (object size) for journal data objects
+ default: 24
+ services:
+ - rbd
+ min: 12
+ max: 26
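+# Worked example: the default journal order of 24 yields 2^24 = 16 MiB
+# journal data objects, appended across rbd_journal_splay_width active
+# objects.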
+- name: rbd_journal_splay_width
+ type: uint
+ level: advanced
+ desc: number of active journal objects
+ default: 4
+ services:
+ - rbd
+- name: rbd_journal_commit_age
+ type: float
+ level: advanced
+ desc: commit time interval, seconds
+ default: 5
+ services:
+ - rbd
+- name: rbd_journal_object_writethrough_until_flush
+ type: bool
+ level: advanced
+ desc: when enabled, the rbd_journal_object_flush* configuration options are ignored
+ until the first flush so that batched journal IO is known to be safe for consistency
+ default: true
+ services:
+ - rbd
+- name: rbd_journal_object_flush_interval
+ type: uint
+ level: advanced
+ desc: maximum number of pending commits per journal object
+ default: 0
+ services:
+ - rbd
+- name: rbd_journal_object_flush_bytes
+ type: size
+ level: advanced
+ desc: maximum number of pending bytes per journal object
+ default: 1_M
+ services:
+ - rbd
+- name: rbd_journal_object_flush_age
+ type: float
+ level: advanced
+ desc: maximum age (in seconds) for pending commits
+ default: 0
+ services:
+ - rbd
+- name: rbd_journal_object_max_in_flight_appends
+ type: uint
+ level: advanced
+ desc: maximum number of in-flight appends per journal object
+ default: 0
+ services:
+ - rbd
+- name: rbd_journal_pool
+ type: str
+ level: advanced
+ desc: pool for journal objects
+ services:
+ - rbd
+- name: rbd_journal_max_payload_bytes
+ type: size
+ level: advanced
+ desc: maximum journal payload size before splitting
+ default: 16_K
+ services:
+ - rbd
+- name: rbd_journal_max_concurrent_object_sets
+ type: uint
+ level: advanced
+ desc: maximum number of object sets a journal client can be behind before it is
+ automatically unregistered
+ default: 0
+ services:
+ - rbd
+- name: rbd_qos_iops_limit
+ type: uint
+ level: advanced
+ desc: the desired limit of IO operations per second
+ default: 0
+ services:
+ - rbd
+- name: rbd_qos_bps_limit
+ type: uint
+ level: advanced
+ desc: the desired limit of IO bytes per second
+ default: 0
+ services:
+ - rbd
+- name: rbd_qos_read_iops_limit
+ type: uint
+ level: advanced
+ desc: the desired limit of read operations per second
+ default: 0
+ services:
+ - rbd
+- name: rbd_qos_write_iops_limit
+ type: uint
+ level: advanced
+ desc: the desired limit of write operations per second
+ default: 0
+ services:
+ - rbd
+- name: rbd_qos_read_bps_limit
+ type: uint
+ level: advanced
+ desc: the desired limit of read bytes per second
+ default: 0
+ services:
+ - rbd
+- name: rbd_qos_write_bps_limit
+ type: uint
+ level: advanced
+ desc: the desired limit of write bytes per second
+ default: 0
+ services:
+ - rbd
+- name: rbd_qos_iops_burst
+ type: uint
+ level: advanced
+ desc: the desired burst limit of IO operations
+ default: 0
+ services:
+ - rbd
+- name: rbd_qos_bps_burst
+ type: uint
+ level: advanced
+ desc: the desired burst limit of IO bytes
+ default: 0
+ services:
+ - rbd
+- name: rbd_qos_read_iops_burst
+ type: uint
+ level: advanced
+ desc: the desired burst limit of read operations
+ default: 0
+ services:
+ - rbd
+- name: rbd_qos_write_iops_burst
+ type: uint
+ level: advanced
+ desc: the desired burst limit of write operations
+ default: 0
+ services:
+ - rbd
+- name: rbd_qos_read_bps_burst
+ type: uint
+ level: advanced
+ desc: the desired burst limit of read bytes
+ default: 0
+ services:
+ - rbd
+- name: rbd_qos_write_bps_burst
+ type: uint
+ level: advanced
+ desc: the desired burst limit of write bytes
+ default: 0
+ services:
+ - rbd
+- name: rbd_qos_iops_burst_seconds
+ type: uint
+ level: advanced
+ desc: the desired burst duration in seconds of IO operations
+ default: 1
+ services:
+ - rbd
+ min: 1
+- name: rbd_qos_bps_burst_seconds
+ type: uint
+ level: advanced
+ desc: the desired burst duration in seconds of IO bytes
+ default: 1
+ services:
+ - rbd
+ min: 1
+- name: rbd_qos_read_iops_burst_seconds
+ type: uint
+ level: advanced
+ desc: the desired burst duration in seconds of read operations
+ default: 1
+ services:
+ - rbd
+ min: 1
+- name: rbd_qos_write_iops_burst_seconds
+ type: uint
+ level: advanced
+ desc: the desired burst duration in seconds of write operations
+ default: 1
+ services:
+ - rbd
+ min: 1
+- name: rbd_qos_read_bps_burst_seconds
+ type: uint
+ level: advanced
+ desc: the desired burst duration in seconds of read bytes
+ default: 1
+ services:
+ - rbd
+ min: 1
+- name: rbd_qos_write_bps_burst_seconds
+ type: uint
+ level: advanced
+ desc: the desired burst duration in seconds of write bytes
+ default: 1
+ services:
+ - rbd
+ min: 1
+- name: rbd_qos_schedule_tick_min
+ type: uint
+ level: advanced
+ desc: minimum schedule tick (in milliseconds) for QoS
+ long_desc: This determines the minimum time (in milliseconds) at which I/Os
+ can become unblocked if the limit of a throttle is hit. In terms of the
+ token bucket algorithm, this is the minimum interval at which tokens are
+ added to the bucket.
+ default: 50
+ services:
+ - rbd
+ min: 1
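+# Worked example of the token bucket note above (illustrative numbers): with
+# rbd_qos_bps_limit = 100 MiB/s and the default 50 ms tick, roughly
+# 100 MiB * 0.050 s = 5 MiB of tokens is added per tick, so a throttled IO
+# waits at most about one tick for tokens to become available.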
+- name: rbd_qos_exclude_ops
+ type: str
+ level: advanced
+ desc: optionally exclude ops from QoS
+ long_desc: 'Optionally exclude ops from QoS. This setting accepts either an integer
+ bitmask value or comma-delimited string of op names. This setting is always internally
+ stored as an integer bitmask value. The mapping between op bitmask value and op
+ name is as follows: +1 -> read, +2 -> write, +4 -> discard, +8 -> write_same,
+ +16 -> compare_and_write'
+ services:
+ - rbd
+ flags:
+ - runtime
+ validator: |
+ [](std::string *value, std::string *error_message) {
+ std::ostringstream ss;
+ uint64_t exclude_ops = librbd::io::rbd_io_operations_from_string(*value, &ss);
+ // Leave this in integer form to avoid breaking Cinder. Someday
+ // we would like to present this in string form instead...
+ *value = stringify(exclude_ops);
+ if (ss.str().size()) {
+ return -EINVAL;
+ }
+ return 0;
+ }
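+# Worked example of the op bitmask above: excluding discard and write_same
+# can be written either as "discard,write_same" or as 4 + 8 = 12; both are
+# stored internally as the integer 12.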
+- name: rbd_discard_on_zeroed_write_same
+ type: bool
+ level: advanced
+ desc: discard data on zeroed write same instead of writing zero
+ default: true
+ services:
+ - rbd
+- name: rbd_mtime_update_interval
+ type: uint
+ level: advanced
+ desc: RBD Image modify timestamp refresh interval. Set to 0 to disable modify timestamp
+ update.
+ default: 60
+ services:
+ - rbd
+ min: 0
+- name: rbd_atime_update_interval
+ type: uint
+ level: advanced
+ desc: RBD Image access timestamp refresh interval. Set to 0 to disable access timestamp
+ update.
+ default: 60
+ services:
+ - rbd
+ min: 0
+- name: rbd_io_scheduler
+ type: str
+ level: advanced
+ desc: RBD IO scheduler
+ default: simple
+ services:
+ - rbd
+ enum_values:
+ - none
+ - simple
+- name: rbd_io_scheduler_simple_max_delay
+ type: uint
+ level: advanced
+ desc: maximum IO delay (in milliseconds) for the simple IO scheduler (if set to 0,
+ delay is calculated based on latency stats)
+ default: 0
+ services:
+ - rbd
+ min: 0
+- name: rbd_persistent_cache_mode
+ type: str
+ level: advanced
+ desc: enable persistent write back cache for this volume
+ default: disabled
+ services:
+ - rbd
+ enum_values:
+ - disabled
+ - rwl
+ - ssd
+- name: rbd_persistent_cache_size
+ type: uint
+ level: advanced
+ desc: size of the persistent write back cache for this volume
+ default: 1_G
+ services:
+ - rbd
+ min: 1_G
+- name: rbd_persistent_cache_path
+ type: str
+ level: advanced
+ desc: location of the persistent write back cache in a DAX-enabled filesystem on
+ persistent memory
+ default: /tmp
+ services:
+ - rbd
+- name: rbd_quiesce_notification_attempts
+ type: uint
+ level: dev
+ desc: the number of quiesce notification attempts
+ default: 10
+ services:
+ - rbd
+ min: 1
+- name: rbd_default_snapshot_quiesce_mode
+ type: str
+ level: advanced
+ desc: default snapshot quiesce mode
+ default: required
+ services:
+ - rbd
+ enum_values:
+ - required
+ - ignore-error
+ - skip
+- name: rbd_plugins
+ type: str
+ level: advanced
+ desc: comma-delimited list of librbd plugins to enable
+ services:
+ - rbd
+- name: rbd_config_pool_override_update_timestamp
+ type: uint
+ level: dev
+ desc: timestamp of last update to pool-level config overrides
+ default: 0
+ services:
+ - rbd
diff --git a/src/common/options/rgw.yaml.in b/src/common/options/rgw.yaml.in
new file mode 100644
index 000000000..241632a22
--- /dev/null
+++ b/src/common/options/rgw.yaml.in
@@ -0,0 +1,3770 @@
+# -*- mode: YAML -*-
+---
+
+options:
+# According to AWS S3(http://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html),
+# An ACL may have up to 100 grants.
+- name: rgw_acl_grants_max_num
+ type: int
+ level: advanced
+ desc: The maximum number of ACL grants in a single request.
+ default: 100
+ services:
+ - rgw
+ with_legacy: true
+# A user may have up to 100 IAM user policies.
+- name: rgw_user_policies_max_num
+ type: int
+ level: advanced
+ desc: The maximum number of IAM user policies for a single user.
+ default: 100
+ services:
+ - rgw
+ with_legacy: true
+# According to AWS S3 (http://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html),
+# A CORS request may have up to 100 rules.
+- name: rgw_cors_rules_max_num
+ type: int
+ level: advanced
+ desc: The maximum number of CORS rules in a single request.
+ default: 100
+ services:
+ - rgw
+ with_legacy: true
+# According to AWS S3 (https://docs.aws.amazon.com/AmazonS3/latest/dev/DeletingObjects.html),
+# Amazon S3 also provides the Multi-Object Delete API that you can use to delete up to 1000
+# objects in a single HTTP request.
+- name: rgw_delete_multi_obj_max_num
+ type: int
+ level: advanced
+ desc: The maximum number of objects in a single multi-object delete request.
+ default: 1000
+ services:
+ - rgw
+ with_legacy: true
+# According to AWS S3, An website routing config can have up to 50 rules.
+- name: rgw_website_routing_rules_max_num
+ type: int
+ level: advanced
+ desc: The maximum number of website routing rules in a single request.
+ default: 50
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_rados_tracing
+ type: bool
+ level: advanced
+ desc: Enables LTTng-UST tracepoints.
+ default: false
+ services:
+ - rgw
+- name: rgw_op_tracing
+ type: bool
+ level: advanced
+ desc: Enables LTTng-UST operator tracepoints.
+ default: false
+ services:
+ - rgw
+- name: rgw_max_chunk_size
+ type: size
+ level: advanced
+ desc: The maximum RGW chunk size.
+ long_desc: The chunk size is the size of RADOS I/O requests that RGW sends when
+ accessing data objects. RGW read and write operations will never request more than
+ this amount in a single request. This also defines the RGW head object size, as
+ head operations need to be atomic, and anything larger than this would require
+ more than a single operation. When RGW objects are written to the default
+ storage class, up to this amount of payload data will be stored alongside
+ metadata in the head object.
+ default: 4_M
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_put_obj_min_window_size
+ type: size
+ level: advanced
+ desc: The minimum RADOS write window size (in bytes).
+ long_desc: The window size determines the total concurrent RADOS writes of a single
+ RGW object. When writing an object RGW will send multiple chunks to RADOS. The
+ total size of the writes does not exceed the window size. The window size may
+ be adjusted dynamically in order to better utilize the pipe.
+ default: 16_M
+ services:
+ - rgw
+ see_also:
+ - rgw_put_obj_max_window_size
+ - rgw_max_chunk_size
+ with_legacy: true
+- name: rgw_put_obj_max_window_size
+ type: size
+ level: advanced
+ desc: The maximum RADOS write window size (in bytes).
+ long_desc: The window size may be dynamically adjusted, but will not surpass this
+ value.
+ default: 64_M
+ services:
+ - rgw
+ see_also:
+ - rgw_put_obj_min_window_size
+ - rgw_max_chunk_size
+ with_legacy: true
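+# Illustrative arithmetic for the window options above: with the default
+# 4 MiB rgw_max_chunk_size and the 16 MiB minimum window, up to
+# 16 MiB / 4 MiB = 4 chunks of one object write may be in flight to RADOS
+# before the window is dynamically widened toward the 64 MiB maximum.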
+- name: rgw_max_put_size
+ type: size
+ level: advanced
+ desc: The maximum size (in bytes) of regular (non multi-part) object upload.
+ long_desc: Plain object upload is capped at this amount of data. In order to upload
+ larger objects, a special upload mechanism is required. The S3 API provides the
+ multi-part upload, and Swift provides DLO and SLO.
+ default: 5_G
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_max_put_param_size
+ type: size
+ level: advanced
+ desc: The maximum size (in bytes) of data input of certain RESTful requests.
+ default: 1_M
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_max_attr_size
+ type: size
+ level: advanced
+ desc: The maximum length of a metadata value. 0 skips the check
+ default: 0
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_max_attr_name_len
+ type: size
+ level: advanced
+ desc: The maximum length of a metadata name. 0 skips the check
+ default: 0
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_max_attrs_num_in_req
+ type: uint
+ level: advanced
+ desc: The maximum number of metadata items that can be put via a single request
+ default: 0
+ services:
+ - rgw
+ with_legacy: true
+# override max bucket index shards in zone configuration (if not zero)
+#
+# Represents the number of shards for the bucket index object, a value of zero
+# indicates there is no sharding. By default (no sharding), the name of the object
+# is '.dir.{marker}'; with sharding, the name is '.dir.{marker}.{sharding_id}',
+# where sharding_id is a zero-based value. It is not recommended to set too large
+# a value (e.g. a thousand) as it increases the cost of bucket listing.
+- name: rgw_override_bucket_index_max_shards
+ type: uint
+ level: dev
+ desc: The default number of bucket index shards for newly-created buckets. This
+ value overrides bucket_index_max_shards stored in the zone. Setting this value
+ in the zone is preferred, because it applies globally to all radosgw daemons running
+ in the zone.
+ fmt_desc: Represents the number of shards for the bucket index object,
+ a value of zero indicates there is no sharding. It is not
+ recommended to set too large a value (e.g. a thousand) as it
+ increases the cost of bucket listing.
+ This variable should be set in the client or global sections
+ so that it is automatically applied to radosgw-admin commands.
+ default: 0
+ services:
+ - rgw
+ with_legacy: true
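+# Worked example of the naming scheme above: with 3 shards the bucket index
+# objects are '.dir.{marker}.0', '.dir.{marker}.1' and '.dir.{marker}.2';
+# with sharding disabled there is the single object '.dir.{marker}'.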
+# Represents the maximum AIO pending requests for the bucket index object shards.
+- name: rgw_bucket_index_max_aio
+ type: uint
+ level: advanced
+ desc: Max number of concurrent RADOS requests when handling bucket shards.
+ default: 128
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_multi_obj_del_max_aio
+ type: uint
+ level: advanced
+ desc: Max number of concurrent RADOS requests per multi-object delete request.
+ default: 16
+ services:
+ - rgw
+ with_legacy: true
+# whether or not the quota/gc threads should be started
+- name: rgw_enable_quota_threads
+ type: bool
+ level: advanced
+ desc: Enables the quota maintenance thread.
+ long_desc: The quota maintenance thread is responsible for quota related maintenance
+ work. The thread itself can be disabled, but in order for quota to work correctly,
+ at least one RGW in each zone needs to have this thread running. Having the thread
+ enabled on multiple RGW processes within the same zone can spread some of the
+ maintenance work between them.
+ default: true
+ services:
+ - rgw
+ see_also:
+ - rgw_enable_gc_threads
+ - rgw_enable_lc_threads
+ with_legacy: true
+- name: rgw_enable_gc_threads
+ type: bool
+ level: advanced
+ desc: Enables the garbage collection maintenance thread.
+ long_desc: The garbage collection maintenance thread is responsible for garbage
+ collector maintenance work. The thread itself can be disabled, but in order for
+ garbage collection to work correctly, at least one RGW in each zone needs to have
+ this thread running. Having the thread enabled on multiple RGW processes within
+ the same zone can spread some of the maintenance work between them.
+ default: true
+ services:
+ - rgw
+ see_also:
+ - rgw_enable_quota_threads
+ - rgw_enable_lc_threads
+ with_legacy: true
+- name: rgw_enable_lc_threads
+ type: bool
+ level: advanced
+ desc: Enables the lifecycle maintenance thread. This is required on at least one
+ rgw for each zone.
+ long_desc: The lifecycle maintenance thread is responsible for lifecycle related
+ maintenance work. The thread itself can be disabled, but in order for lifecycle
+ to work correctly, at least one RGW in each zone needs to have this thread running.
+ Having the thread enabled on multiple RGW processes within the same zone can spread
+ some of the maintenance work between them.
+ default: true
+ services:
+ - rgw
+ see_also:
+ - rgw_enable_gc_threads
+ - rgw_enable_quota_threads
+ with_legacy: true
+- name: rgw_data
+ type: str
+ level: advanced
+ desc: Alternative location for RGW configuration.
+ long_desc: If this is set, the various Ceph system configurables (such as the keyring file) will be located in the path that is specified here.
+ fmt_desc: Sets the location of the data files for Ceph RADOS Gateway.
+ default: /var/lib/ceph/radosgw/$cluster-$id
+ services:
+ - rgw
+ flags:
+ - no_mon_update
+ with_legacy: true
+- name: rgw_enable_apis
+ type: str
+ level: advanced
+ desc: A list of the RESTful APIs that RGW handles.
+ fmt_desc: |
+ Enables the specified APIs.
+
+ .. note:: Enabling the ``s3`` API is a requirement for
+ any ``radosgw`` instance that is meant to
+ participate in a `multi-site <../multisite>`_
+ configuration.
+ default: s3, s3website, swift, swift_auth, admin, sts, iam, notifications
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_cache_enabled
+ type: bool
+ level: advanced
+ desc: Enable RGW metadata cache.
+ long_desc: The metadata cache holds metadata entries that RGW requires for processing
+ requests. Metadata entries can be user info, bucket info, and bucket instance
+ info. If not found in the cache, entries will be fetched from the backing RADOS
+ store.
+ fmt_desc: Whether the Ceph Object Gateway cache is enabled.
+ default: true
+ services:
+ - rgw
+ see_also:
+ - rgw_cache_lru_size
+ with_legacy: true
+- name: rgw_cache_lru_size
+ type: int
+ level: advanced
+ desc: Max number of items in RGW metadata cache.
+ long_desc: When full, the RGW metadata cache evicts least recently used entries.
+ fmt_desc: The number of entries in the Ceph Object Gateway cache.
+ default: 10000
+ services:
+ - rgw
+ see_also:
+ - rgw_cache_enabled
+ with_legacy: true
+- name: rgw_dns_name
+ type: str
+ level: advanced
+ desc: The host names that RGW uses.
+ long_desc: A comma-separated list of DNS names.
+ This is needed for virtual hosting of buckets to work properly, unless
+ configured via zonegroup configuration.
+ fmt_desc: The DNS names of the served domains. See also the ``hostnames`` setting within zonegroups.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_dns_s3website_name
+ type: str
+ level: advanced
+ desc: The host name that RGW uses for static websites (S3)
+ long_desc: This is needed for virtual hosting of buckets, unless configured via
+ zonegroup configuration.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_numa_node
+ type: int
+ level: advanced
+ desc: set rgw's cpu affinity to a numa node (-1 for none)
+ default: -1
+ services:
+ - rgw
+ flags:
+ - startup
+- name: rgw_service_provider_name
+ type: str
+ level: advanced
+ desc: Service provider name which is contained in http response headers
+ long_desc: As S3 and other cloud storage providers do, HTTP response headers should
+ contain the name of the provider. This name is placed in the HTTP 'Server' header.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_content_length_compat
+ type: bool
+ level: advanced
+ desc: Multiple content length headers compatibility
+ long_desc: Try to handle requests with ambiguous multiple content length headers
+ (Content-Length, Http-Content-Length).
+ fmt_desc: Enable compatibility handling of FCGI requests with both ``CONTENT_LENGTH``
+ and ``HTTP_CONTENT_LENGTH`` set.
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_relaxed_region_enforcement
+ type: bool
+ level: advanced
+ desc: Disable region constraint enforcement
+ long_desc: Enable requests such as bucket creation to succeed irrespective of region
+ restrictions (Jewel compat).
+ default: false
+ services:
+ - rgw
+- name: rgw_lifecycle_work_time
+ type: str
+ level: advanced
+ desc: Lifecycle allowed work time
+ long_desc: Local time window in which the lifecycle maintenance thread can work.
+ default: 00:00-06:00
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_lc_lock_max_time
+ type: int
+ level: dev
+ default: 90
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_lc_thread_delay
+ type: int
+ level: advanced
+ desc: Delay after processing of bucket listing chunks (i.e., per 1000 entries) in
+ milliseconds
+ default: 0
+ services:
+ - rgw
+- name: rgw_lc_max_worker
+ type: int
+ level: advanced
+ desc: Number of LCWorker tasks that will be run in parallel
+ long_desc: Number of LCWorker tasks that will run in parallel--used to permit >1
+ bucket/index shards to be processed simultaneously
+ fmt_desc: This option specifies the number of lifecycle worker threads
+ to run in parallel, thereby processing bucket and index
+ shards simultaneously.
+ default: 3
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_lc_max_wp_worker
+ type: int
+ level: advanced
+ desc: Number of workpool threads per LCWorker
+ long_desc: Number of threads in per-LCWorker workpools--used to accelerate per-bucket
+ processing
+ fmt_desc: This option specifies the number of threads in each lifecycle
+ workers work pool. This option can help accelerate processing each bucket.
+ default: 3
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_lc_max_objs
+ type: int
+ level: advanced
+ desc: Number of lifecycle data shards
+ long_desc: Number of RADOS objects to use for storing lifecycle index. This affects
+ concurrency of lifecycle maintenance, as shards can be processed in parallel.
+ default: 32
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_lc_max_rules
+ type: uint
+ level: advanced
+ desc: Max number of lifecycle rules set on one bucket
+ long_desc: The number of lifecycle rules that may be set on one bucket is limited.
+ default: 1000
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_lc_debug_interval
+ type: int
+ level: dev
+ desc: The number of seconds that simulate one "day" in order to debug RGW LifeCycle.
+ Do *not* modify for a production cluster.
+ long_desc: For debugging RGW LifeCycle, the number of seconds that are equivalent to
+ one simulated "day". Values less than 1 are ignored and do not change LifeCycle behavior.
+ For example, during debugging if one wanted every 10 minutes to be equivalent to one day,
+ then this would be set to 600, the number of seconds in 10 minutes.
+ default: -1
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_mp_lock_max_time
+ type: int
+ level: advanced
+ desc: Multipart upload max completion time
+ long_desc: Time length to allow completion of a multipart upload operation. This
+ is done to prevent concurrent completions on the same object with the same upload
+ id.
+ default: 10_min
+ services:
+ - rgw
+- name: rgw_script_uri
+ type: str
+ level: dev
+ fmt_desc: The alternative value for the ``SCRIPT_URI`` if not set
+ in the request.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_request_uri
+ type: str
+ level: dev
+ fmt_desc: The alternative value for the ``REQUEST_URI`` if not set
+ in the request.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_ignore_get_invalid_range
+ type: bool
+ level: advanced
+ desc: Treat an invalid (e.g., negative) range request as full
+ long_desc: Treat an invalid (e.g., negative) range request as a request for the full
+ object (AWS compatibility)
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_swift_url
+ type: str
+ level: advanced
+ desc: Swift-auth storage URL
+ long_desc: Used in conjunction with rgw internal swift authentication. This affects
+ the X-Storage-Url response header value.
+ fmt_desc: The URL for the Ceph Object Gateway Swift API.
+ services:
+ - rgw
+ see_also:
+ - rgw_swift_auth_entry
+ with_legacy: true
+- name: rgw_swift_url_prefix
+ type: str
+ level: advanced
+ desc: Swift URL prefix
+ long_desc: The URL path prefix for swift requests.
+ fmt_desc: |
+ The URL prefix for the Swift API, to distinguish it from
+ the S3 API endpoint. The default is ``swift``, which
+ makes the Swift API available at the URL
+ ``http://host:port/swift/v1`` (or
+ ``http://host:port/swift/v1/AUTH_%(tenant_id)s`` if
+ ``rgw swift account in url`` is enabled).
+
+ For compatibility, setting this configuration variable
+ to the empty string causes the default ``swift`` to be
+ used; if you do want an empty prefix, set this option to
+ ``/``.
+
+ .. warning:: If you set this option to ``/``, you must
+ disable the S3 API by modifying ``rgw
+ enable apis`` to exclude ``s3``. It is not
+ possible to operate radosgw with ``rgw
+ swift url prefix = /`` and simultaneously
+ support both the S3 and Swift APIs. If you
+ do need to support both APIs without
+ prefixes, deploy multiple radosgw instances
+ to listen on different hosts (or ports)
+ instead, enabling some for S3 and some for
+ Swift.
+ example: /swift-testing
+ default: swift
+ services:
+ - rgw
+ with_legacy: true
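+# A sketch of the prefix behavior documented above: with the default prefix
+# ("swift") the Swift API is served at http://host:port/swift/v1. Setting
+#   rgw_swift_url_prefix = /
+# serves Swift at http://host:port/v1 instead, which requires excluding s3
+# from rgw_enable_apis, as the warning above notes.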
+- name: rgw_swift_auth_url
+ type: str
+ level: advanced
+ desc: Swift auth URL
+ long_desc: Default URL to which RGW connects and verifies tokens for v1 auth (if
+ not using internal swift auth).
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_swift_auth_entry
+ type: str
+ level: advanced
+ desc: Swift auth URL prefix
+ long_desc: URL path prefix for internal swift auth requests.
+ fmt_desc: The entry point for a Swift auth URL.
+ default: auth
+ services:
+ - rgw
+ see_also:
+ - rgw_swift_url
+ with_legacy: true
+- name: rgw_swift_tenant_name
+ type: str
+ level: advanced
+ desc: Swift tenant name
+ long_desc: Tenant name that is used when constructing the swift path.
+ services:
+ - rgw
+ see_also:
+ - rgw_swift_account_in_url
+ with_legacy: true
+- name: rgw_swift_account_in_url
+ type: bool
+ level: advanced
+ desc: Swift account encoded in URL
+ long_desc: Whether the swift account is encoded in the URI path (AUTH_<account>).
+ fmt_desc: |
+ Whether or not the Swift account name should be included
+ in the Swift API URL.
+ If set to ``false`` (the default), then the Swift API
+ will listen on a URL formed like
+ ``http://host:port/<rgw_swift_url_prefix>/v1``, and the
+ account name (commonly a Keystone project UUID if
+ radosgw is configured with `Keystone integration
+ <../keystone>`_) will be inferred from request
+ headers.
+ If set to ``true``, the Swift API URL will be
+ ``http://host:port/<rgw_swift_url_prefix>/v1/AUTH_<account_name>``
+ (or
+ ``http://host:port/<rgw_swift_url_prefix>/v1/AUTH_<keystone_project_id>``)
+ instead, and the Keystone ``object-store`` endpoint must
+ accordingly be configured to include the
+ ``AUTH_%(tenant_id)s`` suffix.
+ You **must** set this option to ``true`` (and update the
+ Keystone service catalog) if you want radosgw to support
+ publicly-readable containers and `temporary URLs
+ <../swift/tempurl>`_.
+ default: false
+ services:
+ - rgw
+ see_also:
+ - rgw_swift_tenant_name
+ with_legacy: true
+- name: rgw_swift_enforce_content_length
+ type: bool
+ level: advanced
+ desc: Send content length when listing containers (Swift)
+ long_desc: Whether the Content-Length header is needed when listing containers. When
+ this is set to false, RGW will send extra info for each entry in the response.
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_url
+ type: str
+ level: basic
+ desc: The URL to the Keystone server.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_admin_token
+ type: str
+ level: advanced
+ desc: 'DEPRECATED: The admin token (shared secret) that is used for the Keystone
+ requests.'
+ fmt_desc: The Keystone admin token (shared secret). In Ceph RGW,
+ authentication with the admin token has priority over
+ authentication with the admin credentials
+ (``rgw_keystone_admin_user``, ``rgw_keystone_admin_password``,
+ ``rgw_keystone_admin_tenant``, ``rgw_keystone_admin_project``,
+ ``rgw_keystone_admin_domain``). The Keystone admin token
+ has been deprecated, but can be used to integrate with
+ older environments. It is preferred to instead configure
+ ``rgw_keystone_admin_token_path`` to avoid exposing the token.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_admin_token_path
+ type: str
+ level: advanced
+ desc: Path to a file containing the admin token (shared secret) that is used for
+ the Keystone requests.
+ fmt_desc: Path to a file containing the Keystone admin token
+ (shared secret). In Ceph RadosGW, authentication with
+ the admin token has priority over authentication with
+ the admin credentials
+ (``rgw_keystone_admin_user``, ``rgw_keystone_admin_password``,
+ ``rgw_keystone_admin_tenant``, ``rgw_keystone_admin_project``,
+ ``rgw_keystone_admin_domain``).
+ The Keystone admin token has been deprecated, but can be
+ used to integrate with older environments.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_admin_user
+ type: str
+ level: advanced
+ desc: Keystone admin user.
+ fmt_desc: The name of the OpenStack user with admin privilege for Keystone
+ authentication (Service User) when using OpenStack Identity API v2.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_admin_password
+ type: str
+ level: advanced
+ desc: 'DEPRECATED: Keystone admin password.'
+ fmt_desc: The password for the OpenStack admin user when using OpenStack
+ Identity API v2. It is preferred to instead configure
+ ``rgw_keystone_admin_password_path`` to avoid exposing the password.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_admin_password_path
+ type: str
+ level: advanced
+ desc: Path to a file containing the Keystone admin password.
+ fmt_desc: Path to a file containing the password for the OpenStack
+ admin user when using OpenStack Identity API v2.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_admin_tenant
+ type: str
+ level: advanced
+ desc: Keystone admin user tenant.
+ fmt_desc: The name of the OpenStack tenant with admin privilege (Service Tenant) when
+ using OpenStack Identity API v2.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_admin_project
+ type: str
+ level: advanced
+ desc: Keystone admin user project (for Keystone v3).
+ fmt_desc: The name of the OpenStack project with admin privilege when using
+ OpenStack Identity API v3. If left unspecified, the value of
+ ``rgw keystone admin tenant`` will be used instead.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_admin_domain
+ type: str
+ level: advanced
+ desc: Keystone admin user domain (for Keystone v3).
+ fmt_desc: The name of the OpenStack domain with admin privilege when using
+ OpenStack Identity API v3.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_service_token_enabled
+ type: bool
+ level: advanced
+ desc: Allow service tokens to permit the use of expired Keystone auth tokens
+ fmt_desc: The service token support allows an incoming request to contain
+ an X-Service-Token header with a Keystone token that, if it carries acceptable
+ roles, allows the use of an expired token in the X-Auth-Token header.
+ default: false
+ see_also:
+ - rgw_keystone_service_token_accepted_roles
+ - rgw_keystone_expired_token_cache_expiration
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_service_token_accepted_roles
+ type: str
+ level: advanced
+ desc: Only users with one of these roles will be accepted as service users.
+ fmt_desc: The user that created the given service token must have one of
+ these roles to be considered a valid service user.
+ default: admin
+ see_also:
+ - rgw_keystone_service_token_enabled
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_expired_token_cache_expiration
+ type: int
+ level: advanced
+ desc: The number of seconds to add to the current time for an expired token's expiration
+ fmt_desc: An expired token that is allowed when a valid service token is given
+ needs a new expiration date for caching. This is the number of seconds added to the
+ current time and then set on an expired token that is verified with a service token.
+ default: 3600
+ services:
+ - rgw
+ see_also:
+ - rgw_keystone_service_token_enabled
+ with_legacy: true
+- name: rgw_keystone_barbican_user
+ type: str
+ level: advanced
+ desc: Keystone user to access barbican secrets.
+ fmt_desc: The name of the OpenStack user with access to the `Barbican`_
+ secrets used for `Encryption`_.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_barbican_password
+ type: str
+ level: advanced
+ desc: Keystone password for barbican user.
+ fmt_desc: The password associated with the `Barbican`_ user.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_barbican_tenant
+ type: str
+ level: advanced
+ desc: Keystone barbican user tenant (Keystone v2.0).
+ fmt_desc: The name of the OpenStack tenant associated with the `Barbican`_
+ user when using OpenStack Identity API v2.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_barbican_project
+ type: str
+ level: advanced
+ desc: Keystone barbican user project (Keystone v3).
+ fmt_desc: The name of the OpenStack project associated with the `Barbican`_
+ user when using OpenStack Identity API v3.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_barbican_domain
+ type: str
+ level: advanced
+ desc: Keystone barbican user domain.
+ fmt_desc: The name of the OpenStack domain associated with the `Barbican`_
+ user when using OpenStack Identity API v3.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_api_version
+ type: int
+ level: advanced
+ desc: Version of Keystone API to use (2 or 3).
+ fmt_desc: The version (2 or 3) of OpenStack Identity API that should be
+ used for communication with the Keystone server.
+ default: 2
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_accepted_roles
+ type: str
+ level: advanced
+ desc: Only users with one of these roles will be served when doing Keystone authentication.
+ fmt_desc: The roles required to serve requests.
+ default: Member, admin
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_accepted_admin_roles
+ type: str
+ level: advanced
+ desc: List of roles allowing user to gain admin privileges (Keystone).
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_token_cache_size
+ type: int
+ level: advanced
+ desc: Keystone token cache size
+ long_desc: Max number of Keystone tokens that will be cached. A token that is not
+ cached requires RGW to access the Keystone server when authenticating.
+ fmt_desc: The maximum number of entries in each Keystone token cache.
+ default: 10000
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_verify_ssl
+ type: bool
+ level: advanced
+ desc: Should RGW verify the Keystone server SSL certificate.
+ fmt_desc: Verify SSL certificates while making token requests to keystone.
+ default: true
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_keystone_implicit_tenants
+ type: str
+ level: advanced
+ desc: RGW Keystone implicit tenants creation
+ long_desc: Implicitly create new users in their own tenant with the same name when
+ authenticating via Keystone. Can be limited to s3 or swift only.
+ default: 'false'
+ services:
+ - rgw
+ enum_values:
+ - 'false'
+ - 'true'
+ - swift
+ - s3
+ - both
+ - '0'
+ - '1'
+ - none
+ with_legacy: true
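+# A minimal, illustrative Keystone v3 setup using the options above; every
+# value below is a placeholder for deployment-specific data, not a default:
+#   rgw_keystone_url = http://keystone.example.com:5000
+#   rgw_keystone_api_version = 3
+#   rgw_keystone_admin_user = rgw-svc
+#   rgw_keystone_admin_password_path = /etc/ceph/keystone-password
+#   rgw_keystone_admin_domain = Default
+#   rgw_keystone_admin_project = service
+#   rgw_keystone_accepted_roles = Member, admin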
+- name: rgw_cross_domain_policy
+ type: str
+ level: advanced
+ desc: RGW handle cross domain policy
+ long_desc: Returned cross domain policy when accessing the crossdomain.xml resource
+ (Swift compatibility).
+ default: <allow-access-from domain="*" secure="false" />
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_healthcheck_disabling_path
+ type: str
+ level: dev
+ desc: Swift health check api can be disabled if a file can be accessed in this path.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_s3_auth_use_rados
+ type: bool
+ level: advanced
+ desc: Should S3 authentication use credentials stored in RADOS backend.
+ default: true
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_s3_auth_use_keystone
+ type: bool
+ level: advanced
+ desc: Should S3 authentication use Keystone.
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_s3_auth_order
+ type: str
+ level: advanced
+ desc: Authentication strategy order to use for s3 authentication
+ long_desc: Order of authentication strategies to try for s3 authentication. The
+ allowed options are a comma separated list of the engines sts, external, and local.
+ The default order is to try all the externally configured engines before attempting
+ local rados based authentication.
+ default: sts, external, local
+ services:
+ - rgw
+ with_legacy: true
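+# For example (a sketch, not a recommendation), an installation that uses only
+# local RADOS-backed credentials could shorten the strategy list:
+#   rgw_s3_auth_order = local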
+- name: rgw_barbican_url
+ type: str
+ level: advanced
+ desc: URL to barbican server.
+ fmt_desc: The URL for the Barbican server.
+ services:
+ - rgw
+ with_legacy: true
+# OpenLDAP-style LDAP parameter strings
+- name: rgw_ldap_uri
+ type: str
+ level: advanced
+ desc: Space-separated list of LDAP servers in URI format.
+ default: ldaps://<ldap.your.domain>
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_ldap_binddn
+ type: str
+ level: advanced
+ desc: LDAP entry RGW will bind with (user match).
+ default: uid=admin,cn=users,dc=example,dc=com
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_ldap_searchdn
+ type: str
+ level: advanced
+ desc: LDAP search base (basedn).
+ default: cn=users,cn=accounts,dc=example,dc=com
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_ldap_dnattr
+ type: str
+ level: advanced
+ desc: LDAP attribute containing RGW user names (to form binddns).
+ default: uid
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_ldap_secret
+ type: str
+ level: advanced
+ desc: Path to file containing credentials for rgw_ldap_binddn.
+ default: /etc/openldap/secret
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_s3_auth_use_ldap
+ type: bool
+ level: advanced
+ desc: Should S3 authentication use LDAP.
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_ldap_searchfilter
+ type: str
+ level: advanced
+ desc: LDAP search filter.
+ services:
+ - rgw
+ with_legacy: true
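+# An illustrative LDAP setup combining the options above; the server URI,
+# bind DN, and search base are placeholders for site-specific values:
+#   rgw_s3_auth_use_ldap = true
+#   rgw_ldap_uri = ldaps://ldap.example.com
+#   rgw_ldap_binddn = uid=rgw,cn=users,dc=example,dc=com
+#   rgw_ldap_secret = /etc/openldap/secret
+#   rgw_ldap_searchdn = cn=users,cn=accounts,dc=example,dc=com
+#   rgw_ldap_dnattr = uid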
+- name: rgw_opa_url
+ type: str
+ level: advanced
+ desc: URL to OPA server.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_opa_token
+ type: str
+ level: advanced
+ desc: The Bearer token OPA uses to authenticate client requests.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_opa_verify_ssl
+ type: bool
+ level: advanced
+ desc: Should RGW verify the OPA server SSL certificate.
+ default: true
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_use_opa_authz
+ type: bool
+ level: advanced
+ desc: Should OPA be used to authorize client requests.
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_admin_entry
+ type: str
+ level: advanced
+ desc: Path prefix to be used for accessing RGW RESTful admin API.
+ fmt_desc: The entry point for an admin request URL.
+ default: admin
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_enforce_swift_acls
+ type: bool
+ level: advanced
+ desc: RGW enforce swift acls
+ long_desc: Should RGW enforce special Swift-only ACLs. Swift has a special ACL that
+ gives permission to access all objects in a container.
+ fmt_desc: Enforces the Swift Access Control List (ACL) settings.
+ default: true
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_swift_token_expiration
+ type: int
+ level: advanced
+ desc: Expiration time (in seconds) for token generated through RGW Swift auth.
+ fmt_desc: The time in seconds for expiring a Swift token.
+ default: 1_day
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_print_continue
+ type: bool
+ level: advanced
+ desc: RGW support of 100-continue
+ long_desc: Should RGW explicitly send 100 (continue) responses. This is mainly relevant
+ when using FastCGI, as some FastCGI modules do not fully support this feature.
+ fmt_desc: Enable ``100-continue`` if it is operational.
+ default: true
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_print_prohibited_content_length
+ type: bool
+ level: advanced
+ desc: RGW RFC-7230 compatibility
+ long_desc: Specifies whether RGW violates RFC 7230 and sends Content-Length with
+ 204 or 304 statuses.
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_remote_addr_param
+ type: str
+ level: advanced
+ desc: HTTP header that holds the remote address in incoming requests.
+ long_desc: RGW will use this header to extract the request's origin. When RGW runs
+ behind a reverse proxy, the remote address header will point at the proxy's address
+ and not at the originator's address. Therefore the proxy can be configured to add
+ the originator's address in a separate HTTP header, which will allow
+ RGW to log it correctly.
+ fmt_desc: The remote address parameter. For example, the HTTP field
+ containing the remote address, or the ``X-Forwarded-For``
+ address if a reverse proxy is operational.
+ default: REMOTE_ADDR
+ services:
+ - rgw
+ see_also:
+ - rgw_enable_ops_log
+ with_legacy: true
+- name: rgw_op_thread_timeout
+ type: int
+ level: dev
+ desc: Timeout for async rados coroutine operations.
+ fmt_desc: The timeout in seconds for open threads.
+ default: 10_min
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_op_thread_suicide_timeout
+ type: int
+ level: dev
+ default: 0
+ fmt_desc: The time ``timeout`` in seconds before a Ceph Object Gateway
+ process dies. Disabled if set to ``0``.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_thread_pool_size
+ type: int
+ level: basic
+ desc: RGW requests handling thread pool size.
+ long_desc: This parameter determines the number of concurrent requests RGW can process
+ when using either the civetweb or the fastcgi frontends. The higher this number
+ is, the more concurrent requests RGW will be able to handle, at the cost of more
+ resource utilization.
+ fmt_desc: The size of the thread pool.
+ default: 512
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_num_control_oids
+ type: int
+ level: advanced
+ desc: Number of control objects used for cross-RGW communication.
+ long_desc: RGW uses certain control objects to send messages between different RGW
+ processes running on the same zone. These messages include metadata cache invalidation
+ info that is being sent when metadata is modified (such as user or bucket information).
+ A higher number of control objects allows better concurrency of these messages,
+ at the cost of more resource utilization.
+ fmt_desc: The number of notification objects used for cache synchronization
+ between different ``rgw`` instances.
+ default: 8
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_verify_ssl
+ type: bool
+ level: advanced
+ desc: Should RGW verify SSL when connecting to a remote HTTP server
+ long_desc: RGW can send requests to other RGW servers (e.g., in multi-site sync
+ work). This configurable selects whether RGW should verify the certificate for
+ the remote peer and host.
+ fmt_desc: Verify SSL certificates while making requests.
+ default: true
+ services:
+ - rgw
+ see_also:
+ - rgw_keystone_verify_ssl
+ with_legacy: true
+# The following are tunables for caches of RGW NFS (and other file
+# client) objects.
+#
+# The file handle cache is a partitioned hash table
+# (fhcache_partitions), each with a closed hash part and backing
+# b-tree mapping. The number of partitions is expected to be a small
+# prime, the cache size something larger but less than 5K, the total
+# size of the cache is n_part * cache_size.
+- name: rgw_nfs_lru_lanes
+ type: int
+ level: advanced
+ default: 5
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_nfs_lru_lane_hiwat
+ type: int
+ level: advanced
+ default: 911
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_nfs_fhcache_partitions
+ type: int
+ level: advanced
+ default: 3
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_nfs_fhcache_size
+ type: int
+ level: advanced
+ default: 2017
+ services:
+ - rgw
+ with_legacy: true
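+# Worked example of the sizing note above, using the defaults shown here:
+# with rgw_nfs_fhcache_partitions = 3 and rgw_nfs_fhcache_size = 2017, the
+# total file handle cache size is n_part * cache_size = 3 * 2017 = 6051.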
+- name: rgw_nfs_namespace_expire_secs
+ type: int
+ level: advanced
+ default: 5_min
+ services:
+ - rgw
+ min: 1
+ with_legacy: true
+- name: rgw_nfs_max_gc
+ type: int
+ level: advanced
+ default: 5_min
+ services:
+ - rgw
+ min: 1
+ with_legacy: true
+- name: rgw_nfs_write_completion_interval_s
+ type: int
+ level: advanced
+ default: 10
+ services:
+ - rgw
+ with_legacy: true
+# use fast S3 attrs from bucket index--currently assumes NFS mounts are immutable
+- name: rgw_nfs_s3_fast_attrs
+ type: bool
+ level: advanced
+ desc: use fast S3 attrs from bucket index (immutable only)
+ long_desc: use fast S3 attrs from bucket index (assumes NFS mounts are immutable)
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+# overrides for librgw/nfs
+- name: rgw_nfs_run_gc_threads
+ type: bool
+ level: advanced
+ desc: run GC threads in librgw (default off)
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_nfs_run_lc_threads
+ type: bool
+ level: advanced
+ desc: run lifecycle threads in librgw (default off)
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_nfs_run_quota_threads
+ type: bool
+ level: advanced
+ desc: run quota threads in librgw (default off)
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_nfs_run_sync_thread
+ type: bool
+ level: advanced
+ desc: run sync thread in librgw (default off)
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_nfs_frontends
+ type: str
+ level: basic
+ desc: RGW frontends configuration when running as librgw/nfs
+ long_desc: A comma-delimited list of frontend configurations. Each configuration
+ contains the type of the frontend followed by an optional space delimited set
+ of key=value config parameters.
+ fmt_desc: Configures the HTTP frontend(s). The configuration for multiple
+ frontends can be provided in a comma-delimited list. Each frontend
+ configuration may include a list of options separated by spaces,
+ where each option is in the form "key=value" or "key". See
+ `HTTP Frontends`_ for more on supported options.
+ default: rgw-nfs
+ services:
+ - rgw
+ with_legacy: true
+ see_also:
+ - rgw_frontends
+- name: rgw_rados_pool_autoscale_bias
+ type: float
+ level: advanced
+ desc: pg_autoscale_bias value for RGW metadata (omap-heavy) pools
+ default: 4
+ services:
+ - rgw
+ min: 0.01
+ max: 100000
+- name: rgw_rados_pool_recovery_priority
+ type: uint
+ level: advanced
+ desc: recovery_priority value for RGW metadata (omap-heavy) pools
+ default: 5
+ services:
+ - rgw
+ min: -10
+ max: 10
+- name: rgw_zone
+ type: str
+ level: advanced
+ desc: Zone name
+ fmt_desc: The name of the zone for the gateway instance. If no zone is
+ set, a cluster-wide default can be configured with the command
+ ``radosgw-admin zone default``.
+ services:
+ - rgw
+ see_also:
+ - rgw_zonegroup
+ - rgw_realm
+ with_legacy: true
+- name: rgw_zone_id
+ type: str
+ level: advanced
+ desc: Zone ID
+ services:
+ - rgw
+ see_also:
+ - rgw_zone
+ - rgw_zonegroup
+ - rgw_realm
+- name: rgw_zone_root_pool
+ type: str
+ level: advanced
+ desc: Zone root pool name
+ long_desc: The zone root pool is the pool where the RGW zone configuration is located.
+ default: .rgw.root
+ services:
+ - rgw
+ see_also:
+ - rgw_zonegroup_root_pool
+ - rgw_realm_root_pool
+ - rgw_period_root_pool
+ with_legacy: true
+- name: rgw_default_zone_info_oid
+ type: str
+ level: advanced
+ desc: Default zone info object id
+ long_desc: Name of the RADOS object that holds the default zone information.
+ default: default.zone
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_region
+ type: str
+ level: advanced
+ desc: Region name
+ long_desc: Obsolete config option. The rgw_zonegroup option should be used instead.
+ services:
+ - rgw
+ see_also:
+ - rgw_zonegroup
+ with_legacy: true
+- name: rgw_region_root_pool
+ type: str
+ level: advanced
+ desc: Region root pool
+ long_desc: Obsolete config option. The rgw_zonegroup_root_pool should be used instead.
+ default: .rgw.root
+ services:
+ - rgw
+ see_also:
+ - rgw_zonegroup_root_pool
+ with_legacy: true
+- name: rgw_default_region_info_oid
+ type: str
+ level: advanced
+ desc: Default region info object id
+ long_desc: Obsolete config option. The rgw_default_zonegroup_info_oid should be
+ used instead.
+ default: default.region
+ services:
+ - rgw
+ see_also:
+ - rgw_default_zonegroup_info_oid
+ with_legacy: true
+- name: rgw_zonegroup
+ type: str
+ level: advanced
+ desc: Zonegroup name
+ fmt_desc: The name of the zonegroup for the gateway instance. If no
+ zonegroup is set, a cluster-wide default can be configured with
+ the command ``radosgw-admin zonegroup default``.
+ services:
+ - rgw
+ see_also:
+ - rgw_zone
+ - rgw_realm
+ with_legacy: true
+- name: rgw_zonegroup_id
+ type: str
+ level: advanced
+ desc: Zonegroup ID
+ services:
+ - rgw
+ see_also:
+ - rgw_zone
+ - rgw_zonegroup
+ - rgw_realm
+- name: rgw_zonegroup_root_pool
+ type: str
+ level: advanced
+ desc: Zonegroup root pool
+ long_desc: The zonegroup root pool is the pool where the RGW zonegroup configuration
+ is located.
+ default: .rgw.root
+ services:
+ - rgw
+ see_also:
+ - rgw_zone_root_pool
+ - rgw_realm_root_pool
+ - rgw_period_root_pool
+ with_legacy: true
+- name: rgw_default_zonegroup_info_oid
+ type: str
+ level: advanced
+ default: default.zonegroup
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_realm
+ type: str
+ level: advanced
+ fmt_desc: The name of the realm for the gateway instance. If no realm is
+ set, a cluster-wide default can be configured with the command
+ ``radosgw-admin realm default``.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_realm_id
+ type: str
+ level: advanced
+ services:
+ - rgw
+- name: rgw_realm_root_pool
+ type: str
+ level: advanced
+ desc: Realm root pool
+ long_desc: The realm root pool is the pool where the RGW realm configuration is located.
+ default: .rgw.root
+ services:
+ - rgw
+ see_also:
+ - rgw_zonegroup_root_pool
+ - rgw_zone_root_pool
+ - rgw_period_root_pool
+ with_legacy: true
+- name: rgw_default_realm_info_oid
+ type: str
+ level: advanced
+ default: default.realm
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_period_root_pool
+ type: str
+ level: advanced
+ desc: Period root pool
+ long_desc: The period root pool is the pool where the RGW period configuration
+ is located.
+ default: .rgw.root
+ services:
+ - rgw
+ see_also:
+ - rgw_zonegroup_root_pool
+ - rgw_zone_root_pool
+ - rgw_realm_root_pool
+ with_legacy: true
+- name: rgw_period_latest_epoch_info_oid
+ type: str
+ level: dev
+ default: .latest_epoch
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_log_nonexistent_bucket
+ type: bool
+ level: advanced
+ desc: Should RGW log operations on bucket that does not exist
+ long_desc: This config option applies to the ops log. When this option is set, the
+ ops log will log operations that are sent to nonexistent buckets. These operations
+ inherently fail, and do not correspond to a specific user.
+ fmt_desc: Enables Ceph Object Gateway to log a request for a non-existent
+ bucket.
+ default: false
+ services:
+ - rgw
+ see_also:
+ - rgw_enable_ops_log
+ with_legacy: true
+# man date to see codes (a subset are supported)
+- name: rgw_log_object_name
+ type: str
+ level: advanced
+ desc: Ops log object name format
+ long_desc: Defines the format of the RADOS objects names that ops log uses to store
+ ops log data
+ fmt_desc: The logging format for an object name. See the manpage
+ :manpage:`date` for details about format specifiers.
+ default: '%Y-%m-%d-%H-%i-%n'
+ services:
+ - rgw
+ see_also:
+ - rgw_enable_ops_log
+ with_legacy: true
+- name: rgw_log_object_name_utc
+ type: bool
+ level: advanced
+ desc: Should ops log object name based on UTC
+ long_desc: If set, the names of the RADOS objects that hold the ops log data will
+ be based on UTC time zone. If not set, it will use the local time zone.
+ fmt_desc: Whether a logged object name includes a UTC time.
+ If ``false``, it uses the local time.
+ default: false
+ services:
+ - rgw
+ see_also:
+ - rgw_enable_ops_log
+ - rgw_log_object_name
+ with_legacy: true
+- name: rgw_usage_max_shards
+ type: int
+ level: advanced
+ desc: Number of shards for usage log.
+ long_desc: The number of RADOS objects that RGW will use in order to store the usage
+ log data.
+ fmt_desc: The maximum number of shards for usage logging.
+ default: 32
+ services:
+ - rgw
+ see_also:
+ - rgw_enable_usage_log
+ with_legacy: true
+- name: rgw_usage_max_user_shards
+ type: int
+ level: advanced
+ desc: Number of shards for single user in usage log
+ long_desc: The number of shards that a single user will span over in the usage log.
+ fmt_desc: The maximum number of shards used for a single user's
+ usage logging.
+ default: 1
+ services:
+ - rgw
+ see_also:
+ - rgw_enable_usage_log
+ min: 1
+ with_legacy: true
+# enable logging every rgw operation
+- name: rgw_enable_ops_log
+ type: bool
+ level: advanced
+ desc: Enable ops log
+ fmt_desc: Enable logging for each successful Ceph Object Gateway operation.
+ default: false
+ services:
+ - rgw
+ see_also:
+ - rgw_log_nonexistent_bucket
+ - rgw_log_object_name
+ - rgw_ops_log_rados
+ - rgw_ops_log_socket_path
+ - rgw_ops_log_file_path
+ with_legacy: true
+# enable logging bandwidth usage
+- name: rgw_enable_usage_log
+ type: bool
+ level: advanced
+ desc: Enable the usage log
+ default: false
+ services:
+ - rgw
+ see_also:
+ - rgw_usage_max_shards
+ with_legacy: true
+# whether ops log should go to rados
+- name: rgw_ops_log_rados
+ type: bool
+ level: advanced
+ desc: Use RADOS for ops log
+ long_desc: If set, RGW will store ops log information in RADOS. WARNING,
+ there is no automation to clean up these log entries, so by default they
+ will pile up without bound. This MUST NOT be enabled unless the admin has
+ a strategy to manage and trim these log entries with `radosgw-admin log rm`.
+ fmt_desc: Whether the operations log should be written to the
+ Ceph Storage Cluster backend.
+ default: false
+ services:
+ - rgw
+ see_also:
+ - rgw_enable_ops_log
+ - rgw_log_object_name_utc
+ - rgw_log_object_name
+ with_legacy: true
+# path to unix domain socket where ops log can go
+- name: rgw_ops_log_socket_path
+ type: str
+ level: advanced
+ desc: Unix domain socket path for ops log.
+ long_desc: Path to the unix domain socket that RGW will listen on for connections. When
+ connected, RGW will send ops log data through it.
+ fmt_desc: The Unix domain socket for writing operations logs.
+ services:
+ - rgw
+ see_also:
+ - rgw_enable_ops_log
+ - rgw_ops_log_data_backlog
+ with_legacy: true
+# path to file where ops log can go
+- name: rgw_ops_log_file_path
+ type: str
+ level: advanced
+ desc: File-system path for ops log.
+ long_desc: Path to file that RGW will log ops logs to. A cephadm deployment will automatically
+ rotate these logs under /var/log/ceph/. Other deployments should arrange for similar log rotation.
+ fmt_desc: The file-system path for writing operations logs.
+ daemon_default: /var/log/ceph/ops-log-$cluster-$name.log
+ services:
+ - rgw
+ see_also:
+ - rgw_enable_ops_log
+ with_legacy: true
+# max data backlog for ops log
+- name: rgw_ops_log_data_backlog
+ type: size
+ level: advanced
+ desc: Ops log socket backlog
+ long_desc: Maximum amount of data backlog that RGW can keep when ops log is configured
+ to send info through unix domain socket. When data backlog is higher than this,
+ ops log entries will be lost. In order to avoid ops log information loss, the
+ listener needs to clear data (by reading it) quickly enough.
+ fmt_desc: The maximum data backlog data size for operations logs written
+ to a Unix domain socket.
+ default: 5_M
+ services:
+ - rgw
+ see_also:
+ - rgw_enable_ops_log
+ - rgw_ops_log_socket_path
+ with_legacy: true
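+# A hedged example of wiring the ops log options above together: logging to a
+# file (the path shown is the documented daemon default, repeated only for
+# illustration) is enabled with
+#   rgw_enable_ops_log = true
+#   rgw_ops_log_file_path = /var/log/ceph/ops-log-$cluster-$name.log
+# whereas enabling rgw_ops_log_rados instead requires a trimming strategy
+# with `radosgw-admin log rm`, per the warning above.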
+- name: rgw_usage_log_flush_threshold
+ type: int
+ level: advanced
+ desc: Number of entries in usage log before flushing
+ long_desc: This is the max number of entries that will be held in the usage log
+ before it is flushed to the backend. Note that the usage log is periodically
+ flushed, even if the number of entries does not reach this threshold. A usage log
+ entry corresponds to one or more operations on a single bucket.
+ fmt_desc: The number of dirty merged entries in the usage log before
+ flushing synchronously.
+ default: 1024
+ services:
+ - rgw
+ see_also:
+ - rgw_enable_usage_log
+ - rgw_usage_log_tick_interval
+ with_legacy: true
+- name: rgw_usage_log_tick_interval
+ type: int
+ level: advanced
+ desc: Number of seconds between usage log flush cycles
+ long_desc: The number of seconds between consecutive usage log flushes. The usage
+ log will also flush itself to the backend if the number of pending entries reaches
+ a certain threshold.
+ fmt_desc: Flush pending usage log data every ``n`` seconds.
+ default: 30
+ services:
+ - rgw
+ see_also:
+ - rgw_enable_usage_log
+ - rgw_usage_log_flush_threshold
+ with_legacy: true
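+# How the two flush options above interact, using their defaults: pending
+# usage log entries are flushed every rgw_usage_log_tick_interval = 30
+# seconds, or as soon as rgw_usage_log_flush_threshold = 1024 entries
+# accumulate, whichever comes first; rgw_enable_usage_log = true must be set
+# for any of this to apply.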
+- name: rgw_init_timeout
+ type: int
+ level: basic
+ desc: Initialization timeout
+ long_desc: The time length (in seconds) that RGW will allow for its initialization.
+ RGW process will give up and quit if initialization is not complete after this
+ amount of time.
+ fmt_desc: The number of seconds before Ceph Object Gateway gives up on
+ initialization.
+ default: 5_min
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_mime_types_file
+ type: str
+ level: basic
+ desc: Path to local mime types file
+ long_desc: The mime types file is needed in Swift when uploading an object. If object's
+ content type is not specified, RGW will use data from this file to assign a content
+ type to the object.
+ fmt_desc: The path and location of the MIME-types file. Used for Swift
+ auto-detection of object types.
+ default: /etc/mime.types
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_gc_max_objs
+ type: int
+ level: advanced
+ desc: Number of shards for garbage collector data
+ long_desc: The number of garbage collector data shards is the number of RADOS objects
+ that RGW will use to store the garbage collection information.
+ fmt_desc: The maximum number of objects that may be handled by
+ garbage collection in one garbage collection processing cycle.
+ Please do not change this value after the first deployment.
+ default: 32
+ services:
+ - rgw
+ see_also:
+ - rgw_gc_obj_min_wait
+ - rgw_gc_processor_max_time
+ - rgw_gc_processor_period
+ - rgw_gc_max_concurrent_io
+ with_legacy: true
+# wait time before object may be handled by gc, recommended lower limit is 30 mins
+- name: rgw_gc_obj_min_wait
+ type: int
+ level: advanced
+ desc: Garbage collection object expiration time
+ long_desc: The length of time (in seconds) that the RGW garbage collector will wait
+ before purging a deleted object's data. RGW will not remove an object immediately,
+ as the object could still have readers. A mechanism exists to increase the object's
+ expiration time when it is being read. The recommended lower limit for this value
+ is 30 minutes.
+ fmt_desc: The minimum wait time before a deleted object may be removed
+ and handled by garbage collection processing.
+ default: 2_hr
+ services:
+ - rgw
+ see_also:
+ - rgw_gc_max_objs
+ - rgw_gc_processor_max_time
+ - rgw_gc_processor_period
+ - rgw_gc_max_concurrent_io
+ with_legacy: true
+- name: rgw_gc_processor_max_time
+ type: int
+ level: advanced
+ desc: Length of time GC processor can lease shard
+ long_desc: The garbage collection thread in the RGW process holds a lease on its data
+ shards. These objects contain the information about the objects that need to be
+ removed. RGW takes a lease in order to prevent multiple RGW processes from handling
+ the same objects concurrently. This is the maximum amount of time (in seconds) that
+ RGW is allowed to hold that lease. If RGW goes down uncleanly, processing of that
+ data shard will be blocked for up to this amount of time.
+ fmt_desc: The maximum time between the beginning of two consecutive garbage
+ collection processing cycles.
+ default: 1_hr
+ services:
+ - rgw
+ see_also:
+ - rgw_gc_max_objs
+ - rgw_gc_obj_min_wait
+ - rgw_gc_processor_period
+ - rgw_gc_max_concurrent_io
+ with_legacy: true
+- name: rgw_gc_processor_period
+ type: int
+ level: advanced
+ desc: Garbage collector cycle run time
+ long_desc: The amount of time between the start of consecutive runs of the garbage
+ collector threads. If a garbage collector run takes more than this period, it will
+ not wait before running again.
+ fmt_desc: The cycle time for garbage collection processing.
+ default: 1_hr
+ services:
+ - rgw
+ see_also:
+ - rgw_gc_max_objs
+ - rgw_gc_obj_min_wait
+ - rgw_gc_processor_max_time
+ - rgw_gc_max_concurrent_io
+ - rgw_gc_max_trim_chunk
+ with_legacy: true
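+# A sketch of how the GC timing options above relate, using the defaults: a
+# deleted object becomes eligible for purging after rgw_gc_obj_min_wait
+# (2 hours), GC cycles start every rgw_gc_processor_period (1 hour), and each
+# cycle may hold a shard lease for at most rgw_gc_processor_max_time (1 hour).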
+- name: rgw_gc_max_concurrent_io
+ type: int
+ level: advanced
+ desc: Max concurrent RADOS IO operations for garbage collection
+ long_desc: The maximum number of concurrent IO operations that the RGW garbage collection
+ thread will use when purging old data.
+ default: 10
+ services:
+ - rgw
+ see_also:
+ - rgw_gc_max_objs
+ - rgw_gc_obj_min_wait
+ - rgw_gc_processor_max_time
+ - rgw_gc_max_trim_chunk
+ with_legacy: true
+- name: rgw_gc_max_trim_chunk
+ type: int
+ level: advanced
+ desc: Max number of keys to remove from garbage collector log in a single operation
+ default: 16
+ services:
+ - rgw
+ see_also:
+ - rgw_gc_max_objs
+ - rgw_gc_obj_min_wait
+ - rgw_gc_processor_max_time
+ - rgw_gc_max_concurrent_io
+ with_legacy: true
+- name: rgw_gc_max_deferred_entries_size
+ type: uint
+ level: advanced
+ desc: maximum allowed size of deferred entries in queue head for gc
+ default: 3_K
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_gc_max_queue_size
+ type: uint
+ level: advanced
+ desc: Maximum allowed queue size for gc
+ long_desc: The maximum allowed size of each gc queue, and its value should not be
+ greater than (osd_max_object_size - rgw_gc_max_deferred_entries_size - 1K).
+ default: 131068_K
+ services:
+ - rgw
+ see_also:
+ - osd_max_object_size
+ - rgw_gc_max_deferred_entries_size
+ with_legacy: true
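+# Worked check of the constraint above, assuming the default
+# osd_max_object_size of 128 MiB (131072 KiB): 131072 KiB - 3 KiB
+# (rgw_gc_max_deferred_entries_size) - 1 KiB = 131068 KiB, which matches the
+# default rgw_gc_max_queue_size.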
+- name: rgw_gc_max_deferred
+ type: uint
+ level: advanced
+ desc: Number of maximum deferred data entries to be stored in queue for gc
+ default: 50
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_s3_success_create_obj_status
+ type: int
+ level: advanced
+ desc: HTTP return code override for object creation
+ long_desc: If not zero, this is the HTTP return code that will be returned on a
+ successful S3 object creation.
+ fmt_desc: The alternate success status response for ``create-obj``.
+ default: 0
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_s3_client_max_sig_ver
+ type: int
+ level: advanced
+ desc: Max S3 authentication signature version
+ long_desc: If greater than zero, forces the maximum S3 signature version to use
+ default: -1
+ services:
+ - rgw
+- name: rgw_resolve_cname
+ type: bool
+ level: advanced
+ desc: Support vanity domain names via CNAME
+ long_desc: If true, RGW will query DNS when detecting that it's serving a request
+ that was sent to a host in another domain. If a CNAME record is configured for
+ that domain it will use it instead. This gives users the ability to create
+ a unique domain of their own that points at data in their bucket.
+ fmt_desc: Whether ``rgw`` should use DNS CNAME record of the request
+ hostname field (if hostname is not equal to ``rgw dns name``).
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_obj_stripe_size
+ type: size
+ level: advanced
+ desc: RGW object stripe size
+ long_desc: The size of an object stripe for RGW objects. This is the maximum size
+ a backing RADOS object will have. RGW objects that are larger than this will span
+ over multiple objects.
+ fmt_desc: The size of an object stripe for Ceph Object Gateway objects.
+ See `Architecture`_ for details on striping.
+ default: 4_M
+ services:
+ - rgw
+ with_legacy: true
+# list of extended attrs that can be set on objects (beyond the default)
+- name: rgw_extended_http_attrs
+ type: str
+ level: advanced
+ desc: RGW support extended HTTP attrs
+ long_desc: Add a new set of attributes that can be set on an object. These extra
+ attributes can be set through HTTP header fields when putting the objects. If
+ set, these attributes will return as HTTP fields when doing GET/HEAD on the object.
+ fmt_desc: Add new set of attributes that could be set on an entity
+ (user, bucket or object). These extra attributes can be set
+ through HTTP header fields when putting the entity or modifying
+ it using POST method. If set, these attributes will return as
+ HTTP fields when doing GET/HEAD on the entity.
+ services:
+ - rgw
+ example: content_foo, content_bar, x-foo-bar
+ with_legacy: true
+- name: rgw_exit_timeout_secs
+ type: int
+ level: advanced
+ desc: RGW shutdown timeout
+ long_desc: Number of seconds to wait for a process before exiting unconditionally.
+ default: 2_min
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_get_obj_window_size
+ type: size
+ level: advanced
+ desc: RGW object read window size
+ long_desc: The window size in bytes for a single object read request
+ default: 16_M
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_get_obj_max_req_size
+ type: size
+ level: advanced
+ desc: RGW object read chunk size
+ long_desc: The maximum request size of a single object read operation sent to RADOS
+ fmt_desc: The maximum request size of a single get operation sent to the
+ Ceph Storage Cluster.
+ default: 4_M
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_relaxed_s3_bucket_names
+ type: bool
+ level: advanced
+ desc: RGW enable relaxed S3 bucket names
+ long_desc: RGW enable relaxed S3 bucket name rules for US region buckets.
+ fmt_desc: Enables relaxed S3 bucket names rules for US region buckets.
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_defer_to_bucket_acls
+ type: str
+ level: advanced
+ desc: Bucket ACLs override object ACLs
+ long_desc: If not empty, a string that selects the mode of operation. 'recurse'
+ will use the bucket's ACL for the authorization. 'full-control' will allow users
+ that have full control permission on the bucket to access the object.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_list_buckets_max_chunk
+ type: int
+ level: advanced
+ desc: Max number of buckets to retrieve in a single listing operation
+ long_desc: When RGW fetches lists of a user's buckets from the backend, this is the
+ max number of entries it will try to retrieve in a single operation. Note that
+ the backend may choose to return a smaller number of entries.
+ fmt_desc: The maximum number of buckets to retrieve in a single operation
+ when listing user buckets.
+ default: 1000
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_md_log_max_shards
+ type: int
+ level: advanced
+ desc: RGW number of metadata log shards
+ long_desc: The number of shards the RGW metadata log entries will reside in. This
+ affects the metadata sync parallelism as a shard can only be processed by a single
+ RGW at a time
+ fmt_desc: The maximum number of shards for the metadata log.
+ default: 64
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_curl_buffersize
+ type: int
+ level: dev
+ long_desc: 'Pass a long specifying your preferred size (in bytes) for the receive
+ buffer in libcurl. See: https://curl.se/libcurl/c/CURLOPT_BUFFERSIZE.html'
+ default: 524288
+ services:
+ - rgw
+ min: 1024
+ max: 524288
+ with_legacy: true
+- name: rgw_curl_wait_timeout_ms
+ type: int
+ level: dev
+ default: 1000
+ fmt_desc: The timeout in milliseconds for certain ``curl`` calls.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_curl_low_speed_limit
+ type: int
+ level: advanced
+ long_desc: The average transfer speed in bytes per second that the transfer
+ must stay below during rgw_curl_low_speed_time seconds for libcurl to consider
+ it too slow and abort. Set to zero to disable this.
+ default: 1024
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_curl_low_speed_time
+ type: int
+ level: advanced
+ long_desc: The time in seconds that the transfer speed must stay
+ below the rgw_curl_low_speed_limit for the library to consider it too slow
+ and abort. Set to zero to disable this.
+ default: 5_min
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_curl_tcp_keepalive
+ type: int
+ level: advanced
+ long_desc: Enable TCP keepalive on the HTTP client sockets managed by libcurl. This does not apply to connections received by the HTTP frontend, but only to HTTP requests sent by radosgw. Examples include requests to Keystone for authentication, sync requests from multisite, and requests to key management servers for SSE.
+ enum_values:
+ - 0
+ - 1
+ default: 0
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_copy_obj_progress
+ type: bool
+ level: advanced
+ desc: Send progress report through copy operation
+ long_desc: If true, RGW will send progress information when a copy operation is executed.
+ fmt_desc: Enables output of object progress during long copy operations.
+ default: true
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_copy_obj_progress_every_bytes
+ type: size
+ level: advanced
+ desc: Send copy-object progress info after these many bytes
+ fmt_desc: The minimum bytes between copy progress output.
+ default: 1_M
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_max_copy_obj_concurrent_io
+ type: int
+ level: advanced
+ desc: Number of refcount operations to process concurrently when executing copy_obj
+ default: 10
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_sync_obj_etag_verify
+ type: bool
+ level: advanced
+ desc: Verify if the object copied from remote is identical to its source
+ long_desc: If true, this option computes the MD5 checksum of the data which is written
+ at the destination and checks if it is identical to the ETAG stored in the source.
+ It ensures integrity of the objects fetched from a remote server over HTTP including
+ multisite sync.
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_obj_tombstone_cache_size
+ type: int
+ level: advanced
+ desc: Max number of entries to keep in tombstone cache
+ long_desc: The tombstone cache is used when doing a multi-zone data sync. RGW keeps
+ information there about removed objects, which is needed in order to prevent
+ re-syncing of objects that were already removed.
+ default: 1000
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_data_log_window
+ type: int
+ level: advanced
+ desc: Data log time window
+ long_desc: The data log keeps information about buckets that have objects that
+ were modified within a specific timeframe. The sync process then knows which buckets
+ need to be scanned for data sync.
+ fmt_desc: The data log entries window in seconds.
+ default: 30
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_data_log_changes_size
+ type: int
+ level: dev
+ desc: Max size of pending changes in data log
+ long_desc: RGW will trigger an update to the data log if the number of pending entries
+ reaches this number.
+ fmt_desc: The number of in-memory entries to hold for the data changes log.
+ default: 1000
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_data_log_num_shards
+ type: int
+ level: advanced
+ desc: Number of data log shards
+ long_desc: The number of shards the RGW data log entries will reside in. This affects
+ the data sync parallelism as a shard can only be processed by a single RGW at
+ a time.
+ fmt_desc: The number of shards (objects) on which to keep the
+ data changes log.
+ default: 128
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_data_log_obj_prefix
+ type: str
+ level: dev
+ default: data_log
+ fmt_desc: The object name prefix for the data log.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_data_sync_poll_interval
+ type: int
+ level: dev
+ default: 20
+ fmt_desc: Once multisite's incremental sync of a datalog shard is caught up
+ with its source, it will wait this long (in seconds) before polling for
+ more changes.
+ services:
+ - rgw
+ see_also:
+ - rgw_meta_sync_poll_interval
+ with_legacy: true
+- name: rgw_meta_sync_poll_interval
+ type: int
+ level: dev
+ default: 20
+ fmt_desc: Once multisite's incremental sync of a mdlog shard is caught up
+ with its source, it will wait this long (in seconds) before polling for
+ more changes.
+ services:
+ - rgw
+ see_also:
+ - rgw_data_sync_poll_interval
+ with_legacy: true
+- name: rgw_bucket_sync_spawn_window
+ type: int
+ level: dev
+ default: 20
+ fmt_desc: The maximum number of items that bucket sync is willing to
+ process in parallel (per remote bilog shard).
+ services:
+ - rgw
+ see_also:
+ - rgw_data_sync_spawn_window
+ - rgw_meta_sync_spawn_window
+ with_legacy: true
+- name: rgw_data_sync_spawn_window
+ type: int
+ level: dev
+ default: 20
+ fmt_desc: The maximum number of items that data sync is willing to
+ process in parallel (per remote datalog shard).
+ services:
+ - rgw
+ see_also:
+ - rgw_bucket_sync_spawn_window
+ - rgw_meta_sync_spawn_window
+ with_legacy: true
+- name: rgw_meta_sync_spawn_window
+ type: int
+ level: dev
+ default: 20
+ fmt_desc: The maximum number of items that metadata sync is willing to
+ process in parallel (per remote mdlog shard).
+ services:
+ - rgw
+ see_also:
+ - rgw_bucket_sync_spawn_window
+ - rgw_data_sync_spawn_window
+ with_legacy: true
+- name: rgw_bucket_quota_ttl
+ type: int
+ level: advanced
+ desc: Bucket quota stats cache TTL
+ long_desc: Length of time for bucket stats to be cached within an RGW instance.
+ fmt_desc: The amount of time in seconds cached quota information is
+ trusted. After this timeout, the quota information will be
+ re-fetched from the cluster.
+ default: 10_min
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_bucket_quota_cache_size
+ type: int
+ level: advanced
+ desc: RGW quota stats cache size
+ long_desc: Maximum number of entries in the quota stats cache.
+ default: 10000
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_bucket_default_quota_max_objects
+ type: int
+ level: basic
+ desc: Default quota for max objects in a bucket
+ long_desc: The default quota configuration for max number of objects in a bucket.
+ A negative number means 'unlimited'.
+ fmt_desc: Default max number of objects per bucket. Set on new users,
+ if no other quota is specified. Has no effect on existing users.
+ This variable should be set in the client or global sections
+ so that it is automatically applied to radosgw-admin commands.
+ default: -1
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_bucket_default_quota_max_size
+ type: int
+ level: advanced
+ desc: Default quota for total size in a bucket
+ long_desc: The default quota configuration for total size of objects in a bucket.
+ A negative number means 'unlimited'.
+ fmt_desc: Default max capacity per bucket, in bytes. Set on new users,
+ if no other quota is specified. Has no effect on existing users.
+ default: -1
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_expose_bucket
+ type: bool
+ level: advanced
+ desc: Send Bucket HTTP header with the response
+ long_desc: If true, RGW will send a Bucket HTTP header with the responses. The header
+ will contain the name of the bucket the operation happened on.
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_frontends
+ type: str
+ level: basic
+ desc: RGW frontends configuration
+ long_desc: A comma delimited list of frontend configurations. Each configuration
+ contains the type of the frontend followed by an optional space delimited set
+ of key=value config parameters.
+ fmt_desc: Configures the HTTP frontend(s). The configuration for multiple
+ frontends can be provided in a comma-delimited list. Each frontend
+ configuration may include a list of options separated by spaces,
+ where each option is in the form "key=value" or "key". See
+ `HTTP Frontends`_ for more on supported options.
+ default: beast port=7480
+ services:
+ - rgw
+ with_legacy: true
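+# An illustrative beast frontend configuration (port number and certificate
+# path are placeholders; see `HTTP Frontends`_ for the supported options):
+#   rgw_frontends = beast port=8000 ssl_port=443 ssl_certificate=/etc/ceph/rgw.crt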
+- name: rgw_frontend_defaults
+ type: str
+ level: advanced
+ desc: RGW frontends default configuration
+ long_desc: A comma delimited list of default frontend configuration parameters.
+ default: beast ssl_certificate=config://rgw/cert/$realm/$zone.crt ssl_private_key=config://rgw/cert/$realm/$zone.key
+ services:
+ - rgw
+- name: rgw_beast_enable_async
+ type: bool
+ level: dev
+ desc: Enable async request processing under beast using coroutines
+ long_desc: When enabled, the beast frontend will process requests using
+ coroutines, allowing the concurrent processing of several requests on the
+ same thread. When disabled, the number of concurrent requests will be
+ limited by the thread count, but debugging and tracing the synchronous
+ calls can be easier.
+ default: true
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_user_quota_bucket_sync_interval
+ type: int
+ level: advanced
+ desc: User quota bucket sync interval
+ long_desc: Time period for accumulating modified buckets before syncing these stats.
+ fmt_desc: The amount of time in seconds bucket quota information is
+ accumulated before syncing to the cluster. During this time,
+ other RGW instances will not see the changes in bucket quota
+ stats from operations on this instance.
+ default: 3_min
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_user_quota_sync_interval
+ type: int
+ level: advanced
+ desc: User quota sync interval
+ long_desc: Time period for accumulating modified buckets before syncing entire user
+ stats.
+ fmt_desc: The amount of time in seconds user quota information is
+ accumulated before syncing to the cluster. During this time,
+ other RGW instances will not see the changes in user quota stats
+ from operations on this instance.
+ default: 1_day
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_user_quota_sync_idle_users
+ type: bool
+ level: advanced
+ desc: Should sync idle users quota
+ long_desc: Whether stats for idle users should be fully synced.
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_user_quota_sync_wait_time
+ type: int
+ level: advanced
+ desc: User quota full-sync wait time
+ long_desc: Minimum time between two full stats syncs for non-idle users.
+ default: 1_day
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_user_default_quota_max_objects
+ type: int
+ level: basic
+ desc: User quota max objects
+ long_desc: The default quota configuration for total number of objects for a single
+ user. A negative number means 'unlimited'.
+ fmt_desc: Default max number of objects for a user. This includes all
+ objects in all buckets owned by the user. Set on new users,
+ if no other quota is specified. Has no effect on existing users.
+ default: -1
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_user_default_quota_max_size
+ type: int
+ level: basic
+ desc: User quota max size
+ long_desc: The default quota configuration for total size of objects for a single
+ user. A negative number means 'unlimited'.
+ fmt_desc: The value for user max size quota in bytes set on new users,
+ if no other quota is specified. Has no effect on existing users.
+ default: -1
+ services:
+ - rgw
+ with_legacy: true
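+# A hedged example of the default-quota options above (placeholder values):
+# giving newly created users a 1 TiB size cap with no object-count limit
+# could look like
+#   rgw_user_default_quota_max_size = 1099511627776
+#   rgw_user_default_quota_max_objects = -1
+# in the client or global section, applied before the users are created.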
+- name: rgw_multipart_min_part_size
+ type: size
+ level: advanced
+ desc: Minimum S3 multipart-upload part size
+ long_desc: When doing a multipart upload, each part (other than the last part) must
+ be at least this size.
+ default: 5_M
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_multipart_part_upload_limit
+ type: int
+ level: advanced
+ desc: Max number of parts in multipart upload
+ default: 10000
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_max_slo_entries
+ type: int
+ level: advanced
+ desc: Max number of entries in Swift Static Large Object manifest
+ default: 1000
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_olh_pending_timeout_sec
+ type: int
+ level: dev
+ desc: Max time for pending OLH change to complete
+ long_desc: OLH is a versioned object's logical head. Operations on it are journaled
+ and marked as pending before completion. If an operation doesn't complete within this
+ number of seconds, we remove the operation from the journal.
+ default: 1_hr
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_user_max_buckets
+ type: int
+ level: basic
+ desc: Max number of buckets per user
+ long_desc: A user can create at most this number of buckets. Zero means no limit;
+ a negative value means users cannot create any new buckets, although users will
+ retain buckets already created.
+ default: 1000
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_objexp_gc_interval
+ type: uint
+ level: advanced
+ desc: Swift objects expirer garbage collector interval
+ default: 600
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_objexp_hints_num_shards
+ type: uint
+ level: advanced
+ desc: Number of object expirer data shards
+ long_desc: The number of shards the (Swift) object expirer will store its data on.
+ default: 127
+ services:
+ - rgw
+ with_legacy: true
+# maximum number of entries in a single operation when processing objexp data
+- name: rgw_objexp_chunk_size
+ type: uint
+ level: dev
+ default: 100
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_enable_static_website
+ type: bool
+ level: basic
+ desc: Enable static website APIs
+ long_desc: This configurable controls whether RGW handles the website control APIs.
+ RGW can serve static websites if s3website hostnames are configured, which is
+ unrelated to this configurable.
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_user_unique_email
+ type: bool
+ level: basic
+ desc: Require local RGW users to have unique email addresses
+ long_desc: Require builtin user accounts to have unique email addresses. This setting
+ is historical. In the future, non-enforcement of email address uniqueness is likely
+ to become the default.
+ default: true
+ services:
+ - rgw
+- name: rgw_log_http_headers
+ type: str
+ level: basic
+ desc: List of HTTP headers to log
+ long_desc: A comma-delimited list of HTTP headers to log when seen, matched without
+ regard to case (e.g., http_x_forwarded_for).
+ fmt_desc: Comma-delimited list of HTTP headers to include with ops
+ log entries. Header names are case insensitive, and use
+ the full header name with words separated by underscores.
+ example: http_x_forwarded_for, http_x_special_k
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_num_async_rados_threads
+ type: int
+ level: advanced
+ desc: Number of concurrent RADOS operations in multisite sync
+ long_desc: The number of concurrent RADOS IO operations that will be triggered for
+ handling multisite sync operations. This covers control-related work, not the
+ actual sync operations.
+ default: 32
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_md_notify_interval_msec
+ type: int
+ level: advanced
+ desc: Length of time to aggregate metadata changes
+ long_desc: Length of time (in milliseconds) in which the master zone aggregates
+ all the metadata changes that occurred, before sending notifications to all the
+ other zones.
+ default: 200
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_run_sync_thread
+ type: bool
+ level: advanced
+ desc: Should run sync thread
+ fmt_desc: If there are other zones in the realm to sync from, spawn threads
+ to handle the sync of data and metadata.
+ default: true
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_sync_lease_period
+ type: int
+ level: dev
+ default: 2_min
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_sync_log_trim_interval
+ type: int
+ level: advanced
+ desc: Sync log trim interval
+ long_desc: Time in seconds between attempts to trim sync logs.
+ default: 20_min
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_sync_log_trim_max_buckets
+ type: int
+ level: advanced
+ desc: Maximum number of buckets to trim per interval
+ long_desc: The maximum number of buckets to consider for bucket index log trimming
+ each trim interval, regardless of the number of bucket index shards. Priority
+ is given to buckets with the most sync activity over the last trim interval.
+ default: 16
+ services:
+ - rgw
+ see_also:
+ - rgw_sync_log_trim_interval
+ - rgw_sync_log_trim_min_cold_buckets
+ - rgw_sync_log_trim_concurrent_buckets
+- name: rgw_sync_log_trim_min_cold_buckets
+ type: int
+ level: advanced
+ desc: Minimum number of cold buckets to trim per interval
+ long_desc: Of the `rgw_sync_log_trim_max_buckets` selected for bucket index log
+ trimming each trim interval, at least this many of them must be 'cold' buckets.
+ These buckets are selected in order from the list of all bucket instances, to
+ guarantee that all buckets will be visited eventually.
+ default: 4
+ services:
+ - rgw
+ see_also:
+ - rgw_sync_log_trim_interval
+ - rgw_sync_log_trim_max_buckets
+ - rgw_sync_log_trim_concurrent_buckets
+- name: rgw_sync_log_trim_concurrent_buckets
+ type: int
+ level: advanced
+ desc: Maximum number of buckets to trim in parallel
+ default: 4
+ services:
+ - rgw
+ see_also:
+ - rgw_sync_log_trim_interval
+ - rgw_sync_log_trim_max_buckets
+ - rgw_sync_log_trim_min_cold_buckets
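+# Illustrative arithmetic for the three trim knobs above (not a tuning
+# recommendation): with the defaults, every rgw_sync_log_trim_interval
+# (20 min) RGW considers at most 16 buckets, at least 4 of which are
+# 'cold' picks taken in order from the full bucket-instance list, and
+# trims at most 4 of them in parallel.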
+- name: rgw_sync_data_inject_err_probability
+ type: float
+ level: dev
+ default: 0
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_sync_meta_inject_err_probability
+ type: float
+ level: dev
+ default: 0
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_sync_data_full_inject_err_probability
+ type: float
+ level: dev
+ default: 0
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_sync_trace_history_size
+ type: size
+ level: advanced
+ desc: Sync trace history size
+ long_desc: Maximum number of complete sync trace entries to keep.
+ default: 4_K
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_sync_trace_per_node_log_size
+ type: int
+ level: advanced
+ desc: Sync trace per-node log size
+ long_desc: The number of log entries to keep per sync-trace node.
+ default: 32
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_sync_trace_servicemap_update_interval
+ type: int
+ level: advanced
+ desc: Sync-trace service-map update interval
+ long_desc: Number of seconds between service-map updates of sync-trace events.
+ default: 10
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_period_push_interval
+ type: float
+ level: advanced
+ desc: Period push interval
+ long_desc: Number of seconds to wait before retrying 'period push' operation.
+ default: 2
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_period_push_interval_max
+ type: float
+ level: advanced
+ desc: Period push maximum interval
+ long_desc: The max number of seconds to wait before retrying 'period push' after
+ exponential backoff.
+ default: 30
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_safe_max_objects_per_shard
+ type: int
+ level: advanced
+ desc: Safe number of objects per shard
+ long_desc: This is the max number of objects per bucket index shard that RGW considers
+ safe. RGW will warn if it identifies a bucket where its per-shard count is higher
+ than a percentage of this number.
+ default: 102400
+ services:
+ - rgw
+ see_also:
+ - rgw_shard_warning_threshold
+ with_legacy: true
+# pct of safe max at which to warn
+- name: rgw_shard_warning_threshold
+ type: float
+ level: advanced
+ desc: Warn about max objects per shard
+ long_desc: Warn if the number of objects per shard in a specific bucket exceeds this
+ percentage of the safe number.
+ default: 90
+ services:
+ - rgw
+ see_also:
+ - rgw_safe_max_objects_per_shard
+ with_legacy: true
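+# Worked example using the defaults above: with
+# rgw_safe_max_objects_per_shard=102400 and rgw_shard_warning_threshold=90,
+# a warning is emitted once a bucket index shard holds more than
+# 102400 * 0.90 = 92160 objects.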
+- name: rgw_swift_versioning_enabled
+ type: bool
+ level: advanced
+ desc: Enable Swift versioning
+ fmt_desc: |
+ Enables the Object Versioning of OpenStack Object Storage API.
+ This allows clients to put the ``X-Versions-Location`` attribute
+ on containers that should be versioned. The attribute specifies
+ the name of the container storing archived versions. It must be owned
+ by the same user that owns the versioned container, due to access
+ control verification - ACLs are NOT taken into consideration.
+ Those containers cannot be versioned by the S3 object versioning
+ mechanism.
+
+ A slightly different attribute, ``X-History-Location``, which is also understood by
+ `OpenStack Swift <https://docs.openstack.org/swift/latest/api/object_versioning.html>`_
+ for handling ``DELETE`` operations, is currently not supported.
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_swift_custom_header
+ type: str
+ level: advanced
+ desc: Enable swift custom header
+ long_desc: If not empty, specifies the name of an HTTP header that can include custom
+ data. When uploading an object, if this header is passed, RGW will store its value
+ and make it available when listing the bucket.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_swift_need_stats
+ type: bool
+ level: advanced
+ desc: Enable stats on bucket listing in Swift
+ default: true
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_reshard_num_logs
+ type: uint
+ level: advanced
+ default: 16
+ services:
+ - rgw
+ min: 1
+- name: rgw_reshard_bucket_lock_duration
+ type: uint
+ level: advanced
+ desc: Number of seconds the timeout on the reshard locks (bucket reshard lock and
+ reshard log lock) is set to. As a reshard proceeds these locks can be renewed/extended.
+ If too short, reshards cannot complete and will fail, requiring a future reshard
+ attempt. If too long, a hung or crashed reshard attempt will keep the bucket locked
+ for an extended period, not allowing RGW to detect the failed reshard attempt
+ and recover.
+ default: 360
+ tags:
+ - performance
+ services:
+ - rgw
+ min: 30
+- name: rgw_debug_inject_set_olh_err
+ type: uint
+ level: dev
+ desc: If non-zero, inject errors between rados olh modification initialization and
+ bucket index instance linking; the value determines the error code. This exists
+ for development and testing purposes to help simulate cases where bucket index
+ entries aren't cleaned up by the request thread after an error scenario.
+ default: 0
+ with_legacy: true
+ services:
+ - rgw
+- name: rgw_debug_inject_olh_cancel_modification_err
+ type: bool
+ level: dev
+ desc: Whether to inject an error to simulate a failure to cancel olh
+ modification. This exists for development and testing purposes.
+ default: false
+ with_legacy: true
+ services:
+ - rgw
+- name: rgw_reshard_batch_size
+ type: uint
+ level: advanced
+ desc: Number of reshard entries to batch together before sending the operations
+ to the CLS back-end
+ default: 64
+ tags:
+ - performance
+ services:
+ - rgw
+ min: 8
+- name: rgw_reshard_max_aio
+ type: uint
+ level: advanced
+ desc: Maximum number of outstanding asynchronous I/O operations to allow at a time
+ during resharding
+ default: 128
+ tags:
+ - performance
+ services:
+ - rgw
+ min: 16
+- name: rgw_trust_forwarded_https
+ type: bool
+ level: advanced
+ desc: Trust Forwarded and X-Forwarded-Proto headers
+ long_desc: When a proxy in front of radosgw is used for ssl termination, radosgw
+ does not know whether incoming http connections are secure. Enable this option
+ to trust the Forwarded and X-Forwarded-Proto headers sent by the proxy when determining
+ whether the connection is secure. This is required for some features, such as
+ server side encryption. (Never enable this setting if you do not have a trusted
+ proxy in front of radosgw, or else malicious users will be able to set these headers
+ in any request.)
+ fmt_desc: When a proxy in front of radosgw is used for ssl termination, radosgw
+ does not know whether incoming http connections are secure. Enable
+ this option to trust the ``Forwarded`` and ``X-Forwarded-Proto`` headers
+ sent by the proxy when determining whether the connection is secure.
+ This is required for some features, such as server side encryption.
+ (Never enable this setting if you do not have a trusted proxy in front of
+ radosgw, or else malicious users will be able to set these headers in
+ any request.)
+ default: false
+ services:
+ - rgw
+ see_also:
+ - rgw_crypt_require_ssl
+ with_legacy: true
+- name: rgw_crypt_require_ssl
+ type: bool
+ level: advanced
+ desc: Requests including encryption key headers must be sent over ssl
+ default: true
+ services:
+ - rgw
+ with_legacy: true
+# base64 encoded key for encryption of rgw objects
+- name: rgw_crypt_default_encryption_key
+ type: str
+ level: dev
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_crypt_s3_kms_backend
+ type: str
+ level: advanced
+ desc: Where the SSE-KMS encryption keys are stored. Supported KMS systems are OpenStack
+ Barbican ('barbican', the default) and HashiCorp Vault ('vault').
+ fmt_desc: Where the SSE-KMS encryption keys are stored. Supported KMS
+ systems are OpenStack Barbican (``barbican``, the default) and
+ HashiCorp Vault (``vault``).
+ default: barbican
+ services:
+ - rgw
+ enum_values:
+ - barbican
+ - vault
+ - testing
+ - kmip
+ with_legacy: true
+# extra keys that may be used for aws:kms
+# defined as map "key1=YmluCmJvb3N0CmJvb3N0LQ== key2=b3V0CnNyYwpUZXN0aW5nCg=="
+- name: rgw_crypt_s3_kms_encryption_keys
+ type: str
+ level: dev
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_crypt_vault_auth
+ type: str
+ level: advanced
+ desc: Type of authentication method to be used with Vault.
+ fmt_desc: Type of authentication method to be used. Valid choices are
+ ``token`` and ``agent``.
+ default: token
+ services:
+ - rgw
+ see_also:
+ - rgw_crypt_s3_kms_backend
+ - rgw_crypt_vault_addr
+ - rgw_crypt_vault_token_file
+ enum_values:
+ - token
+ - agent
+ with_legacy: true
+- name: rgw_crypt_vault_token_file
+ type: str
+ level: advanced
+ desc: If authentication method is 'token', provide a path to the token file, which
+ for security reasons should be readable only by the Rados Gateway.
+ services:
+ - rgw
+ see_also:
+ - rgw_crypt_s3_kms_backend
+ - rgw_crypt_vault_auth
+ - rgw_crypt_vault_addr
+ with_legacy: true
+- name: rgw_crypt_vault_addr
+ type: str
+ level: advanced
+ desc: Vault server base address.
+ fmt_desc: Vault server base address, e.g. ``http://vaultserver:8200``.
+ services:
+ - rgw
+ see_also:
+ - rgw_crypt_s3_kms_backend
+ - rgw_crypt_vault_auth
+ - rgw_crypt_vault_prefix
+ with_legacy: true
+# Optional URL prefix to Vault secret path
+- name: rgw_crypt_vault_prefix
+ type: str
+ level: advanced
+ desc: Vault secret URL prefix, which can be used to restrict access to a particular
+ subset of the Vault secret space.
+ fmt_desc: The Vault secret URL prefix, which can be used to restrict access
+ to a particular subset of the secret space, e.g. ``/v1/secret/data``.
+ services:
+ - rgw
+ see_also:
+ - rgw_crypt_s3_kms_backend
+ - rgw_crypt_vault_addr
+ - rgw_crypt_vault_auth
+ with_legacy: true
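+# Illustrative composition of the two settings above (the paths and key id
+# are hypothetical): with rgw_crypt_vault_addr=http://vaultserver:8200 and
+# rgw_crypt_vault_prefix=/v1/secret/data, a key id such as
+# myproject/mybucketkey would be fetched from
+# http://vaultserver:8200/v1/secret/data/myproject/mybucketkey.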
+# kv, transit or other supported secret engines
+- name: rgw_crypt_vault_secret_engine
+ type: str
+ level: advanced
+ desc: Vault Secret Engine to be used to retrieve encryption keys.
+ fmt_desc: |
+ Vault Secret Engine to be used to retrieve encryption keys: choose
+ between ``kv-v2`` and ``transit``.
+ default: transit
+ services:
+ - rgw
+ see_also:
+ - rgw_crypt_s3_kms_backend
+ - rgw_crypt_vault_auth
+ - rgw_crypt_vault_addr
+ with_legacy: true
+# Vault Namespace (only available in Vault Enterprise version)
+- name: rgw_crypt_vault_namespace
+ type: str
+ level: advanced
+ desc: Vault Namespace to be used to select your tenant
+ fmt_desc: If set, Vault Namespace provides tenant isolation for teams and individuals
+ on the same Vault Enterprise instance, e.g. ``acme/tenant1``
+ services:
+ - rgw
+ see_also:
+ - rgw_crypt_s3_kms_backend
+ - rgw_crypt_vault_auth
+ - rgw_crypt_vault_addr
+ with_legacy: true
+# Enable TLS authentication between rgw and vault
+- name: rgw_crypt_vault_verify_ssl
+ type: bool
+ level: advanced
+ desc: Should RGW verify the vault server SSL certificate.
+ default: true
+ services:
+ - rgw
+ with_legacy: true
+# TLS certs options
+- name: rgw_crypt_vault_ssl_cacert
+ type: str
+ level: advanced
+ desc: Path for custom ca certificate for accessing vault server
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_crypt_vault_ssl_clientcert
+ type: str
+ level: advanced
+ desc: Path for custom client certificate for accessing vault server
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_crypt_vault_ssl_clientkey
+ type: str
+ level: advanced
+ desc: Path for private key required for client cert
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_crypt_kmip_addr
+ type: str
+ level: advanced
+ desc: kmip server address
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_crypt_kmip_ca_path
+ type: str
+ level: advanced
+ desc: ca for kmip servers
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_crypt_kmip_username
+ type: str
+ level: advanced
+ desc: when authenticating via username
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_crypt_kmip_password
+ type: str
+ level: advanced
+ desc: optional w/ username
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_crypt_kmip_client_cert
+ type: str
+ level: advanced
+ desc: connect using client certificate
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_crypt_kmip_client_key
+ type: str
+ level: advanced
+ desc: private key matching the client certificate
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_crypt_kmip_kms_key_template
+ type: str
+ level: advanced
+ desc: sse-kms; kmip key names
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_crypt_kmip_s3_key_template
+ type: str
+ level: advanced
+ desc: sse-s3; kmip key template
+ default: $keyid
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_crypt_suppress_logs
+ type: bool
+ level: advanced
+ desc: Suppress logs that might print client key
+ default: true
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_crypt_sse_s3_backend
+ type: str
+ level: advanced
+ desc: Where the SSE-S3 encryption keys are stored. The only valid choice here is
+ HashiCorp Vault ('vault').
+ fmt_desc: Where the SSE-S3 encryption keys are stored. The only valid
+ choice is HashiCorp Vault (``vault``).
+ default: vault
+ services:
+ - rgw
+ enum_values:
+ - vault
+ with_legacy: true
+
+- name: rgw_crypt_sse_s3_vault_secret_engine
+ type: str
+ level: advanced
+ desc: Vault Secret Engine to be used to retrieve encryption keys.
+ fmt_desc: |
+ Vault Secret Engine to be used to retrieve encryption keys. The
+ only valid choice here is transit.
+ default: transit
+ services:
+ - rgw
+ see_also:
+ - rgw_crypt_sse_s3_backend
+ - rgw_crypt_sse_s3_vault_auth
+ - rgw_crypt_sse_s3_vault_addr
+ with_legacy: true
+- name: rgw_crypt_sse_s3_key_template
+ type: str
+ level: advanced
+ desc: template for per-bucket sse-s3 keys in vault.
+ long_desc: This is the template for per-bucket sse-s3 keys.
+ This string may include ``%bucket_id`` which will be expanded out to
+ the bucket marker, a unique uuid assigned to that bucket.
+ It could contain ``%owner_id``, which will expand out to the owner's id.
+ Any other use of % is reserved and should not be used.
+ If the template contains ``%bucket_id``, associated bucket keys
+ will be automatically removed when the bucket is removed.
+ services:
+ - rgw
+ default: "%bucket_id"
+ see_also:
+ - rgw_crypt_sse_s3_backend
+ - rgw_crypt_sse_s3_vault_auth
+ - rgw_crypt_sse_s3_vault_addr
+ with_legacy: true
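+# Illustrative expansion of the template above (the marker and owner are
+# hypothetical): with the default "%bucket_id", a bucket whose marker is
+# 1a2b3c4d-... gets the Vault key name "1a2b3c4d-..."; a template such as
+# "%owner_id/%bucket_id" would instead yield "alice/1a2b3c4d-...".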
+- name: rgw_crypt_sse_s3_vault_auth
+ type: str
+ level: advanced
+ desc: Type of authentication method to be used with SSE-S3 and Vault.
+ fmt_desc: Type of authentication method to be used. Valid choices are
+ ``token`` and ``agent``.
+ default: token
+ services:
+ - rgw
+ see_also:
+ - rgw_crypt_sse_s3_backend
+ - rgw_crypt_sse_s3_vault_addr
+ - rgw_crypt_sse_s3_vault_token_file
+ enum_values:
+ - token
+ - agent
+ with_legacy: true
+- name: rgw_crypt_sse_s3_vault_token_file
+ type: str
+ level: advanced
+ desc: If authentication method is 'token', provide a path to the token file, which
+ for security reasons should be readable only by the Rados Gateway.
+ services:
+ - rgw
+ see_also:
+ - rgw_crypt_sse_s3_backend
+ - rgw_crypt_sse_s3_vault_auth
+ - rgw_crypt_sse_s3_vault_addr
+ with_legacy: true
+- name: rgw_crypt_sse_s3_vault_addr
+ type: str
+ level: advanced
+ desc: SSE-S3 Vault server base address.
+ fmt_desc: Vault server base address, e.g. ``http://vaultserver:8200``.
+ services:
+ - rgw
+ see_also:
+ - rgw_crypt_sse_s3_backend
+ - rgw_crypt_sse_s3_vault_auth
+ - rgw_crypt_sse_s3_vault_prefix
+ with_legacy: true
+# Optional URL prefix to Vault secret path
+- name: rgw_crypt_sse_s3_vault_prefix
+ type: str
+ level: advanced
+ desc: SSE-S3 Vault secret URL prefix, which can be used to restrict access to a particular
+ subset of the Vault secret space.
+ fmt_desc: The Vault secret URL prefix, which can be used to restrict access
+ to a particular subset of the secret space, e.g. ``/v1/secret/data``.
+ services:
+ - rgw
+ see_also:
+ - rgw_crypt_sse_s3_backend
+ - rgw_crypt_sse_s3_vault_addr
+ - rgw_crypt_sse_s3_vault_auth
+ with_legacy: true
+# Vault Namespace (only available in Vault Enterprise version)
+- name: rgw_crypt_sse_s3_vault_namespace
+ type: str
+ level: advanced
+ desc: Vault Namespace to be used to select your tenant
+ fmt_desc: If set, Vault Namespace provides tenant isolation for teams and individuals
+ on the same Vault Enterprise instance, e.g. ``acme/tenant1``
+ services:
+ - rgw
+ see_also:
+ - rgw_crypt_sse_s3_backend
+ - rgw_crypt_sse_s3_vault_auth
+ - rgw_crypt_sse_s3_vault_addr
+ with_legacy: true
+# Enable TLS authentication between rgw and vault
+- name: rgw_crypt_sse_s3_vault_verify_ssl
+ type: bool
+ level: advanced
+ desc: Should RGW verify the vault server SSL certificate.
+ default: true
+ services:
+ - rgw
+ with_legacy: true
+# TLS certs options
+- name: rgw_crypt_sse_s3_vault_ssl_cacert
+ type: str
+ level: advanced
+ desc: Path for custom ca certificate for accessing vault server
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_crypt_sse_s3_vault_ssl_clientcert
+ type: str
+ level: advanced
+ desc: Path for custom client certificate for accessing vault server
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_crypt_sse_s3_vault_ssl_clientkey
+ type: str
+ level: advanced
+ desc: Path for private key required for client cert
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_list_bucket_min_readahead
+ type: int
+ level: advanced
+ desc: Minimum number of entries to request from rados for bucket listing
+ default: 1000
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_rest_getusage_op_compat
+ type: bool
+ level: advanced
+ desc: REST GetUsage request backward compatibility
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+# The following are tunables for torrent data
+- name: rgw_torrent_flag
+ type: bool
+ level: advanced
+ desc: When true, RGW will calculate and store a SHA256 hash of uploaded object
+ data so the object can be retrieved as a torrent file
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_torrent_tracker
+ type: str
+ level: advanced
+ desc: Torrent field announce and announce list
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_torrent_createby
+ type: str
+ level: advanced
+ desc: Torrent field created by
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_torrent_comment
+ type: str
+ level: advanced
+ desc: Torrent field comment
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_torrent_encoding
+ type: str
+ level: advanced
+ desc: Torrent field encoding
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_data_notify_interval_msec
+ type: int
+ level: advanced
+ desc: data changes notification interval to followers
+ long_desc: In multisite, radosgw will occasionally broadcast new entries in its
+ data changes log to peer zones, so they can prioritize sync of some
+ of the most recent changes. Can be disabled with 0.
+ default: 0
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_torrent_origin
+ type: str
+ level: advanced
+ desc: Torrent origin
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_torrent_sha_unit
+ type: size
+ level: advanced
+ default: 512_K
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_dynamic_resharding
+ type: bool
+ level: basic
+ desc: Enable dynamic resharding
+ long_desc: If true, RGW will dynamically increase the number of shards in buckets
+ that have a high number of objects per shard.
+ default: true
+ services:
+ - rgw
+ see_also:
+ - rgw_max_objs_per_shard
+ - rgw_max_dynamic_shards
+- name: rgw_max_objs_per_shard
+ type: uint
+ level: basic
+ desc: Max objects per shard for dynamic resharding
+ long_desc: This is the max number of objects per bucket index shard that RGW will
+ allow with dynamic resharding. RGW will trigger an automatic reshard operation
+ on the bucket if it exceeds this number.
+ default: 100000
+ services:
+ - rgw
+ see_also:
+ - rgw_dynamic_resharding
+ - rgw_max_dynamic_shards
+- name: rgw_max_dynamic_shards
+ type: uint
+ level: advanced
+ desc: Max shards that dynamic resharding can create
+ long_desc: This is the maximum number of bucket index shards that dynamic sharding
+ is able to create on its own. This does not limit user requested resharding. Ideally
+ this value is a prime number.
+ default: 1999
+ services:
+ - rgw
+ see_also:
+ - rgw_dynamic_resharding
+ - rgw_max_objs_per_shard
+ min: 1
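+# Rough sizing sketch for the three options above (assuming the defaults):
+# a bucket holding 1,000,000 objects on fewer than 10 shards exceeds
+# rgw_max_objs_per_shard=100000, so dynamic resharding schedules it for a
+# larger (ideally prime) shard count, capped at rgw_max_dynamic_shards=1999.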
+- name: rgw_reshard_thread_interval
+ type: uint
+ level: advanced
+ desc: Number of seconds between processing of reshard log entries
+ default: 600
+ services:
+ - rgw
+ min: 10
+- name: rgw_cache_expiry_interval
+ type: uint
+ level: advanced
+ desc: Number of seconds before entries in the cache are assumed stale and re-fetched.
+ Zero is never.
+ long_desc: The Rados Gateway stores metadata and objects in an internal cache, which
+ is kept consistent by the OSDs relaying notify events between the multiple
+ watching RGW processes. If this notification protocol fails, bounding
+ the length of time that any data in the cache is assumed valid ensures
+ that any RGW instance that falls out of sync will eventually recover. This seems
+ to be an issue mostly for large numbers of RGW instances under heavy use. If you
+ would like to turn off cache expiry, set this value to zero.
+ default: 900
+ tags:
+ - performance
+ services:
+ - rgw
+- name: rgw_inject_notify_timeout_probability
+ type: float
+ level: dev
+ desc: Likelihood of ignoring a notify
+ long_desc: This is the probability that the RGW cache will ignore a cache notify
+ message. It exists to help with the development and testing of cache consistency
+ and recovery improvements. Please do not set it in a production cluster, as it
+ actively causes failures. Set this to a floating point value between 0 and 1.
+ default: 0
+ tags:
+ - fault injection
+ - testing
+ services:
+ - rgw
+ min: 0
+ max: 1
+- name: rgw_max_notify_retries
+ type: uint
+ level: advanced
+ desc: Number of attempts to notify peers before giving up.
+ long_desc: The number of times we will attempt to update a peer's cache in the event
+ of error before giving up. This is unlikely to be an issue unless your cluster
+ is very heavily loaded. Beware that increasing this value may cause some operations
+ to take longer in exceptional cases and thus may, rarely, cause clients to time
+ out.
+ default: 10
+ tags:
+ - error recovery
+ services:
+ - rgw
+- name: rgw_sts_entry
+ type: str
+ level: advanced
+ desc: STS URL prefix
+ long_desc: URL path prefix for internal STS requests.
+ default: sts
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_sts_key
+ type: str
+ level: advanced
+ desc: STS Key
+ long_desc: Key used for encrypting/decrypting the session token.
+ default: sts
+ services:
+ - rgw
+ with_legacy: true
+# should we try to use sts for s3?
+- name: rgw_s3_auth_use_sts
+ type: bool
+ level: advanced
+ desc: Should S3 authentication use STS.
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_sts_max_session_duration
+ type: uint
+ level: advanced
+ desc: Session token max duration
+ long_desc: Max duration in seconds for which the session token is valid.
+ default: 43200
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_sts_min_session_duration
+ type: uint
+ level: advanced
+ desc: Minimum allowed duration of a session
+ default: 900
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_max_listing_results
+ type: uint
+ level: advanced
+ desc: Upper bound on results in listing operations, ListBucket max-keys
+ long_desc: This caps the maximum permitted value for listing-like operations in
+ RGW S3. Affects ListBucket(max-keys), ListBucketVersions(max-keys), ListBucketMultipartUploads(max-uploads),
+ ListMultipartUploadParts(max-parts)
+ default: 1000
+ services:
+ - rgw
+ min: 1
+ max: 100000
+- name: rgw_sts_token_introspection_url
+ type: str
+ level: advanced
+ desc: STS Web Token introspection URL
+ long_desc: URL for introspecting an STS Web Token.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_sts_client_id
+ type: str
+ level: advanced
+ desc: Client Id
+ long_desc: Client Id needed for introspecting a Web Token.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_sts_client_secret
+ type: str
+ level: advanced
+ desc: Client Secret
+ long_desc: Client Secret needed for introspecting a Web Token.
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_max_concurrent_requests
+ type: int
+ level: basic
+ desc: Maximum number of concurrent HTTP requests.
+ long_desc: Maximum number of concurrent HTTP requests that the beast frontend will
+ process. Tuning this can help to limit memory usage under heavy load.
+ default: 1024
+ tags:
+ - performance
+ services:
+ - rgw
+ see_also:
+ - rgw_frontends
+- name: rgw_scheduler_type
+ type: str
+ level: advanced
+ desc: Set the type of the request scheduler. Defaults to throttler; the other valid
+ value is dmclock, which is experimental
+ fmt_desc: |
+ The RGW scheduler to use. Valid values are ``throttler`` and
+ ``dmclock``. Currently defaults to ``throttler``, which throttles Beast
+ frontend requests. ``dmclock`` is *experimental* and requires
+ ``dmclock`` to be included in the ``experimental_feature_enabled``
+ configuration option.
+
+ The options below tune the experimental dmclock scheduler. For
+ additional reading on dmclock, see :ref:`dmclock-qos`. ``op_class`` for the flags below is
+ one of ``admin``, ``auth``, ``metadata``, or ``data``.
+ default: throttler
+ services:
+ - rgw
+- name: rgw_dmclock_admin_res
+ type: float
+ level: advanced
+ desc: mclock reservation for admin requests
+ default: 100
+ services:
+ - rgw
+ see_also:
+ - rgw_dmclock_admin_wgt
+ - rgw_dmclock_admin_lim
+- name: rgw_dmclock_admin_wgt
+ type: float
+ level: advanced
+ desc: mclock weight for admin requests
+ default: 100
+ services:
+ - rgw
+ see_also:
+ - rgw_dmclock_admin_res
+ - rgw_dmclock_admin_lim
+- name: rgw_dmclock_admin_lim
+ type: float
+ level: advanced
+ desc: mclock limit for admin requests
+ default: 0
+ services:
+ - rgw
+ see_also:
+ - rgw_dmclock_admin_res
+ - rgw_dmclock_admin_wgt
+- name: rgw_dmclock_auth_res
+ type: float
+ level: advanced
+ desc: mclock reservation for auth requests
+ default: 200
+ services:
+ - rgw
+ see_also:
+ - rgw_dmclock_auth_wgt
+ - rgw_dmclock_auth_lim
+- name: rgw_dmclock_auth_wgt
+ type: float
+ level: advanced
+ desc: mclock weight for auth requests
+ default: 100
+ services:
+ - rgw
+ see_also:
+ - rgw_dmclock_auth_res
+ - rgw_dmclock_auth_lim
+- name: rgw_dmclock_auth_lim
+ type: float
+ level: advanced
+ desc: mclock limit for auth requests
+ default: 0
+ services:
+ - rgw
+ see_also:
+ - rgw_dmclock_auth_res
+ - rgw_dmclock_auth_wgt
+- name: rgw_dmclock_data_res
+ type: float
+ level: advanced
+ desc: mclock reservation for object data requests
+ default: 500
+ services:
+ - rgw
+ see_also:
+ - rgw_dmclock_data_wgt
+ - rgw_dmclock_data_lim
+- name: rgw_dmclock_data_wgt
+ type: float
+ level: advanced
+ desc: mclock weight for object data requests
+ default: 500
+ services:
+ - rgw
+ see_also:
+ - rgw_dmclock_data_res
+ - rgw_dmclock_data_lim
+- name: rgw_dmclock_data_lim
+ type: float
+ level: advanced
+ desc: mclock limit for object data requests
+ default: 0
+ services:
+ - rgw
+ see_also:
+ - rgw_dmclock_data_res
+ - rgw_dmclock_data_wgt
+- name: rgw_dmclock_metadata_res
+ type: float
+ level: advanced
+ desc: mclock reservation for metadata requests
+ default: 500
+ services:
+ - rgw
+ see_also:
+ - rgw_dmclock_metadata_wgt
+ - rgw_dmclock_metadata_lim
+- name: rgw_dmclock_metadata_wgt
+ type: float
+ level: advanced
+ desc: mclock weight for metadata requests
+ default: 500
+ services:
+ - rgw
+ see_also:
+ - rgw_dmclock_metadata_res
+ - rgw_dmclock_metadata_lim
+- name: rgw_dmclock_metadata_lim
+ type: float
+ level: advanced
+ desc: mclock limit for metadata requests
+ default: 0
+ services:
+ - rgw
+ see_also:
+ - rgw_dmclock_metadata_res
+ - rgw_dmclock_metadata_wgt
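+# A minimal sketch of enabling the experimental dmclock scheduler through
+# ceph.conf (section name and values are illustrative, not recommendations):
+#
+#   [client.rgw.myhost]              # hypothetical RGW instance name
+#   rgw scheduler type = dmclock
+#   rgw dmclock data res = 500
+#   rgw dmclock data wgt = 500
+#   rgw dmclock data lim = 0
+#
+# Per rgw_scheduler_type above, dmclock must also be listed in the
+# experimental_feature_enabled configuration option.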
+- name: rgw_default_data_log_backing
+ type: str
+ level: advanced
+ desc: Default backing store for the RGW data sync log
+ long_desc: Whether to use the older OMAP backing store or the high-performance
+ FIFO-based backing store by default. This only covers the creation of the log on
+ startup if none exists.
+ default: fifo
+ services:
+ - rgw
+ enum_values:
+ - fifo
+ - omap
+- name: rgw_d3n_l1_local_datacache_enabled
+ type: bool
+ level: advanced
+ desc: Enable datacenter-scale dataset delivery local cache
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_d3n_l1_datacache_persistent_path
+ type: str
+ level: advanced
+ desc: path of the directory for storing local cache object data
+ default: /tmp/rgw_datacache/
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_d3n_l1_datacache_size
+ type: size
+ level: advanced
+ desc: datacache maximum size on disk in bytes
+ default: 1_G
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_d3n_l1_evict_cache_on_start
+ type: bool
+ level: advanced
+ desc: clear the content of the persistent data cache directory on start
+ default: true
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_d3n_l1_fadvise
+ type: int
+ level: advanced
+ desc: posix_fadvise() flag for access pattern of cache files
+ long_desc: for example, to bypass the page cache use
+ POSIX_FADV_DONTNEED=4
+ default: 4
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_d3n_l1_eviction_policy
+ type: str
+ level: advanced
+ desc: select the d3n cache eviction policy
+ default: lru
+ services:
+ - rgw
+ enum_values:
+ - lru
+ - random
+ with_legacy: true
+- name: rgw_d3n_libaio_aio_threads
+ type: int
+ level: advanced
+ desc: specifies the maximum number of worker threads that may be used by libaio
+ default: 20
+ services:
+ - rgw
+ see_also:
+ - rgw_thread_pool_size
+ with_legacy: true
+- name: rgw_d3n_libaio_aio_num
+ type: int
+ level: advanced
+ desc: specifies the maximum number of simultaneous I/O requests that libaio expects to enqueue
+ default: 64
+ services:
+ - rgw
+ see_also:
+ - rgw_thread_pool_size
+ with_legacy: true
+- name: rgw_backend_store
+ type: str
+ level: advanced
+ desc: Experimental option to set the backend store type
+ long_desc: Defaults to rados. Other valid values are dbstore, motr, and daos (all experimental).
+ default: rados
+ services:
+ - rgw
+ enum_values:
+ - rados
+ - dbstore
+ - motr
+ - daos
+- name: rgw_config_store
+ type: str
+ level: advanced
+ desc: Configuration storage backend
+ default: rados
+ services:
+ - rgw
+ enum_values:
+ - rados
+ - dbstore
+ - json
+- name: rgw_filter
+ type: str
+ level: advanced
+ desc: Experimental option to set a filter
+ long_desc: Defaults to none. Other valid values are base and trace (both experimental).
+ default: none
+ services:
+ - rgw
+ enum_values:
+ - none
+ - base
+ - trace
+- name: dbstore_db_dir
+ type: str
+ level: advanced
+ desc: path for the directory for storing the db backend store data
+ default: /var/lib/ceph/radosgw
+ services:
+ - rgw
+- name: dbstore_db_name_prefix
+ type: str
+ level: advanced
+ desc: prefix to the file names created by db backend store
+ default: dbstore
+ services:
+ - rgw
+- name: dbstore_config_uri
+ type: str
+ level: advanced
+ desc: 'Config database URI. URIs beginning with file: refer to local files opened with SQLite.'
+ default: file:/var/lib/ceph/radosgw/dbstore-config.db
+ see_also:
+ - rgw_config_store
+ services:
+ - rgw
+- name: rgw_json_config
+ type: str
+ level: advanced
+ desc: Path to a json file that contains the static zone and zonegroup configuration. Requires rgw_config_store=json.
+ default: /var/lib/ceph/radosgw/config.json
+ see_also:
+ - rgw_config_store
+ services:
+ - rgw
+- name: motr_profile_fid
+ type: str
+ level: advanced
+ desc: Experimental option to set the Motr profile fid
+ long_desc: example value 0x7000000000000001:0x4f
+ default: 0x7000000000000001:0x0
+ services:
+ - rgw
+- name: motr_my_fid
+ type: str
+ level: advanced
+ desc: Experimental option to set my Motr fid
+ long_desc: example value 0x7200000000000001:0x29
+ default: 0x7200000000000001:0x0
+ services:
+ - rgw
+- name: motr_admin_fid
+ type: str
+ level: advanced
+ desc: Admin Tool Motr FID for admin-level access.
+ long_desc: example value 0x7200000000000001:0x2c
+ default: 0x7200000000000001:0x0
+ services:
+ - rgw
+- name: motr_admin_endpoint
+ type: str
+ level: advanced
+ desc: Experimental option to set the Admin Motr endpoint address
+ long_desc: example value 192.168.180.182@tcp:12345:4:1
+ default: 192.168.180.182@tcp:12345:4:1
+ services:
+ - rgw
+- name: motr_my_endpoint
+ type: str
+ level: advanced
+ desc: Experimental option to set my Motr endpoint address
+ long_desc: example value 192.168.180.182@tcp:12345:4:1
+ default: 192.168.180.182@tcp:12345:4:1
+ services:
+ - rgw
+- name: motr_ha_endpoint
+ type: str
+ level: advanced
+ desc: Experimental option to set the Motr HA agent endpoint address
+ long_desc: example value 192.168.180.182@tcp:12345:1:1
+ default: 192.168.180.182@tcp:12345:1:1
+ services:
+ - rgw
+- name: motr_tracing_enabled
+ type: bool
+ level: advanced
+ desc: Set to true when Motr client debugging is needed
+ default: false
+ services:
+ - rgw
+- name: rgw_luarocks_location
+ type: str
+ level: advanced
+ desc: Directory where luarocks installs packages from the allowlist
+ default: @rgw_luarocks_location@
+ services:
+ - rgw
+ flags:
+ - startup
+- name: rgwlc_auto_session_clear
+ type: bool
+ level: advanced
+ desc: Automatically clear stale lifecycle sessions (i.e., after 2 idle processing cycles)
+ default: true
+ services:
+ - rgw
+ with_legacy: true
+- name: rgwlc_skip_bucket_step
+ type: bool
+ level: advanced
+ desc: Conditionally skip the processing (but not the scheduling) of bucket lifecycle
+ default: false
+ services:
+ - rgw
+ with_legacy: true
+- name: rgw_pending_bucket_index_op_expiration
+ type: uint
+ level: advanced
+ default: 120
+ desc: Number of seconds a pending operation can remain in a bucket index shard.
+ long_desc: Number of seconds a pending operation can remain in a bucket
+ index shard before it expires. Used for transactional bucket index
+ operations; if the operation does not complete in this time
+ period, the operation will be dropped.
+ services:
+ - rgw
+ - osd
+ with_legacy: true
+- name: rgw_bucket_index_transaction_instrumentation
+ type: bool
+ level: dev
+ default: false
+ desc: Turns on extra instrumentation surrounding bucket index transactions.
+ services:
+ - rgw
+ - osd
+ with_legacy: true
+- name: rgw_allow_notification_secrets_in_cleartext
+ type: bool
+ level: advanced
+ desc: Allows sending secrets (e.g. passwords) over unencrypted HTTP messages.
+ long_desc: When a bucket notification endpoint requires secrets (e.g. passwords),
+ we allow topic creation only over HTTPS.
+ This parameter can be set to "true" to bypass that check.
+ Use this only if radosgw is on a trusted private network, and the message
+ broker cannot be configured without password authentication. Otherwise, this will
+ leak the credentials of your message broker and compromise its security.
+ default: false
+ services:
+ - rgw
+ see_also:
+ - rgw_trust_forwarded_https
+- name: daos_pool
+ type: str
+ level: advanced
+ desc: DAOS Pool to use
+ default: tank
+ services:
+ - rgw
+- name: rgw_policy_reject_invalid_principals
+ type: bool
+ level: basic
+ desc: Whether to reject policies with invalid principals
+ long_desc: If true, policies with invalid principals will be
+ rejected. We don't support Canonical User identifiers or some
+ other forms of principal that Amazon does, so if you are mirroring
+ policies between RGW and AWS, you may wish to set this to false.
+ default: true
+ services:
+ - rgw
diff --git a/src/common/options/validate-options.py b/src/common/options/validate-options.py
new file mode 100755
index 000000000..5bc5d4d46
--- /dev/null
+++ b/src/common/options/validate-options.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python3
+
+import argparse
+import fileinput
+import sys
+import yaml
+from typing import Any, Dict
+
+
+class ValidationError(Exception):
+ pass
+
+
+OptionType = Dict[str, Any]
+
+
+def validate_see_also(opt: OptionType, opts: Dict[str, OptionType]) -> None:
+ see_also = opt.get('see_also')
+ if see_also is None:
+ return
+ for ref in see_also:
+ if ref not in opts:
+ msg = f'see_also references "{ref}", but no such option exists.'
+ raise ValidationError(msg)
+
+
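+# Illustrative use of the check above (the option entries are hypothetical):
+#
+#   opts = {'a': {'name': 'a', 'see_also': ['b']}}
+#   validate_see_also(opts['a'], opts)  # raises ValidationError: "b" missing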
+def main() -> None:
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ parser.add_argument('yamls', nargs='*')
+ opts = parser.parse_args()
+ options = {}
+ for yaml_file in opts.yamls:
+ with open(yaml_file) as f:
+ yml = yaml.load(f, yaml.SafeLoader)
+ options.update({opt['name']: opt for opt in yml['options']})
+ for name, opt in options.items():
+ try:
+ validate_see_also(opt, options)
+ except ValidationError as e:
+ raise Exception(f'failed to validate "{name}": {e}')
+
+
+if __name__ == '__main__':
+ try:
+ main()
+ except Exception as e:
+ print(e, file=sys.stderr)
+ sys.exit(1)
diff --git a/src/common/options/y2c.py b/src/common/options/y2c.py
new file mode 100755
index 000000000..0b64bec58
--- /dev/null
+++ b/src/common/options/y2c.py
@@ -0,0 +1,366 @@
+#!/usr/bin/env python3
+
+import yaml
+import argparse
+import math
+import os
+import sys
+
+# flake8: noqa: E127
+
+def type_to_cxx(t):
+ return f'Option::TYPE_{t.upper()}'
+
+
+def level_to_cxx(lv):
+ return f'Option::LEVEL_{lv.upper()}'
+
+
+def eval_str(v):
+ if v == "":
+ return v
+ v = v.strip('"').replace('"', '\\"')
+ return f'"{v}"'
+
+
+def eval_value(v, typ):
+ try:
+ if typ == 'str':
+ return eval_str(v)
+ if typ == 'float':
+ return float(v)
+ if typ in ('uint', 'int', 'size', 'secs', 'millisecs'):
+ return int(v)
+ if typ == 'bool':
+ return 'true' if v else 'false'
+ else:
+ return f'"{v}"'
+ except ValueError:
+ times = dict(_min=60,
+ _hr=60*60,
+ _day=24*60*60,
+ _K=1 << 10,
+ _M=1 << 20,
+ _G=1 << 30,
+ _T=1 << 40)
+ for unit, m in times.items():
+ if v.endswith(unit):
+ int(v[:-len(unit)])
+ # user defined literals
+ return v
+ raise ValueError(f'unknown value: {v}')
+
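+# For illustration (not exercised by the build): eval_value('5_M', 'size')
+# validates the numeric prefix and returns '5_M' verbatim, so the generated
+# C++ can rely on the matching user-defined literal, while
+# eval_value('100', 'int') simply yields the integer 100.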
+
+def set_default(default, typ):
+ if default is None:
+ return ''
+ v = eval_value(default, typ)
+ return f'.set_default({v})\n'
+
+
+def set_daemon_default(default, typ):
+ if default is None:
+ return ''
+ v = eval_value(default, typ)
+ return f'.set_daemon_default({v})\n'
+
+
+def add_tags(tags):
+ if tags is None:
+ return ''
+ cxx = ''
+ for tag in tags:
+ v = eval_str(tag)
+ cxx += f'.add_tag({v})\n'
+ return cxx
+
+
+def add_services(services):
+ if services is None:
+ return ''
+ if len(services) == 1:
+ return f'.add_service("{services[0]}")\n'
+ else:
+ param = ', '.join(f'"{s}"' for s in services)
+ return f'.add_service({{{param}}})\n'
+
+
+def add_see_also(see_also):
+ if see_also is None:
+ return ''
+ param = ', '.join(f'"{v}"' for v in see_also)
+ return f'.add_see_also({{{param}}})\n'
+
+
+def set_desc(desc):
+ if desc is None:
+ return ''
+ v = eval_str(desc)
+ return f'.set_description({v})\n'
+
+
+def set_long_desc(desc):
+ if desc is None:
+ return ''
+ v = eval_str(desc)
+ return f'.set_long_description({v})\n'
+
+
+def set_min_max(mi, ma, typ):
+ if mi is None and ma is None:
+ return ''
+ if mi is not None and ma is not None:
+ min_v = eval_value(mi, typ)
+ max_v = eval_value(ma, typ)
+ if isinstance(min_v, str) and isinstance(max_v, int):
+ return f'.set_min_max({min_v}, {max_v}ULL)\n'
+ elif isinstance(min_v, int) and isinstance(max_v, str):
+ return f'.set_min_max({min_v}ULL, {max_v})\n'
+ else:
+ return f'.set_min_max({min_v}, {max_v})\n'
+ if mi is not None:
+ min_v = eval_value(mi, typ)
+ return f'.set_min({min_v})\n'
+ raise ValueError('set_max() is not implemented')
+
+
+def set_enum_allowed(values):
+ if values is None:
+ return ''
+ param = ', '.join(f'"{v}"' for v in values)
+ return f'.set_enum_allowed({{{param}}})\n'
+
+
+def add_flags(flags):
+ if flags is None:
+ return ''
+ cxx = ''
+ for flag in flags:
+ cxx += f'.set_flag(Option::FLAG_{flag.upper()})\n'
+ return cxx
+
+
+def set_validator(validator):
+ if validator is None:
+ return ''
+ validator = validator.rstrip()
+ return f'.set_validator({validator})\n'
+
+
+def add_verbatim(verbatim):
+ if verbatim is None:
+ return ''
+ return verbatim + '\n'
+
+
+def yaml_to_cxx(opt, indent):
+ name = opt['name']
+ typ = opt['type']
+ ctyp = type_to_cxx(typ)
+ level = level_to_cxx(opt['level'])
+ cxx = f'Option("{name}", {ctyp}, {level})\n'
+ cxx += set_desc(opt.get('desc'))
+ cxx += set_long_desc(opt.get('long_desc'))
+ cxx += set_default(opt.get('default'), typ)
+ cxx += set_daemon_default(opt.get('daemon_default'), typ)
+ cxx += set_min_max(opt.get('min'), opt.get('max'), typ)
+ cxx += set_enum_allowed(opt.get('enum_values'))
+ cxx += set_validator(opt.get('validator'))
+ cxx += add_flags(opt.get('flags'))
+ cxx += add_services(opt.get('services'))
+ cxx += add_tags(opt.get('tags'))
+ cxx += add_see_also(opt.get('see_also'))
+ verbatim = add_verbatim(opt.get('verbatim'))
+ cxx += verbatim
+ if verbatim:
+ cxx += '\n'
+ else:
+ cxx = cxx.rstrip()
+ cxx += ',\n'
+ if indent > 0:
+ indented = []
+ for line in cxx.split('\n'):
+ if line:
+ indented.append(' ' * indent + line + '\n')
+ cxx = ''.join(indented)
+ return cxx
+
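+# A sketch of the output for a minimal hypothetical entry
+# {name: foo, type: bool, level: advanced, default: true}, with the
+# default indent of 4:
+#
+#     Option("foo", Option::TYPE_BOOL, Option::LEVEL_ADVANCED)
+#     .set_default(true),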
+
+def type_to_h(t):
+ if t == 'uint':
+ return 'OPT_U32'
+ return f'OPT_{t.upper()}'
+
+
+def yaml_to_h(opt):
+ if opt.get('with_legacy', False):
+ name = opt['name']
+ typ = opt['type']
+ htyp = type_to_h(typ)
+ return f'OPTION({name}, {htyp})'
+ else:
+ return ''
+
+
+TEMPLATE_CC = '''#include "common/options.h"
+{headers}
+
+std::vector<Option> get_{name}_options() {{
+ return std::vector<Option>({{
+@body@
+ }});
+}}
+'''
+
+
+# PyYAML doesn't check for duplicates even though the YAML spec says
+# that mapping keys must be unique and that duplicates must be treated
+# as an error. See https://github.com/yaml/pyyaml/issues/165.
+#
+# This workaround breaks merge keys -- in "<<: *xyz", duplicate keys
+# from xyz mapping raise an error instead of being discarded.
+class UniqueKeySafeLoader(yaml.SafeLoader):
+ def construct_mapping(self, node, deep=False):
+ mapping = super().construct_mapping(node, deep)
+ keys = set()
+ for key_node, _ in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ if key in keys:
+ raise yaml.constructor.ConstructorError(None, None,
+ "found duplicate key",
+ key_node.start_mark)
+ keys.add(key)
+ return mapping
+
+
+def translate(opts):
+ if opts.raw:
+ prelude, epilogue = '', ''
+ else:
+ prelude, epilogue = TEMPLATE_CC.split('@body@')
+
+ if opts.name:
+ name = opts.name
+ else:
+ name = os.path.split(opts.input)[-1]
+ name = name.rsplit('.', 1)[0]
+ name = name.replace('-', '_')
+ # noqa: E127
+ with open(opts.input) as infile, \
+ open(opts.output, 'w') as cc_file, \
+ open(opts.legacy, 'w') as h_file:
+ yml = yaml.load(infile, Loader=UniqueKeySafeLoader)
+ headers = yml.get('headers', '')
+ cc_file.write(prelude.format(name=name, headers=headers))
+ options = yml['options']
+ for option in options:
+ try:
+ cc_file.write(yaml_to_cxx(option, opts.indent) + '\n')
+ if option.get('with_legacy', False):
+ h_file.write(yaml_to_h(option) + '\n')
+ except ValueError as e:
+ print(f'failed to translate option "{option["name"]}": {e}',
+ file=sys.stderr)
+ return 1
+ cc_file.write(epilogue.replace("}}", "}"))
+
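+# Typical invocation (mirroring the arguments wired up in main() below):
+#   y2c.py --input rgw.yaml --output rgw_options.cc \
+#          --legacy rgw_legacy_options.h --name rgw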
+
+def readable_size(value, typ):
+ times = dict(T=1 << 40,
+ G=1 << 30,
+ M=1 << 20,
+ K=1 << 10)
+ if isinstance(value, str):
+ value = value.strip('"')
+ try:
+ v = int(value)
+ if v == 0:
+ return 0
+ for unit, m in times.items():
+ if v % m == 0:
+ v = int(v / m)
+ return f'{v}_{unit}'
+ return v
+ except ValueError:
+ return value
+
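+# e.g. readable_size(1073741824, 'size') -> '1_G', while values that no
+# unit divides evenly, such as 12345, are returned unchanged.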
+
+def readable_duration(value, typ):
+ times = dict(day=24*60*60,
+ hr=60*60,
+ min=60)
+ if isinstance(value, str):
+ value = value.strip('"')
+ try:
+ v = float(value)
+ if math.floor(v) != v:
+ return v
+ v = int(v)
+ if v == 0:
+ return 0
+ for unit, m in times.items():
+ if v % m == 0:
+ v = int(v / m)
+ return f'{v}_{unit}'
+ return v
+ except ValueError:
+ return value
+
+
+def readable_millisecs(value, typ):
+ return int(value)
+
+
+def readable(opts):
+ with open(opts.input) as infile, open(opts.output, 'w') as outfile:
+ yml = yaml.load(infile, Loader=UniqueKeySafeLoader)
+ options = yml['options']
+ for option in options:
+ typ = option['type']
+ if typ in ('size', 'uint'):
+ do_readable = readable_size
+ elif typ in ('float', 'int', 'secs'):
+ do_readable = readable_duration
+ elif typ == 'millisecs':
+ do_readable = readable_millisecs
+ else:
+ continue
+ for field in ['default', 'min', 'max', 'daemon_default']:
+ v = option.get(field)
+ if v is not None:
+ option[field] = do_readable(v, typ)
+ yml['options'] = options
+ yaml.dump(yml, outfile, sort_keys=False, indent=2)
+
+
+def main():
+ parser = argparse.ArgumentParser(
+ formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+ parser.add_argument('-i', '--input', dest='input',
+ default='options.yaml',
+ help='the YAML file to be processed')
+ parser.add_argument('-o', '--output', dest='output',
+ default='options',
+ help='the path to the generated .cc file')
+ parser.add_argument('--legacy', dest='legacy',
+ default='legacy_options',
+ help='the path to the generated legacy .h file')
+ parser.add_argument('--indent', type=int,
+ default=4,
+ help='the number of spaces added before each line')
+ parser.add_argument('--name',
+ help='the name of the option group')
+ parser.add_argument('--raw', action='store_true',
+ help='output the array without the full function')
+ parser.add_argument('--op', choices=('readable', 'translate'),
+ default='translate',
+ help='operation to perform.')
+ opts = parser.parse_args(sys.argv[1:])
+ if opts.op == 'translate':
+ translate(opts)
+ else:
+ readable(opts)
+
+
+if __name__ == '__main__':
+ main()