author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:45:59 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:45:59 +0000
commit    19fcec84d8d7d21e796c7624e521b60d28ee21ed (patch)
tree      42d26aa27d1e3f7c0b8bd3fd14e7d7082f5008dc /src/sample.ceph.conf
parent    Initial commit. (diff)
Adding upstream version 16.2.11+ds.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/sample.ceph.conf')
-rw-r--r--  src/sample.ceph.conf  531
1 file changed, 531 insertions(+), 0 deletions(-)
diff --git a/src/sample.ceph.conf b/src/sample.ceph.conf
new file mode 100644
index 000000000..13394d31f
--- /dev/null
+++ b/src/sample.ceph.conf
@@ -0,0 +1,531 @@
+##
+# Sample ceph.conf file.
+##
+# This file defines cluster membership, the various locations
+# that Ceph stores data, and any other runtime options.
+
+# If a 'host' is defined for a daemon, the init.d start/stop script will
+# act on that daemon only when the value matches the local hostname (and
+# will ignore it otherwise). If 'host' is not defined, the daemon is
+# assumed to start on the current host (e.g., in a setup with a
+# startup.conf on each node).
+
+## Metavariables
+# $cluster ; Expands to the Ceph Storage Cluster name. Useful
+# ; when running multiple Ceph Storage Clusters
+# ; on the same hardware.
+# ; Example: /etc/ceph/$cluster.keyring
+# ; (Default: ceph)
+#
+# $type ; Expands to one of mds, osd, or mon, depending on
+# ; the type of the daemon in question.
+# ; Example: /var/lib/ceph/$type
+#
+# $id ; Expands to the daemon identifier. For osd.0, this
+# ; would be 0; for mds.a, it would be a.
+# ; Example: /var/lib/ceph/$type/$cluster-$id
+#
+# $host ; Expands to the host name of the daemon in question.
+#
+# $name ; Expands to $type.$id.
+# ; Example: /var/run/ceph/$cluster-$name.asok
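+#
+# For illustration, for daemon osd.0 in the default cluster "ceph":
+# $type = osd, $id = 0, and $name = osd.0, so
+# /var/run/ceph/$cluster-$name.asok expands to
+# /var/run/ceph/ceph-osd.0.asok.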
+
+[global]
+### http://docs.ceph.com/en/latest/rados/configuration/general-config-ref/
+
+ ;fsid = {UUID} # use `uuidgen` to generate your own UUID
+ ;public network = 192.168.0.0/24
+ ;cluster network = 192.168.0.0/24
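+ # For example, the fsid above can be generated from the shell with
+ # `uuidgen`, yielding something like (value purely illustrative):
+ # fsid = a7f64266-0894-4f1e-a635-d0aeaca0e993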
+
+ # Each running Ceph daemon has a running process identifier (PID) file.
+ # The PID file is generated upon start-up.
+ # Type: String (optional)
+ # (Default: /var/run/$cluster/$name.pid)
+ pid file = /var/run/ceph/$name.pid
+
+ # If set, when the Ceph Storage Cluster starts, Ceph sets the max open fds
+ # at the OS level (i.e., the max # of file descriptors).
+ # It helps prevent Ceph OSD Daemons from running out of file descriptors.
+ # Type: 64-bit Integer (optional)
+ # (Default: 0)
+ ;max open files = 131072
+
+
+### http://docs.ceph.com/en/latest/rados/operations/
+### http://docs.ceph.com/en/latest/rados/configuration/auth-config-ref/
+
+ # If enabled, the Ceph Storage Cluster daemons (i.e., ceph-mon, ceph-osd,
+ # and ceph-mds) must authenticate with each other.
+ # Type: String (optional); Valid settings are "cephx" or "none".
+ # (Default: cephx)
+ auth cluster required = cephx
+
+ # If enabled, the Ceph Storage Cluster daemons require Ceph Clients to
+ # authenticate with the Ceph Storage Cluster in order to access Ceph
+ # services.
+ # Type: String (optional); Valid settings are "cephx" or "none".
+ # (Default: cephx)
+ auth service required = cephx
+
+ # If enabled, the Ceph Client requires the Ceph Storage Cluster to
+ # authenticate with the Ceph Client.
+ # Type: String (optional); Valid settings are "cephx" or "none".
+ # (Default: cephx)
+ auth client required = cephx
+
+ # If set to true, Ceph requires signatures on all message traffic between
+ # the Ceph Client and the Ceph Storage Cluster, and between daemons
+ # comprising the Ceph Storage Cluster.
+ # Type: Boolean (optional)
+ # (Default: false)
+ ;cephx require signatures = true
+
+ # The kernel RBD client does not support cephx signatures yet:
+ cephx cluster require signatures = true
+ cephx service require signatures = false
+
+ # The path to the keyring file.
+ # Type: String (optional)
+ # Default: /etc/ceph/$cluster.$name.keyring,/etc/ceph/$cluster.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin
+ ;keyring = /etc/ceph/$cluster.$name.keyring
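+ # As an illustrative sketch, such a keyring can be created with
+ # ceph-authtool (the entity name and caps here are examples only):
+ # ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring \
+ # --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *'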
+
+
+### http://docs.ceph.com/en/latest/rados/configuration/pool-pg-config-ref/
+
+
+ ## Replication level, number of data copies.
+ # Type: 32-bit Integer
+ # (Default: 3)
+ ;osd pool default size = 3
+
+ ## Replication level in degraded state; must be less than the
+ ## 'osd pool default size' value.
+ # Sets the minimum number of written replicas for objects in the
+ # pool in order to acknowledge a write operation to the client. If
+ # minimum is not met, Ceph will not acknowledge the write to the
+ # client. This setting ensures a minimum number of replicas when
+ # operating in degraded mode.
+ # Type: 32-bit Integer
+ # (Default: 0), which means no particular minimum. If 0, minimum is size - (size / 2).
+ ;osd pool default min size = 2
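+ # For example, with the default of 0 and osd pool default size = 3,
+ # the effective minimum is 3 - (3 / 2) = 3 - 1 = 2 (integer division).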
+
+ ## Ensure you have a realistic number of placement groups. We recommend
+ ## approximately 100 per OSD. E.g., total number of OSDs multiplied by 100
+ ## divided by the number of replicas (i.e., osd pool default size). So for
+ ## 10 OSDs and osd pool default size = 3, we'd recommend approximately
+ ## (100 * 10) / 3 = 333.
+ ## Always round to the nearest power of 2.
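+ ## (e.g., 333 rounds to 256, the nearest power of 2; round up to 512
+ ## instead if you expect the pool to grow).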
+
+ # Description: The default number of placement groups for a pool. The
+ # default value is the same as pg_num with mkpool.
+ # Type: 32-bit Integer
+ # (Default: 8)
+ ;osd pool default pg num = 128
+
+ # Description: The default number of placement groups for placement
+ # (PGP) for a pool. The default value is the same as pgp_num with mkpool.
+ # PG and PGP should be equal (for now).
+ # Type: 32-bit Integer
+ # (Default: 8)
+ ;osd pool default pgp num = 128
+
+ # The default CRUSH ruleset to use when creating a pool
+ # Type: 32-bit Integer
+ # (Default: 0)
+ ;osd pool default crush rule = 0
+
+ # The bucket type to use for chooseleaf in a CRUSH rule.
+ # Uses ordinal rank rather than name.
+ # Type: 32-bit Integer
+ # (Default: 1) Typically a host containing one or more Ceph OSD Daemons.
+ ;osd crush chooseleaf type = 1
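+ # For reference, the default CRUSH bucket type ordinals are
+ # 0 = osd, 1 = host, 2 = chassis, 3 = rack, and so on up to root.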
+
+
+### http://docs.ceph.com/en/latest/rados/troubleshooting/log-and-debug/
+
+ # The location of the logging file for your cluster.
+ # Type: String
+ # Required: No
+ # Default: /var/log/ceph/$cluster-$name.log
+ ;log file = /var/log/ceph/$cluster-$name.log
+
+ # Determines if logging messages should appear in syslog.
+ # Type: Boolean
+ # Required: No
+ # (Default: false)
+ ;log to syslog = true
+
+
+### http://docs.ceph.com/en/latest/rados/configuration/ms-ref/
+
+ # Enable if you want your daemons to bind to IPv6 addresses instead of
+ # IPv4 ones. (Not required if you specify a daemon or cluster IP.)
+ # Type: Boolean
+ # (Default: false)
+ ;ms bind ipv6 = true
+
+##################
+## Monitors
+## You need at least one. You need at least three if you want to
+## tolerate any node failures. Always create an odd number.
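+## (A majority must be alive to form quorum: with 3 monitors you can
+## lose 1, with 5 you can lose 2.)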
+[mon]
+### http://docs.ceph.com/en/latest/rados/configuration/mon-config-ref/
+### http://docs.ceph.com/en/latest/rados/configuration/mon-osd-interaction/
+
+ # The IDs of initial monitors in a cluster during startup.
+ # If specified, Ceph requires an odd number of monitors to form an
+ # initial quorum (e.g., 3).
+ # Type: String
+ # (Default: None)
+ ;mon initial members = mycephhost
+
+ ;mon host = cephhost01,cephhost02
+ ;mon addr = 192.168.0.101,192.168.0.102
+
+ # The monitor's data location
+ # Default: /var/lib/ceph/mon/$cluster-$id
+ ;mon data = /var/lib/ceph/mon/$name
+
+ # The clock drift in seconds allowed between monitors.
+ # Type: Float
+ # (Default: .050)
+ ;mon clock drift allowed = .15
+
+ # Exponential backoff for clock drift warnings
+ # Type: Float
+ # (Default: 5)
+ ;mon clock drift warn backoff = 30 # Tell the monitor to backoff from this warning for 30 seconds
+
+ # The percentage of disk space used before an OSD is considered full.
+ # Type: Float
+ # (Default: .95)
+ ;mon osd full ratio = .95
+
+ # The percentage of disk space used before an OSD is considered nearfull.
+ # Type: Float
+ # (Default: .85)
+ ;mon osd nearfull ratio = .85
+
+ # The number of seconds Ceph waits after marking a Ceph OSD Daemon
+ # "down" before also marking it "out" (triggering re-replication).
+ # Type: 32-bit Integer
+ # (Default: 600)
+ ;mon osd down out interval = 600
+
+ # The grace period in seconds before declaring unresponsive Ceph OSD
+ # Daemons "down".
+ # Type: 32-bit Integer
+ # (Default: 900)
+ ;mon osd report timeout = 300
+
+### http://docs.ceph.com/en/latest/rados/troubleshooting/log-and-debug/
+
+ # logging, for debugging monitor crashes, in order of
+ # their likelihood of being helpful :)
+ ;debug ms = 1
+ ;debug mon = 20
+ ;debug paxos = 20
+ ;debug auth = 20
+
+
+;[mon.alpha]
+; host = alpha
+; mon addr = 192.168.0.10:6789
+
+;[mon.beta]
+; host = beta
+; mon addr = 192.168.0.11:6789
+
+;[mon.gamma]
+; host = gamma
+; mon addr = 192.168.0.12:6789
+
+
+##################
+## Metadata servers
+# You must deploy at least one metadata server to use CephFS. There is
+# experimental support for running multiple metadata servers. Do not run
+# multiple metadata servers in production.
+[mds]
+### http://docs.ceph.com/en/latest/cephfs/mds-config-ref/
+
+ # where the mds keeps its secret encryption keys
+ ;keyring = /var/lib/ceph/mds/$name/keyring
+
+ # Determines whether a 'ceph-mds' daemon should poll and
+ # replay the log of an active MDS (hot standby).
+ # Type: Boolean
+ # (Default: false)
+ ;mds standby replay = true
+
+ ; mds logging to debug issues.
+ ;debug ms = 1
+ ;debug mds = 20
+ ;debug journaler = 20
+
+ # The memory limit the MDS should enforce for its cache.
+ # (Default: 1G)
+ ;mds cache memory limit = 2G
+
+;[mds.alpha]
+; host = alpha
+
+;[mds.beta]
+; host = beta
+
+##################
+## osd
+# You need at least one. Two or more if you want data to be replicated.
+# Define as many as you like.
+[osd]
+### http://docs.ceph.com/en/latest/rados/configuration/osd-config-ref/
+
+ # The path to the OSD's data.
+ # You must create the directory when deploying Ceph.
+ # You should mount a drive for OSD data at this mount point.
+ # We do not recommend changing the default.
+ # Type: String
+ # Default: /var/lib/ceph/osd/$cluster-$id
+ ;osd data = /var/lib/ceph/osd/$name
+
+ ## You can change the number of recovery operations to speed up recovery
+ ## or slow it down if your machines can't handle it
+
+ # The number of active recovery requests per OSD at one time.
+ # More requests will accelerate recovery, but they place an
+ # increased load on the cluster.
+ # Type: 32-bit Integer
+ # (Default: 5)
+ ;osd recovery max active = 3
+
+ # The maximum number of backfills allowed to or from a single OSD.
+ # Type: 64-bit Integer
+ # (Default: 10)
+ ;osd max backfills = 5
+
+ # The maximum number of simultaneous scrub operations for a Ceph OSD Daemon.
+ # Type: 32-bit Int
+ # (Default: 1)
+ ;osd max scrubs = 2
+
+ # You may add settings for ceph-deploy so that it will create and mount
+ # the correct type of file system. Remove the comment `#` character for
+ # the following settings and replace the values in braces
+ # with appropriate values, or leave the following settings commented
+ # out to accept the default values.
+
+ #osd mkfs type = {fs-type}
+ #osd mkfs options {fs-type} = {mkfs options} # default for xfs is "-f"
+ #osd mount options {fs-type} = {mount options} # default mount option is "rw, noatime"
+ ;osd mkfs type = btrfs
+ ;osd mount options btrfs = noatime,nodiratime
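+ # An equivalent illustrative set of settings for xfs, using the
+ # defaults noted above:
+ ;osd mkfs type = xfs
+ ;osd mkfs options xfs = -f
+ ;osd mount options xfs = rw,noatime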
+
+ ## Ideally, make this a separate disk or partition. A few
+ ## hundred MB should be enough; more if you have fast or many
+ ## disks. You can use a file under the osd data dir if need be
+ ## (e.g. /data/$name/journal), but it will be slower than a
+ ## separate disk or partition.
+ # The path to the OSD's journal. This may be a path to a file or a block
+ # device (such as a partition of an SSD). If it is a file, you must
+ # create the directory to contain it.
+ # We recommend using a drive separate from the osd data drive.
+ # Type: String
+ # Default: /var/lib/ceph/osd/$cluster-$id/journal
+ ;osd journal = /var/lib/ceph/osd/$name/journal
+
+ # Check log files for corruption. Can be computationally expensive.
+ # Type: Boolean
+ # (Default: false)
+ ;osd check for log corruption = true
+
+### http://docs.ceph.com/en/latest/rados/configuration/journal-ref/
+
+ # The size of the journal in megabytes. If this is 0,
+ # and the journal is a block device, the entire block device is used.
+ # Since v0.54, this is ignored if the journal is a block device,
+ # and the entire block device is used.
+ # Type: 32-bit Integer
+ # (Default: 5120)
+ # Recommended: Begin with 1GB. Should be at least twice the expected
+ # drive speed multiplied by "filestore max sync interval".
+ ;osd journal size = 2048 ; journal size, in megabytes
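+ # For example, at an expected drive speed of 100 MB/s with
+ # "filestore max sync interval" = 5: 2 * 100 MB/s * 5 s = 1000 MB,
+ # so a 1 GB journal is a reasonable starting point.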
+
+ ## If you want to run the journal on a tmpfs, disable DirectIO
+ # Enables direct i/o to the journal.
+ # Requires "journal block align" set to "true".
+ # Type: Boolean
+ # Required: Yes when using aio.
+ # (Default: true)
+ ;journal dio = false
+
+ # osd logging to debug osd issues, in order of likelihood of being helpful
+ ;debug ms = 1
+ ;debug osd = 20
+ ;debug filestore = 20
+ ;debug journal = 20
+
+### http://docs.ceph.com/en/latest/rados/configuration/filestore-config-ref/
+
+ # The maximum interval in seconds for synchronizing the filestore.
+ # Type: Double (optional)
+ # (Default: 5)
+ ;filestore max sync interval = 5
+
+ # Enable snapshots for a btrfs filestore.
+ # Type: Boolean
+ # Required: No. Only used for btrfs.
+ # (Default: true)
+ ;filestore btrfs snap = false
+
+ # Enables the filestore flusher.
+ # Type: Boolean
+ # Required: No
+ # (Default: false)
+ ;filestore flusher = true
+
+ # Defines the maximum number of in-progress operations the file store
+ # accepts before blocking on queuing new operations.
+ # Type: Integer
+ # Required: No. Minimal impact on performance.
+ # (Default: 500)
+ ;filestore queue max ops = 500
+
+ ## Filestore and OSD settings can be tweaked to achieve better performance
+
+### http://docs.ceph.com/en/latest/rados/configuration/filestore-config-ref/#misc
+
+ # Min number of files in a subdir before merging into the parent.
+ # NOTE: A negative value means subdir merging is disabled.
+ # Type: Integer
+ # Required: No
+ # Default: -10
+ ;filestore merge threshold = -10
+
+ # filestore_split_multiple * abs(filestore_merge_threshold) * 16 is the
+ # maximum number of files in a subdirectory before splitting into
+ # child directories.
+ # Type: Integer
+ # Required: No
+ # Default: 2
+ ;filestore split multiple = 2
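+ # For example, with filestore split multiple = 2 and
+ # filestore merge threshold = -10: 2 * abs(-10) * 16 = 320 files
+ # in a subdirectory before it splits into child directories.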
+
+ # The number of filesystem operation threads that execute in parallel.
+ # Type: Integer
+ # Required: No
+ # Default: 2
+ ;filestore op threads = 4
+
+ ## CRUSH
+
+ # By default, OSDs update their details (location, weight and root) in
+ # the CRUSH map during startup.
+ # Type: Boolean
+ # Required: No;
+ # (Default: true)
+ ;osd crush update on start = false
+
+;[osd.0]
+; host = delta
+
+;[osd.1]
+; host = epsilon
+
+;[osd.2]
+; host = zeta
+
+;[osd.3]
+; host = eta
+
+
+##################
+## client settings
+[client]
+
+### http://docs.ceph.com/en/latest/rbd/rbd-config-ref/
+
+ # Enable caching for RADOS Block Device (RBD).
+ # Type: Boolean
+ # Required: No
+ # (Default: true)
+ rbd cache = true
+
+ # The RBD cache size in bytes.
+ # Type: 64-bit Integer
+ # Required: No
+ # (Default: 32 MiB)
+ ;rbd cache size = 33554432
+
+ # The dirty limit in bytes at which the cache triggers write-back.
+ # If 0, uses write-through caching.
+ # Type: 64-bit Integer
+ # Required: No
+ # Constraint: Must be less than rbd cache size.
+ # (Default: 24 MiB)
+ ;rbd cache max dirty = 25165824
+
+ # The dirty target before the cache begins writing data to the data storage.
+ # Does not block writes to the cache.
+ # Type: 64-bit Integer
+ # Required: No
+ # Constraint: Must be less than rbd cache max dirty.
+ # (Default: 16 MiB)
+ ;rbd cache target dirty = 16777216
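+
+ # An illustrative scaled-up set of values that respects the
+ # constraints above (cache size > max dirty > target dirty):
+ ;rbd cache size = 67108864 # 64 MiB
+ ;rbd cache max dirty = 50331648 # 48 MiB
+ ;rbd cache target dirty = 33554432 # 32 MiB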
+
+ # The number of seconds dirty data is in the cache before writeback starts.
+ # Type: Float
+ # Required: No
+ # (Default: 1.0)
+ ;rbd cache max dirty age = 1.0
+
+ # Start out in write-through mode, and switch to write-back after the
+ # first flush request is received. Enabling this is a conservative but
+ # safe setting in case VMs running on rbd are too old to send flushes,
+ # like the virtio driver in Linux before 2.6.32.
+ # Type: Boolean
+ # Required: No
+ # (Default: true)
+ ;rbd cache writethrough until flush = true
+
+ # The Ceph admin socket allows you to query a daemon via a socket interface.
+ # From a client perspective, this can be a virtual machine using librbd.
+ # Type: String
+ # Required: No
+ ;admin socket = /var/run/ceph/$cluster-$type.$id.$pid.$cctid.asok
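+ # For example, query a daemon through its socket (path illustrative):
+ # ceph --admin-daemon /var/run/ceph/ceph-osd.0.asok config show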
+
+
+##################
+## radosgw client settings
+[client.radosgw.gateway]
+
+### http://docs.ceph.com/en/latest/radosgw/config-ref/
+
+ # Sets the location of the data files for Ceph Object Gateway.
+ # You must create the directory when deploying Ceph.
+ # We do not recommend changing the default.
+ # Type: String
+ # Default: /var/lib/ceph/radosgw/$cluster-$id
+ ;rgw data = /var/lib/ceph/radosgw/$name
+
+ # Client's hostname
+ ;host = ceph-radosgw
+
+ # where the radosgw keeps its secret encryption keys
+ ;keyring = /etc/ceph/ceph.client.radosgw.keyring
+
+ # FastCgiExternalServer uses this socket.
+ # If you do not specify a socket path, Ceph Object Gateway will not run as an external server.
+ # The path you specify here must be the same as the path specified in the rgw.conf file.
+ # Type: String
+ # Default: None
+ ;rgw socket path = /var/run/ceph/ceph.radosgw.gateway.fastcgi.sock
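+ # An illustrative matching Apache directive (paths are examples only):
+ # FastCgiExternalServer /var/www/s3gw.fcgi \
+ # -socket /var/run/ceph/ceph.radosgw.gateway.fastcgi.sock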
+
+ # The location of the logging file for your radosgw.
+ # Type: String
+ # Required: No
+ # Default: /var/log/ceph/$cluster-$name.log
+ ;log file = /var/log/ceph/client.radosgw.gateway.log
+
+ # Enable 100-continue if it is operational.
+ # Type: Boolean
+ # Default: true
+ ;rgw print continue = false
+
+ # The DNS name of the served domain.
+ # Type: String
+ # Default: None
+ ;rgw dns name = radosgw.ceph.internal