##
# Sample Ceph ceph.conf file.
##
# This file defines cluster membership, the various locations
# that Ceph stores data, and any other runtime options.

# If a 'host' is defined for a daemon, the init.d start/stop script will
# verify that it matches the hostname (and otherwise ignore that daemon).
# If it is not defined, the daemon is assumed to be intended to start on
# the current host (e.g., in a setup with a startup.conf on each
# node).

## Metavariables
# $cluster    ; Expands to the Ceph Storage Cluster name. Useful
#             ; when running multiple Ceph Storage Clusters
#             ; on the same hardware.
#             ; Example: /etc/ceph/$cluster.keyring
#             ; (Default: ceph)
#
# $type       ; Expands to one of mds, osd, or mon, depending on
#             ; the type of the daemon in question.
#             ; Example: /var/lib/ceph/$type
#
# $id         ; Expands to the daemon identifier. For osd.0, this
#             ; would be 0; for mds.a, it would be a.
#             ; Example: /var/lib/ceph/$type/$cluster-$id
#
# $host       ; Expands to the host name of the daemon in question.
#
# $name       ; Expands to $type.$id.
#             ; Example: /var/run/ceph/$cluster-$name.asok
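#
# Example expansion (illustrative): for daemon osd.0 in the default
# cluster "ceph", /var/run/ceph/$cluster-$name.asok expands to
# /var/run/ceph/ceph-osd.0.asok.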

[global]
### http://docs.ceph.com/en/latest/rados/configuration/general-config-ref/

    ;fsid                       = {UUID}    # use `uuidgen` to generate your own UUID
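    # For example, run `uuidgen` on any node and paste its output as the
    # fsid value above (each cluster needs its own unique value):
    #   $ uuidgen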
    ;public network             = 192.168.0.0/24
    ;cluster network            = 192.168.0.0/24

    # Each running Ceph daemon writes a process identifier (PID) file.
    # The PID file is generated upon start-up.
    # Type: String (optional)
    # (Default: none). A typical path is /var/run/$cluster/$name.pid.
    pid file                   = /var/run/ceph/$name.pid

    # If set, when the Ceph Storage Cluster starts, Ceph sets the max open fds
    # at the OS level (i.e., the max # of file descriptors).
    # It helps prevent Ceph OSD Daemons from running out of file descriptors.
    # Type: 64-bit Integer (optional)
    # (Default: 0)
    ;max open files             = 131072


### http://docs.ceph.com/en/latest/rados/operations/
### http://docs.ceph.com/en/latest/rados/configuration/auth-config-ref/

    # If enabled, the Ceph Storage Cluster daemons (i.e., ceph-mon, ceph-osd,
    # and ceph-mds) must authenticate with each other.
    # Type: String (optional); Valid settings are "cephx" or "none".
    # (Default: cephx)
    auth cluster required      = cephx

    # If enabled, the Ceph Storage Cluster daemons require Ceph Clients to
    # authenticate with the Ceph Storage Cluster in order to access Ceph
    # services.
    # Type: String (optional); Valid settings are "cephx" or "none".
    # (Default: cephx)
    auth service required      = cephx

    # If enabled, the Ceph Client requires the Ceph Storage Cluster to
    # authenticate with the Ceph Client.
    # Type: String (optional); Valid settings are "cephx" or "none".
    # (Default: cephx)
    auth client required       = cephx

    # If set to true, Ceph requires signatures on all message traffic between
    # the Ceph Client and the Ceph Storage Cluster, and between daemons
    # comprising the Ceph Storage Cluster.
    # Type: Boolean (optional)
    # (Default: false)
    ;cephx require signatures   = true

    # The kernel RBD client does not support signatures yet, so require them
    # only for intra-cluster traffic:
    cephx cluster require signatures = true
    cephx service require signatures = false

    # The path to the keyring file.
    # Type: String (optional)
    # Default: /etc/ceph/$cluster.$name.keyring,/etc/ceph/$cluster.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin
    ;keyring                  = /etc/ceph/$cluster.$name.keyring


### http://docs.ceph.com/en/latest/rados/configuration/pool-pg-config-ref/


    ## Replication level, number of data copies.
    # Type: 32-bit Integer
    # (Default: 3)
    ;osd pool default size      = 3

    ## Replication level allowed in a degraded state; must be less than the
    ## 'osd pool default size' value.
    # Sets the minimum number of written replicas for objects in the
    # pool required to acknowledge a write operation to the client. If
    # the minimum is not met, Ceph will not acknowledge the write to the
    # client. This setting ensures a minimum number of replicas when
    # operating in degraded mode.
    # Type: 32-bit Integer
    # (Default: 0, which means no particular minimum; in that case the
    # effective minimum is size - (size / 2).)
    ;osd pool default min size  = 2
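    # Worked example: with 'osd pool default size = 3' and min size left at
    # its default of 0, the effective minimum is 3 - (3 / 2) = 3 - 1 = 2
    # (integer division), so a write is acknowledged once two replicas exist.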

    ## Ensure you have a realistic number of placement groups. We recommend
    ## approximately 100 per OSD. E.g., total number of OSDs multiplied by 100
    ## divided by the number of replicas (i.e., osd pool default size). So for
    ## 10 OSDs and osd pool default size = 3, we'd recommend approximately
    ## (100 * 10) / 3 = 333
    ## always round to the nearest power of 2
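    ## Completing the example above: rounding 333 up to the next power of
    ## two gives 512 placement groups (rounding down instead gives 256).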

    # Description: The default number of placement groups for a pool. The
    #              default value is the same as pg_num with mkpool.
    # Type: 32-bit Integer
    # (Default: 8)
    ;osd pool default pg num    = 128

    # Description: The default number of placement groups for placement for a
    #              pool. The default value is the same as pgp_num with mkpool.
    #              PG and PGP should be equal (for now).
    # Type: 32-bit Integer
    # (Default: 8)
    ;osd pool default pgp num   = 128

    # The default CRUSH rule to use when creating a pool
    # Type: 32-bit Integer
    # (Default: 0)
    ;osd pool default crush rule = 0

    # The bucket type to use for chooseleaf in a CRUSH rule.
    # Uses ordinal rank rather than name.
    # Type: 32-bit Integer
    # (Default: 1) Typically a host containing one or more Ceph OSD Daemons.
    ;osd crush chooseleaf type = 1
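    # (In the default CRUSH type hierarchy, ordinal 0 is "osd" and ordinal 1
    # is "host"; larger values correspond to larger failure domains such as
    # rack or datacenter.)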


### http://docs.ceph.com/en/latest/rados/troubleshooting/log-and-debug/

    # The location of the logging file for your cluster.
    # Type: String
    # Required: No
    # Default: /var/log/ceph/$cluster-$name.log
    ;log file                   = /var/log/ceph/$cluster-$name.log

    # Determines if logging messages should appear in syslog.
    # Type: Boolean
    # Required: No
    # (Default: false)
    ;log to syslog              = true


### http://docs.ceph.com/en/latest/rados/configuration/ms-ref/

    # Enable if you want your daemons to bind to IPv6 addresses instead of
    # IPv4 ones. (Not required if you specify a daemon or cluster IP.)
    # Type: Boolean
    # (Default: false)
    ;ms bind ipv6               = true

##################
## Monitors
## You need at least one. You need at least three if you want to
## tolerate any node failures. Always create an odd number.
[mon]
### http://docs.ceph.com/en/latest/rados/configuration/mon-config-ref/
### http://docs.ceph.com/en/latest/rados/configuration/mon-osd-interaction/

    # The IDs of initial monitors in a cluster during startup.
    # If specified, Ceph requires an odd number of monitors to form an
    # initial quorum (e.g., 3).
    # Type: String
    # (Default: None)
    ;mon initial members        = mycephhost

    ;mon host                   = cephhost01,cephhost02
    ;mon addr                   = 192.168.0.101,192.168.0.102

    # The monitor's data location
    # Default: /var/lib/ceph/mon/$cluster-$id
    ;mon data                   = /var/lib/ceph/mon/$name

    # The clock drift in seconds allowed between monitors.
    # Type: Float
    # (Default: .050)
    ;mon clock drift allowed    = .15

    # Exponential backoff for clock drift warnings
    # Type: Float
    # (Default: 5)
    ;mon clock drift warn backoff = 30    # A larger exponential backoff factor makes this warning less frequent

    # The percentage of disk space used before an OSD is considered full.
    # Type: Float
    # (Default: .95)
    ;mon osd full ratio         = .95

    # The percentage of disk space used before an OSD is considered nearfull.
    # Type: Float
    # (Default: .85)
    ;mon osd nearfull ratio     = .85

    # The number of seconds Ceph waits before marking a Ceph OSD
    # Daemon "down" and "out" if it doesn't respond.
    # Type: 32-bit Integer
    # (Default: 600)
    ;mon osd down out interval  = 600

    # The grace period in seconds before declaring unresponsive Ceph OSD
    # Daemons "down".
    # Type: 32-bit Integer
    # (Default: 900)
    ;mon osd report timeout          = 300

### http://docs.ceph.com/en/latest/rados/troubleshooting/log-and-debug/

    # logging, for debugging monitor crashes, in order of
    # their likelihood of being helpful :)
    ;debug ms                   = 1
    ;debug mon                  = 20
    ;debug paxos                = 20
    ;debug auth                 = 20


;[mon.alpha]
;    host                       = alpha
;    mon addr                   = 192.168.0.10:6789

;[mon.beta]
;    host                       = beta
;    mon addr                   = 192.168.0.11:6789

;[mon.gamma]
;    host                       = gamma
;    mon addr                   = 192.168.0.12:6789


##################
## Metadata servers
# You must deploy at least one metadata server to use CephFS. There is
# experimental support for running multiple metadata servers. Do not run
# multiple metadata servers in production.
[mds]
### http://docs.ceph.com/en/latest/cephfs/mds-config-ref/

    # where the mds keeps its secret encryption keys
    ;keyring                    = /var/lib/ceph/mds/$name/keyring

    # Determines whether a 'ceph-mds' daemon should poll and
    # replay the log of an active MDS (hot standby).
    # Type:  Boolean
    # (Default: false)
    ;mds standby replay          = true

    ; mds logging to debug issues.
    ;debug ms                   = 1
    ;debug mds                  = 20
    ;debug journaler            = 20

    # The memory limit the MDS should enforce for its cache.
    # (Default: 1G)
    ;mds cache memory limit     = 2G

;[mds.alpha]
;    host                       = alpha

;[mds.beta]
;    host                       = beta

##################
## osd
# You need at least one.  Two or more if you want data to be replicated.
# Define as many as you like.
[osd]
### http://docs.ceph.com/en/latest/rados/configuration/osd-config-ref/

    # The path to the OSD's data.
    # You must create the directory when deploying Ceph.
    # You should mount a drive for OSD data at this mount point.
    # We do not recommend changing the default.
    # Type: String
    # Default: /var/lib/ceph/osd/$cluster-$id
    ;osd data                     = /var/lib/ceph/osd/$name

    ## You can change the number of recovery operations to speed up recovery
    ## or slow it down if your machines can't handle it

    # The number of active recovery requests per OSD at one time.
    # More requests will accelerate recovery, but the requests
    # place an increased load on the cluster.
    # Type: 32-bit Integer
    # (Default: 5)
    ;osd recovery max active      = 3

    # The maximum number of backfills allowed to or from a single OSD.
    # Type: 64-bit Integer
    # (Default: 10)
    ;osd max backfills            = 5

    # The maximum number of simultaneous scrub operations for a Ceph OSD Daemon.
    # Type: 32-bit Int
    # (Default: 1)
    ;osd max scrubs               = 2

    # You may add settings for ceph-deploy so that it will create and mount
    # the correct type of file system. Remove the comment `#` character from
    # the following settings and replace the values in braces
    # with appropriate values, or leave the following settings commented
    # out to accept the default values.

    #osd mkfs type = {fs-type}
    #osd mkfs options {fs-type}   = {mkfs options}   # default for xfs is "-f"
    #osd mount options {fs-type}  = {mount options} # default mount option is "rw, noatime"
    ;osd mkfs type                = btrfs
    ;osd mount options btrfs      = noatime,nodiratime

    ## Ideally, make this a separate disk or partition.  A few
    ## hundred MB should be enough; more if you have fast or many
    ## disks.  You can use a file under the osd data dir if need be
    ## (e.g. /data/$name/journal), but it will be slower than a
    ## separate disk or partition.
    # The path to the OSD's journal. This may be a path to a file or a block
    # device (such as a partition of an SSD). If it is a file, you must
    # create the directory to contain it.
    # We recommend using a drive separate from the osd data drive.
    # Type: String
    # Default: /var/lib/ceph/osd/$cluster-$id/journal
    ;osd journal                  = /var/lib/ceph/osd/$name/journal

    # Check log files for corruption. Can be computationally expensive.
    # Type: Boolean
    # (Default: false)
    ;osd check for log corruption = true

### http://docs.ceph.com/en/latest/rados/configuration/journal-ref/

    # The size of the journal in megabytes. If this is 0,
    # and the journal is a block device, the entire block device is used.
    # Since v0.54, this is ignored if the journal is a block device,
    # and the entire block device is used.
    # Type: 32-bit Integer
    # (Default: 5120)
    # Recommended: Begin with 1GB. Should be at least twice the expected
    # drive speed multiplied by "filestore max sync interval".
    ;osd journal size             = 2048     ; journal size, in megabytes
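    # Worked example (illustrative numbers): for a drive that sustains
    # 100 MB/s with the default "filestore max sync interval" of 5 seconds,
    # 2 * 100 MB/s * 5 s = 1000 MB, i.e. a journal of roughly 1 GB.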

    ## If you want to run the journal on a tmpfs, disable DirectIO
    # Enables direct i/o to the journal.
    # Requires "journal block align" set to "true".
    # Type: Boolean
    # Required: Yes when using aio.
    # (Default: true)
    ;journal dio                  = false

    # osd logging to debug osd issues, in order of likelihood of being helpful
    ;debug ms                     = 1
    ;debug osd                    = 20
    ;debug filestore              = 20
    ;debug journal                = 20

### http://docs.ceph.com/en/latest/rados/configuration/filestore-config-ref/

    # The maximum interval in seconds for synchronizing the filestore.
    # Type: Double (optional)
    # (Default: 5)
    ;filestore max sync interval = 5

    # Enable snapshots for a btrfs filestore.
    # Type: Boolean
    # Required: No. Only used for btrfs.
    # (Default: true)
    ;filestore btrfs snap        = false

    # Enables the filestore flusher.
    # Type: Boolean
    # Required: No
    # (Default: false)
    ;filestore flusher            = true

    # Defines the maximum number of in-progress operations the file store
    # accepts before blocking on queuing new operations.
    # Type: Integer
    # Required: No. Minimal impact on performance.
    # (Default: 500)
    ;filestore queue max ops      = 500

    ## Filestore and OSD settings can be tweaked to achieve better performance

### http://docs.ceph.com/en/latest/rados/configuration/filestore-config-ref/#misc

    # Min number of files in a subdir before merging into parent.
    # NOTE: A negative value disables subdir merging.
    # Type: Integer
    # Required: No
    # Default:  -10
    ;filestore merge threshold    = -10

    # filestore_split_multiple * abs(filestore_merge_threshold) * 16 is the
    # maximum number of files in a subdirectory before splitting into child
    # directories.
    # Type: Integer
    # Required: No
    # Default:  2
    ;filestore split multiple     = 2
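    # Worked example with the defaults: 2 * abs(-10) * 16 = 320 files in a
    # subdirectory before it is split into child directories.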

    # The number of filesystem operation threads that execute in parallel.
    # Type: Integer
    # Required: No
    # Default:  2
    ;filestore op threads         = 4

    ## CRUSH

    # By default, OSDs update their details (location, weight and root) in
    # the CRUSH map during startup.
    # Type: Boolean
    # Required: No
    # (Default: true)
    ;osd crush update on start    = false

;[osd.0]
;    host                         = delta

;[osd.1]
;    host                         = epsilon

;[osd.2]
;    host                         = zeta

;[osd.3]
;    host                         = eta


##################
## client settings
[client]

### http://docs.ceph.com/en/latest/rbd/rbd-config-ref/

    # Enable caching for RADOS Block Device (RBD).
    # Type: Boolean
    # Required: No
    # (Default: true)
    rbd cache                           = true

    # The RBD cache size in bytes.
    # Type: 64-bit Integer
    # Required: No
    # (Default: 32 MiB)
    ;rbd cache size                     = 33554432

    # The dirty limit in bytes at which the cache triggers write-back.
    # If 0, uses write-through caching.
    # Type: 64-bit Integer
    # Required: No
    # Constraint: Must be less than rbd cache size.
    # (Default: 24 MiB)
    ;rbd cache max dirty                = 25165824

    # The dirty target before the cache begins writing data to the data storage.
    # Does not block writes to the cache.
    # Type: 64-bit Integer
    # Required: No
    # Constraint: Must be less than rbd cache max dirty.
    # (Default: 16 MiB)
    ;rbd cache target dirty             = 16777216
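    # (Sanity check with the defaults above: target dirty 16 MiB < max dirty
    # 24 MiB < cache size 32 MiB; keep any custom values in the same order.)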

    # The number of seconds dirty data is in the cache before writeback starts.
    # Type: Float
    # Required: No
    # (Default: 1.0)
    ;rbd cache max dirty age            = 1.0

    # Start out in write-through mode, and switch to write-back after the
    # first flush request is received. Enabling this is a conservative but
    # safe setting in case VMs running on rbd are too old to send flushes,
    # like the virtio driver in Linux before 2.6.32.
    # Type: Boolean
    # Required: No
    # (Default: true)
    ;rbd cache writethrough until flush = true

    # The Ceph admin socket allows you to query a daemon via a socket
    # interface. From a client perspective this can be a virtual machine
    # using librbd.
    # Type: String
    # Required: No
    ;admin socket                       = /var/run/ceph/$cluster-$type.$id.$pid.$cctid.asok
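    # Usage sketch (assuming the socket path above): query the daemon behind
    # a socket with, e.g.
    #   ceph --admin-daemon /var/run/ceph/<cluster>-<type>.<id>.<pid>.<cctid>.asok perf dump
    #   ceph --admin-daemon /var/run/ceph/<cluster>-<type>.<id>.<pid>.<cctid>.asok config show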


##################
## radosgw client settings
[client.radosgw.gateway]

### http://docs.ceph.com/en/latest/radosgw/config-ref/

    # Sets the location of the data files for Ceph Object Gateway.
    # You must create the directory when deploying Ceph.
    # We do not recommend changing the default.
    # Type: String
    # Default: /var/lib/ceph/radosgw/$cluster-$id
    ;rgw data                     = /var/lib/ceph/radosgw/$name

    # Client's hostname
    ;host                         =  ceph-radosgw

    # where the radosgw keeps its secret encryption keys
    ;keyring                      = /etc/ceph/ceph.client.radosgw.keyring

    # FastCgiExternalServer uses this socket.
    # If you do not specify a socket path, Ceph Object Gateway will not run as an external server.
    # The path you specify here must be the same as the path specified in the rgw.conf file.
    # Type: String
    # Default: None
    ;rgw socket path              = /var/run/ceph/ceph.radosgw.gateway.fastcgi.sock

    # The location of the logging file for your radosgw.
    # Type: String
    # Required: No
    # Default: /var/log/ceph/$cluster-$name.log
    ;log file                     = /var/log/ceph/client.radosgw.gateway.log

    # Enable 100-continue if it is operational.
    # Type: Boolean
    # Default:  true
    ;rgw print continue           = false

    # The DNS name of the served domain.
    # Type: String
    # Default:  None
    ;rgw dns name                 = radosgw.ceph.internal