Diffstat (limited to 'doc/man')
-rw-r--r--  doc/man/8/CMakeLists.txt                      89
-rw-r--r--  doc/man/8/ceph-authtool.rst                  206
-rw-r--r--  doc/man/8/ceph-bluestore-tool.rst            212
-rw-r--r--  doc/man/8/ceph-clsinfo.rst                    49
-rw-r--r--  doc/man/8/ceph-conf.rst                      149
-rw-r--r--  doc/man/8/ceph-create-keys.rst                67
-rw-r--r--  doc/man/8/ceph-debugpack.rst                  50
-rw-r--r--  doc/man/8/ceph-dencoder.rst                  151
-rw-r--r--  doc/man/8/ceph-deploy.rst                    529
-rw-r--r--  doc/man/8/ceph-diff-sorted.rst                71
-rw-r--r--  doc/man/8/ceph-fuse.rst                       85
-rw-r--r--  doc/man/8/ceph-immutable-object-cache.rst     76
-rw-r--r--  doc/man/8/ceph-kvstore-tool.rst               98
-rw-r--r--  doc/man/8/ceph-mds.rst                        82
-rw-r--r--  doc/man/8/ceph-mon.rst                        99
-rw-r--r--  doc/man/8/ceph-objectstore-tool.rst          488
-rw-r--r--  doc/man/8/ceph-osd.rst                       140
-rw-r--r--  doc/man/8/ceph-post-file.rst                  71
-rw-r--r--  doc/man/8/ceph-rbdnamer.rst                   36
-rw-r--r--  doc/man/8/ceph-run.rst                        45
-rw-r--r--  doc/man/8/ceph-syn.rst                        99
-rw-r--r--  doc/man/8/ceph-volume-systemd.rst             55
-rw-r--r--  doc/man/8/ceph-volume.rst                    425
-rw-r--r--  doc/man/8/ceph.rst                          1649
-rw-r--r--  doc/man/8/cephadm.rst                        526
-rw-r--r--  doc/man/8/cephfs-mirror.rst                   66
-rw-r--r--  doc/man/8/cephfs-top.rst                     121
-rw-r--r--  doc/man/8/crushtool.rst                      302
-rw-r--r--  doc/man/8/librados-config.rst                 46
-rw-r--r--  doc/man/8/monmaptool.rst                     140
-rw-r--r--  doc/man/8/mount.ceph.rst                     256
-rw-r--r--  doc/man/8/mount.fuse.ceph.rst                 71
-rw-r--r--  doc/man/8/osdmaptool.rst                     331
-rw-r--r--  doc/man/8/rados.rst                          404
-rw-r--r--  doc/man/8/radosgw-admin.rst                 1023
-rw-r--r--  doc/man/8/radosgw.rst                        253
-rw-r--r--  doc/man/8/rbd-fuse.rst                        61
-rw-r--r--  doc/man/8/rbd-ggate.rst                       79
-rw-r--r--  doc/man/8/rbd-mirror.rst                      75
-rw-r--r--  doc/man/8/rbd-nbd.rst                         89
-rw-r--r--  doc/man/8/rbd-replay-many.rst                 73
-rw-r--r--  doc/man/8/rbd-replay-prep.rst                 55
-rw-r--r--  doc/man/8/rbd-replay.rst                      78
-rw-r--r--  doc/man/8/rbd.rst                           1036
-rw-r--r--  doc/man/8/rbdmap.rst                         128
-rw-r--r--  doc/man/8/rgw-orphan-list.rst                 69
-rw-r--r--  doc/man/CMakeLists.txt                        15
47 files changed, 10318 insertions, 0 deletions
diff --git a/doc/man/8/CMakeLists.txt b/doc/man/8/CMakeLists.txt
new file mode 100644
index 000000000..8e1b9373f
--- /dev/null
+++ b/doc/man/8/CMakeLists.txt
@@ -0,0 +1,89 @@
+set(client_srcs
+ ceph-syn.rst
+ ceph-conf.rst
+ ceph.rst
+ ceph-authtool.rst
+ ceph-kvstore-tool.rst
+ rados.rst
+ ceph-post-file.rst
+ ceph-dencoder.rst)
+
+set(server_srcs
+ ceph-deploy.rst
+ crushtool.rst
+ ceph-run.rst
+ mount.ceph.rst
+ mount.fuse.ceph.rst
+ ceph-create-keys.rst)
+if(WITH_TESTS)
+list(APPEND server_srcs
+ ceph-debugpack.rst)
+endif(WITH_TESTS)
+
+set(osd_srcs
+ ceph-clsinfo.rst
+ ceph-volume.rst
+ ceph-volume-systemd.rst
+ ceph-osd.rst
+ osdmaptool.rst
+ ceph-bluestore-tool.rst)
+
+set(mon_srcs
+ ceph-mon.rst
+ monmaptool.rst)
+
+list(APPEND man_srcs
+ ${client_srcs}
+ ${server_srcs}
+ ${osd_srcs}
+ ${mon_srcs}
+ ceph-mds.rst
+ cephfs-top.rst
+ librados-config.rst
+ cephadm.rst
+ cephfs-mirror.rst)
+
+if(HAVE_LIBFUSE)
+ list(APPEND man_srcs
+ ceph-fuse.rst
+ rbd-fuse.rst)
+endif()
+
+if(WITH_RADOSGW)
+ list(APPEND man_srcs
+ radosgw.rst
+ radosgw-admin.rst
+ rgw-orphan-list.rst
+ ceph-diff-sorted.rst)
+endif()
+
+if(WITH_RBD)
+ list(APPEND man_srcs
+ ceph-rbdnamer.rst
+ rbd-mirror.rst
+ rbd-replay-many.rst
+ rbd-replay-prep.rst
+ rbd-replay.rst
+ rbdmap.rst
+ rbd.rst)
+ if(LINUX)
+ list(APPEND man_srcs rbd-nbd.rst)
+ endif()
+ if(FREEBSD)
+ list(APPEND man_srcs rbd-ggate.rst)
+ endif()
+endif()
+
+list(APPEND man_srcs ceph-immutable-object-cache.rst)
+foreach(man ${man_srcs})
+ list(APPEND sphinx_input ${CMAKE_CURRENT_SOURCE_DIR}/${man})
+ # mount.ceph.rst => mount if we use
+ # get_filename_component(cmd ${man} NAME_WE)
+ string(REGEX REPLACE ".rst$" "" cmd ${man})
+ list(APPEND sphinx_output ${sphinx_output_dir}/${cmd}.8)
+ install(FILES ${sphinx_output_dir}/${cmd}.8
+ DESTINATION ${CEPH_MAN_DIR}/man8)
+endforeach()
+
+set(sphinx_input ${sphinx_input} PARENT_SCOPE)
+set(sphinx_output ${sphinx_output} PARENT_SCOPE)
diff --git a/doc/man/8/ceph-authtool.rst b/doc/man/8/ceph-authtool.rst
new file mode 100644
index 000000000..af9ee7123
--- /dev/null
+++ b/doc/man/8/ceph-authtool.rst
@@ -0,0 +1,206 @@
+:orphan:
+
+=================================================
+ ceph-authtool -- ceph keyring manipulation tool
+=================================================
+
+.. program:: ceph-authtool
+
+Synopsis
+========
+
+| **ceph-authtool** *keyringfile*
+ [ -l | --list ]
+ [ -p | --print-key ]
+ [ -C | --create-keyring ]
+ [ -g | --gen-key ]
+ [ --gen-print-key ]
+ [ --import-keyring *otherkeyringfile* ]
+ [ -n | --name *entityname* ]
+ [ -a | --add-key *base64_key* ]
+ [ --cap *subsystem* *capability* ]
+ [ --caps *capfile* ]
+ [ --mode *mode* ]
+
+
+Description
+===========
+
+**ceph-authtool** is a utility to create, view, and modify a Ceph keyring
+file. A keyring file stores one or more Ceph authentication keys and
+possibly an associated capability specification. Each key is
+associated with an entity name, of the form
+``{client,mon,mds,osd}.name``.
+
+**WARNING** Ceph provides authentication and protection against
+man-in-the-middle attacks once secret keys are in place. However,
+data over the wire is not encrypted, which may include the messages
+used to configure said keys. The system is primarily intended to be
+used in trusted environments.
+
+Options
+=======
+
+.. option:: -l, --list
+
+ will list all keys and capabilities present in the keyring
+
+.. option:: -p, --print-key
+
+ will print an encoded key for the specified entityname. This is
+ suitable for the ``mount -o secret=`` argument
+
+.. option:: -C, --create-keyring
+
+ will create a new keyring, overwriting any existing keyringfile
+
+.. option:: -g, --gen-key
+
+ will generate a new secret key for the specified entityname
+
+.. option:: --gen-print-key
+
+ will generate a new secret key for the specified entityname,
+ without altering the keyringfile, printing the secret to stdout
+
+.. option:: --import-keyring *secondkeyringfile*
+
+ will import the content of a given keyring to the keyringfile
+
+.. option:: -n, --name *name*
+
+ specify entityname to operate on
+
+.. option:: -a, --add-key *base64_key*
+
+ will add an encoded key to the keyring
+
+.. option:: --cap *subsystem* *capability*
+
+ will set the capability for given subsystem
+
+.. option:: --caps *capsfile*
+
+ will set all capabilities associated with a given key, for all subsystems
+
+.. option:: --mode *mode*
+
+ will set the desired file mode on the keyring, e.g. 0644; defaults to 0600
+
+
+Capabilities
+============
+
+The subsystem is the name of a Ceph subsystem: ``mon``, ``mds``, or
+``osd``.
+
+The capability is a string describing what the given user is allowed
+to do. This takes the form of a comma separated list of allow
+clauses with a permission specifier containing one or more of rwx for
+read, write, and execute permission. The ``allow *`` grants full
+superuser permissions for the given subsystem.
+
+For example::
+
+ # can read, write, and execute objects
+ osd = "allow rwx"
+
+ # can access mds server
+ mds = "allow"
+
+ # can modify cluster state (i.e., is a server daemon)
+ mon = "allow rwx"
+
+A librados user restricted to a single pool might look like::
+
+ mon = "allow r"
+
+ osd = "allow rw pool foo"
+
+A client using rbd with read access to one pool and read/write access to another::
+
+ mon = "allow r"
+
+ osd = "allow class-read object_prefix rbd_children, allow pool templates r class-read, allow pool vms rwx"
+
+A client mounting the file system with minimal permissions would need caps like::
+
+ mds = "allow"
+
+ osd = "allow rw pool data"
+
+ mon = "allow r"
+
+
+OSD Capabilities
+================
+
+In general, an osd capability follows the grammar::
+
+ osdcap := grant[,grant...]
+ grant := allow (match capspec | capspec match)
+ match := [ pool[=]<poolname> | object_prefix <prefix>
+ | namespace[=]<rados-namespace>
+ | tag <application-name> <key>=<value> ]
+ capspec := * | [r][w][x] [class-read] [class-write]
+
+The capspec determines what kind of operations the entity can perform::
+
+ r = read access to objects
+ w = write access to objects
+ x = can call any class method (same as class-read class-write)
+ class-read = can call class methods that are reads
+ class-write = can call class methods that are writes
+ * or "all" = equivalent to rwx, plus the ability to run osd admin commands,
+ i.e. ceph osd tell ...
+
+The match criteria restrict a grant based on the pool being accessed.
+Grants are additive if the client fulfills the match condition. For
+example, if a client has the osd capabilities: "allow r object_prefix
+prefix, allow w pool foo, allow x pool bar", then it has rw access to
+pool foo, rx access to pool bar, and r access to objects whose
+names begin with 'prefix' in any pool.
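+
+A sketch of how such a capability string could be attached to a keyring entry
+with **ceph-authtool** (the entity name ``client.bar`` is purely illustrative)::
+
+ ceph-authtool keyring -n client.bar --cap osd 'allow r object_prefix prefix, allow w pool foo, allow x pool bar'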
+
+Caps file format
+================
+
+The caps file format consists of zero or more key/value pairs, one per
+line. The key and value are separated by an ``=``, and the value must
+be quoted (with ``'`` or ``"``) if it contains any whitespace. The key
+is the name of the Ceph subsystem (``osd``, ``mds``, ``mon``), and the
+value is the capability string (see above).
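+
+For example, a caps file carrying the CephFS client capabilities shown above
+might look like this (a sketch; the quotes are only required when a value
+contains whitespace)::
+
+ mds = "allow"
+ osd = "allow rw pool data"
+ mon = "allow r"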
+
+
+Example
+=======
+
+To create a new keyring containing a key for client.foo with a 0644 file mode::
+
+ ceph-authtool -C -n client.foo --gen-key keyring --mode 0644
+
+To associate some capabilities with the key (namely, the ability to
+mount a Ceph file system)::
+
+ ceph-authtool -n client.foo --cap mds 'allow' --cap osd 'allow rw pool=data' --cap mon 'allow r' keyring
+
+To display the contents of the keyring::
+
+ ceph-authtool -l keyring
+
+When mounting a Ceph file system, you can grab the appropriately encoded secret key with::
+
+ mount -t ceph serverhost:/ mountpoint -o name=foo,secret=`ceph-authtool -p -n client.foo keyring`
+
+
+Availability
+============
+
+**ceph-authtool** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
+refer to the Ceph documentation at http://ceph.com/docs for more
+information.
+
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8)
diff --git a/doc/man/8/ceph-bluestore-tool.rst b/doc/man/8/ceph-bluestore-tool.rst
new file mode 100644
index 000000000..bb67ccc71
--- /dev/null
+++ b/doc/man/8/ceph-bluestore-tool.rst
@@ -0,0 +1,212 @@
+:orphan:
+
+======================================================
+ ceph-bluestore-tool -- bluestore administrative tool
+======================================================
+
+.. program:: ceph-bluestore-tool
+
+Synopsis
+========
+
+| **ceph-bluestore-tool** *command*
+ [ --dev *device* ... ]
+ [ --path *osd path* ]
+ [ --out-dir *dir* ]
+ [ --log-file | -l *filename* ]
+ [ --deep ]
+| **ceph-bluestore-tool** fsck|repair --path *osd path* [ --deep ]
+| **ceph-bluestore-tool** show-label --dev *device* ...
+| **ceph-bluestore-tool** prime-osd-dir --dev *device* --path *osd path*
+| **ceph-bluestore-tool** bluefs-export --path *osd path* --out-dir *dir*
+| **ceph-bluestore-tool** bluefs-bdev-new-wal --path *osd path* --dev-target *new-device*
+| **ceph-bluestore-tool** bluefs-bdev-new-db --path *osd path* --dev-target *new-device*
+| **ceph-bluestore-tool** bluefs-bdev-migrate --path *osd path* --dev-target *new-device* --devs-source *device1* [--devs-source *device2*]
+| **ceph-bluestore-tool** free-dump|free-score --path *osd path* [ --allocator block/bluefs-wal/bluefs-db/bluefs-slow ]
+| **ceph-bluestore-tool** reshard --path *osd path* --sharding *new sharding* [ --sharding-ctrl *control string* ]
+| **ceph-bluestore-tool** show-sharding --path *osd path*
+
+
+Description
+===========
+
+**ceph-bluestore-tool** is a utility to perform low-level administrative
+operations on a BlueStore instance.
+
+Commands
+========
+
+:command:`help`
+
+ show help
+
+:command:`fsck` [ --deep ]
+
+ run consistency check on BlueStore metadata. If *--deep* is specified, also read all object data and verify checksums.
+
+:command:`repair`
+
+ Run a consistency check *and* repair any errors we can.
+
+:command:`bluefs-export`
+
+ Export the contents of BlueFS (i.e., RocksDB files) to an output directory.
+
+:command:`bluefs-bdev-sizes` --path *osd path*
+
+ Print the device sizes, as understood by BlueFS, to stdout.
+
+:command:`bluefs-bdev-expand` --path *osd path*
+
+ Instruct BlueFS to check the size of its block devices and, if they have
+ expanded, make use of the additional space. Please note that only the new
+ files created by BlueFS will be allocated on the preferred block device if
+ it has enough free space, and the existing files that have spilled over to
+ the slow device will be gradually removed when RocksDB performs compaction.
+ In other words, if there is any data spilled over to the slow device, it
+ will be moved to the fast device over time.
+
+:command:`bluefs-bdev-new-wal` --path *osd path* --dev-target *new-device*
+
+ Adds WAL device to BlueFS, fails if WAL device already exists.
+
+:command:`bluefs-bdev-new-db` --path *osd path* --dev-target *new-device*
+
+ Adds DB device to BlueFS, fails if DB device already exists.
+
+:command:`bluefs-bdev-migrate` --dev-target *new-device* --devs-source *device1* [--devs-source *device2*]
+
+ Moves BlueFS data from the source device(s) to the target device; the source devices
+ (except the main one) are removed on success. The target device can be either an
+ already attached device or a new device; in the latter case it is added to the OSD,
+ replacing one of the source devices. The following replacement rules apply
+ (in order of precedence, stopping at the first match):
+
+ - if the source list has a DB volume, the target device replaces it.
+ - if the source list has a WAL volume, the target device replaces it.
+ - if the source list has only a slow volume, the operation is not permitted and requires explicit allocation via the new-db/new-wal command.
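+
+ A sketch of a typical invocation, using purely illustrative paths for a
+ hypothetical osd.0 whose DB volume is being moved to a new device::
+
+   ceph-bluestore-tool bluefs-bdev-migrate --path /var/lib/ceph/osd/ceph-0 \
+     --devs-source /var/lib/ceph/osd/ceph-0/block.db --dev-target /dev/sdb1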
+
+:command:`show-label` --dev *device* [...]
+
+ Show device label(s).
+
+:command:`free-dump` --path *osd path* [ --allocator block/bluefs-wal/bluefs-db/bluefs-slow ]
+
+ Dump all free regions in allocator.
+
+:command:`free-score` --path *osd path* [ --allocator block/bluefs-wal/bluefs-db/bluefs-slow ]
+
+ Give a [0-1] number that represents the quality of fragmentation in the allocator.
+ 0 represents the case when all free space is in one chunk; 1 represents the worst possible fragmentation.
+
+:command:`reshard` --path *osd path* --sharding *new sharding* [ --resharding-ctrl *control string* ]
+
+ Changes the sharding of BlueStore's RocksDB. Sharding is built on top of RocksDB column families.
+ This option allows testing the performance of a *new sharding* without the need to redeploy the OSD.
+ Resharding is usually a long process that involves walking through the entire RocksDB key space
+ and moving some of the keys to different column families.
+ The --resharding-ctrl option provides performance control over the resharding process.
+ An interrupted resharding will prevent the OSD from running.
+ Interrupted resharding does not corrupt data. It is always possible to continue a previous resharding,
+ or to select any other sharding scheme, including reverting to the original one.
+
+:command:`show-sharding` --path *osd path*
+
+ Show sharding that is currently applied to BlueStore's RocksDB.
+
+Options
+=======
+
+.. option:: --dev *device*
+
+ Add *device* to the list of devices to consider
+
+.. option:: --devs-source *device*
+
+ Add *device* to the list of devices to consider as sources for migrate operation
+
+.. option:: --dev-target *device*
+
+ Specify the target *device* for the migrate operation, or the device to add as a new DB/WAL.
+
+.. option:: --path *osd path*
+
+ Specify an osd path. In most cases, the device list is inferred from the symlinks present in *osd path*. This is usually simpler than explicitly specifying the device(s) with --dev.
+
+.. option:: --out-dir *dir*
+
+ Output directory for bluefs-export
+
+.. option:: -l, --log-file *log file*
+
+ file to log to
+
+.. option:: --log-level *num*
+
+ debug log level. Default is 30 (extremely verbose), 20 is very
+ verbose, 10 is verbose, and 1 is not very verbose.
+
+.. option:: --deep
+
+ deep scrub/repair (read and validate object data, not just metadata)
+
+.. option:: --allocator *name*
+
+ Useful for *free-dump* and *free-score* actions. Selects allocator(s).
+
+.. option:: --resharding-ctrl *control string*
+
+ Provides control over the resharding process. Specifies how often to refresh the RocksDB iterator
+ and how large the commit batch should be before committing to RocksDB. The option format is:
+ <iterator_refresh_bytes>/<iterator_refresh_keys>/<batch_commit_bytes>/<batch_commit_keys>
+ Default: 10000000/10000/1000000/1000
+
+Device labels
+=============
+
+Every BlueStore block device has a single block label at the beginning of the
+device. You can dump the contents of the label with::
+
+ ceph-bluestore-tool show-label --dev *device*
+
+The main device will have a lot of metadata, including information
+that used to be stored in small files in the OSD data directory. The
+auxiliary devices (db and wal) will only have the minimum required
+fields (OSD UUID, size, device type, birth time).
+
+OSD directory priming
+=====================
+
+You can generate the content for an OSD data directory that can start up a
+BlueStore OSD with the *prime-osd-dir* command::
+
+ ceph-bluestore-tool prime-osd-dir --dev *main device* --path /var/lib/ceph/osd/ceph-*id*
+
+BlueFS log rescue
+=====================
+
+Some versions of BlueStore were susceptible to the BlueFS log growing extremely
+large - beyond the point of making booting the OSD impossible. This state is
+indicated by a boot that takes very long and fails in the _replay function.
+
+This can be fixed by::
+
+  ceph-bluestore-tool fsck --path *osd path* --bluefs_replay_recovery=true
+
+It is advised to first check if the rescue process would be successful::
+
+  ceph-bluestore-tool fsck --path *osd path* \
+    --bluefs_replay_recovery=true --bluefs_replay_recovery_disable_compact=true
+
+If the above fsck is successful, the fix procedure can be applied.
+
+Availability
+============
+
+**ceph-bluestore-tool** is part of Ceph, a massively scalable,
+open-source, distributed storage system. Please refer to the Ceph
+documentation at http://ceph.com/docs for more information.
+
+
+See also
+========
+
+:doc:`ceph-osd <ceph-osd>`\(8)
diff --git a/doc/man/8/ceph-clsinfo.rst b/doc/man/8/ceph-clsinfo.rst
new file mode 100644
index 000000000..0188ce131
--- /dev/null
+++ b/doc/man/8/ceph-clsinfo.rst
@@ -0,0 +1,49 @@
+:orphan:
+
+===============================================
+ ceph-clsinfo -- show class object information
+===============================================
+
+.. program:: ceph-clsinfo
+
+Synopsis
+========
+
+| **ceph-clsinfo** [ *options* ] ... *filename*
+
+
+Description
+===========
+
+**ceph-clsinfo** can show name, version, and architecture information
+about a specific class object.
+
+
+Options
+=======
+
+.. option:: -n, --name
+
+ Shows the class name
+
+.. option:: -v, --version
+
+ Shows the class version
+
+.. option:: -a, --arch
+
+ Shows the class architecture
+
+
+Availability
+============
+
+**ceph-clsinfo** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
+refer to the Ceph documentation at http://ceph.com/docs for more
+information.
+
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8)
diff --git a/doc/man/8/ceph-conf.rst b/doc/man/8/ceph-conf.rst
new file mode 100644
index 000000000..b8b99c2aa
--- /dev/null
+++ b/doc/man/8/ceph-conf.rst
@@ -0,0 +1,149 @@
+:orphan:
+
+==================================
+ ceph-conf -- ceph conf file tool
+==================================
+
+.. program:: ceph-conf
+
+Synopsis
+========
+
+| **ceph-conf** -c *conffile* --list-all-sections
+| **ceph-conf** -c *conffile* -L
+| **ceph-conf** -c *conffile* -l *prefix*
+| **ceph-conf** *key* -s *section1* ...
+| **ceph-conf** [-s *section* ] [-r] --lookup *key*
+| **ceph-conf** [-s *section* ] *key*
+
+
+Description
+===========
+
+**ceph-conf** is a utility for getting information from a ceph
+configuration file. As with most Ceph programs, you can specify which
+Ceph configuration file to use with the ``-c`` flag.
+
+Note that unlike other ceph tools, **ceph-conf** will *only* read from
+config files (or return compiled-in default values)--it will *not*
+fetch config values from the monitor cluster. For this reason it is
+recommended that **ceph-conf** only be used in legacy environments
+that are strictly config-file based. New deployments and tools should
+instead rely on either querying the monitor explicitly for
+configuration (e.g., ``ceph config get <daemon> <option>``) or use
+daemons themselves to fetch effective config options (e.g.,
+``ceph-osd -i 123 --show-config-value osd_data``). The latter option
+has the advantages of drawing from compiled-in defaults (which
+occasionally vary between daemons), config files, and the monitor's
+config database, providing the exact value that that daemon would be
+using if it were started.
+
+Actions
+=======
+
+**ceph-conf** performs one of the following actions:
+
+.. option:: -L, --list-all-sections
+
+ list all sections in the configuration file.
+
+.. option:: -l, --list-sections *prefix*
+
+ list the sections with the given *prefix*. For example, ``--list-sections mon``
+ would list all sections beginning with ``mon``.
+
+.. option:: --lookup *key*
+
+ search and print the specified configuration setting. Note: ``--lookup`` is
+ the default action. If no other actions are given on the command line, we will
+ default to doing a lookup.
+
+.. option:: -h, --help
+
+ print a summary of usage.
+
+
+Options
+=======
+
+.. option:: -c *conffile*
+
+ the Ceph configuration file.
+
+.. option:: --filter-key *key*
+
+ filter section list to only include sections with given *key* defined.
+
+.. option:: --filter-key-value *key* ``=`` *value*
+
+ filter section list to only include sections with given *key*/*value* pair.
+
+.. option:: --name *type.id*
+
+ the Ceph name in which the sections are searched (default 'client.admin').
+ For example, if we specify ``--name osd.0``, the following sections will be
+ searched: [osd.0], [osd], [global]
+
+.. option:: --pid *pid*
+
+ override the ``$pid`` when expanding options. For example, if an option is
+ configured like ``/var/log/$name.$pid.log``, the ``$pid`` portion in its
+ value will be substituted using the PID of **ceph-conf** instead of the
+ PID of the process specified using the ``--name`` option.
+
+.. option:: -r, --resolve-search
+
+ search for the first file that exists and can be opened in the resulting
+ comma-delimited search list.
+
+.. option:: -s, --section
+
+ additional sections to search. These additional sections will be searched
+ before the sections that would normally be searched. As always, the first
+ matching entry we find will be returned.
+
+
+Examples
+========
+
+To find out what value osd 0 will use for the "osd data" option::
+
+ ceph-conf -c foo.conf --name osd.0 --lookup "osd data"
+
+To find out what value mds.a will use for the "log file" option::
+
+ ceph-conf -c foo.conf --name mds.a "log file"
+
+To list all sections that begin with "osd"::
+
+ ceph-conf -c foo.conf -l osd
+
+To list all sections::
+
+ ceph-conf -c foo.conf -L
+
+To print the path of the "keyring" used by "client.0"::
+
+ ceph-conf --name client.0 -r -l keyring
+
+
+Files
+=====
+
+``/etc/ceph/$cluster.conf``, ``~/.ceph/$cluster.conf``, ``$cluster.conf``
+
+the Ceph configuration files to use if not specified.
+
+
+Availability
+============
+
+**ceph-conf** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer
+to the Ceph documentation at http://ceph.com/docs for more
+information.
+
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8)
diff --git a/doc/man/8/ceph-create-keys.rst b/doc/man/8/ceph-create-keys.rst
new file mode 100644
index 000000000..20b6560b2
--- /dev/null
+++ b/doc/man/8/ceph-create-keys.rst
@@ -0,0 +1,67 @@
+:orphan:
+
+===============================================
+ceph-create-keys -- ceph keyring generate tool
+===============================================
+
+.. program:: ceph-create-keys
+
+Synopsis
+========
+
+| **ceph-create-keys** [-h] [-v] [-t seconds] [--cluster *name*] --id *id*
+
+
+Description
+===========
+
+:program:`ceph-create-keys` is a utility to generate bootstrap keyrings using
+the given monitor when it is ready.
+
+It creates the following auth entities (or users):
+
+``client.admin``
+
+ and its key for your client host.
+
+``client.bootstrap-{osd, rgw, mds}``
+
+ and their keys for bootstrapping the corresponding services.
+
+To list all users in the cluster::
+
+ ceph auth ls
+
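+For example, to wait for the monitor with id ``a`` (an illustrative id) to join
+quorum and then generate the bootstrap keyrings, one might run::
+
+ ceph-create-keys --id a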
+
+Options
+=======
+
+.. option:: --cluster
+
+ name of the cluster (default 'ceph').
+
+.. option:: -t
+
+ time out after **seconds** (default: 600) waiting for a response from the monitor
+
+.. option:: -i, --id
+
+ id of a ceph-mon that is coming up. **ceph-create-keys** will wait until it joins quorum.
+
+.. option:: -v, --verbose
+
+ be more verbose.
+
+
+Availability
+============
+
+**ceph-create-keys** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer
+to the Ceph documentation at http://ceph.com/docs for more
+information.
+
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8)
diff --git a/doc/man/8/ceph-debugpack.rst b/doc/man/8/ceph-debugpack.rst
new file mode 100644
index 000000000..4f2c4f2f6
--- /dev/null
+++ b/doc/man/8/ceph-debugpack.rst
@@ -0,0 +1,50 @@
+:orphan:
+
+=============================================
+ ceph-debugpack -- ceph debug packer utility
+=============================================
+
+.. program:: ceph-debugpack
+
+Synopsis
+========
+
+| **ceph-debugpack** [ *options* ] *filename.tar.gz*
+
+
+Description
+===========
+
+**ceph-debugpack** will build a tarball containing various items that are
+useful for debugging crashes. The resulting tarball can be shared with
+Ceph developers when debugging a problem.
+
+The tarball will include the binaries for ceph-mds, ceph-osd, ceph-mon, and radosgw, any
+log files, the ceph.conf configuration file, any core files we can
+find, and (if the system is running) dumps of the current cluster state
+as reported by 'ceph report'.
+
+
+Options
+=======
+
+.. option:: -c ceph.conf, --conf=ceph.conf
+
+ Use *ceph.conf* configuration file instead of the default
+ ``/etc/ceph/ceph.conf`` to determine monitor addresses during
+ startup.
+
+
+Availability
+============
+
+**ceph-debugpack** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
+refer to the Ceph documentation at http://ceph.com/docs for more
+information.
+
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8),
+:doc:`ceph-post-file <ceph-post-file>`\(8)
diff --git a/doc/man/8/ceph-dencoder.rst b/doc/man/8/ceph-dencoder.rst
new file mode 100644
index 000000000..09032aa75
--- /dev/null
+++ b/doc/man/8/ceph-dencoder.rst
@@ -0,0 +1,151 @@
+:orphan:
+
+==============================================
+ ceph-dencoder -- ceph encoder/decoder utility
+==============================================
+
+.. program:: ceph-dencoder
+
+Synopsis
+========
+
+| **ceph-dencoder** [commands...]
+
+
+Description
+===========
+
+**ceph-dencoder** is a utility to encode, decode, and dump ceph data
+structures. It is used for debugging and for testing inter-version
+compatibility.
+
+**ceph-dencoder** takes a simple list of commands and performs them
+in order.
+
+Commands
+========
+
+.. option:: version
+
+ Print the version string for the **ceph-dencoder** binary.
+
+.. option:: import <file>
+
+ Read a binary blob of encoded data from the given file. It will be
+ placed in an in-memory buffer.
+
+.. option:: export <file>
+
+ Write the contents of the current in-memory buffer to the given
+ file.
+
+.. option:: list_types
+
+ List the data types known to this build of **ceph-dencoder**.
+
+.. option:: type <name>
+
+ Select the given type for future ``encode`` or ``decode`` operations.
+
+.. option:: skip <bytes>
+
+ Seek <bytes> into the imported file before reading the data structure. Use
+ this with objects that have a preamble or header before the object of interest.
+
+.. option:: decode
+
+ Decode the contents of the in-memory buffer into an instance of the
+ previously selected type. If there is an error, report it.
+
+.. option:: encode
+
+ Encode the contents of the in-memory instance of the previously
+ selected type to the in-memory buffer.
+
+.. option:: dump_json
+
+ Print a JSON-formatted description of the in-memory object.
+
+.. option:: count_tests
+
+ Print the number of built-in test instances of the previously
+ selected type that **ceph-dencoder** is able to generate.
+
+.. option:: select_test <n>
+
+ Select the given built-in test instance as the in-memory instance
+ of the type.
+
+.. option:: get_features
+
+ Print the decimal value of the feature set supported by this version
+ of **ceph-dencoder**. Each bit represents a feature. These correspond to
+ CEPH_FEATURE_* defines in src/include/ceph_features.h.
+
+.. option:: set_features <f>
+
+ Set the feature bits provided to ``encode`` to *f*. This allows
+ you to encode objects such that they can be understood by old
+ versions of the software (for those types that support it).
+
+Example
+=======
+
+Say you want to examine an attribute on an object stored by ``ceph-osd``. You can do this:
+
+::
+
+ $ cd /mnt/osd.12/current/2.b_head
+ $ attr -l foo_bar_head_EFE6384B
+ Attribute "ceph.snapset" has a 31 byte value for foo_bar_head_EFE6384B
+ Attribute "ceph._" has a 195 byte value for foo_bar_head_EFE6384B
+ $ attr foo_bar_head_EFE6384B -g ceph._ -q > /tmp/a
+ $ ceph-dencoder type object_info_t import /tmp/a decode dump_json
+ { "oid": { "oid": "foo",
+ "key": "bar",
+ "snapid": -2,
+ "hash": 4024842315,
+ "max": 0},
+ "locator": { "pool": 2,
+ "preferred": -1,
+ "key": "bar"},
+ "category": "",
+ "version": "9'1",
+ "prior_version": "0'0",
+ "last_reqid": "client.4116.0:1",
+ "size": 1681,
+ "mtime": "2012-02-21 08:58:23.666639",
+ "lost": 0,
+ "wrlock_by": "unknown.0.0:0",
+ "snaps": [],
+ "truncate_seq": 0,
+ "truncate_size": 0,
+ "watchers": {}}
+
+Alternatively, if you wish to dump an internal CephFS metadata object, you might
+do that like this:
+
+::
+
+ $ rados -p metadata get mds_snaptable mds_snaptable.bin
+ $ ceph-dencoder type SnapServer skip 8 import mds_snaptable.bin decode dump_json
+ { "snapserver": { "last_snap": 1,
+ "pending_noop": [],
+ "snaps": [],
+ "need_to_purge": {},
+ "pending_create": [],
+ "pending_destroy": []}}
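+
+To re-encode an object for an older version of the software, a sketch might look
+like this (the feature value is purely illustrative; a real value can be obtained
+by running ``get_features`` with the older version's **ceph-dencoder**):
+
+::
+
+ $ ceph-dencoder type object_info_t import /tmp/a decode set_features 1000 encode export /tmp/a.old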
+
+
+Availability
+============
+
+**ceph-dencoder** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
+refer to the Ceph documentation at http://ceph.com/docs for more
+information.
+
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8)
diff --git a/doc/man/8/ceph-deploy.rst b/doc/man/8/ceph-deploy.rst
new file mode 100644
index 000000000..3d099252a
--- /dev/null
+++ b/doc/man/8/ceph-deploy.rst
@@ -0,0 +1,529 @@
+:orphan:
+
+.. _ceph-deploy:
+
+=====================================
+ ceph-deploy -- Ceph deployment tool
+=====================================
+
+.. program:: ceph-deploy
+
+Synopsis
+========
+
+| **ceph-deploy** **new** [*initial-monitor-node(s)*]
+
+| **ceph-deploy** **install** [*ceph-node*] [*ceph-node*...]
+
+| **ceph-deploy** **mon** *create-initial*
+
+| **ceph-deploy** **osd** *create* *--data* *device* *ceph-node*
+
+| **ceph-deploy** **admin** [*admin-node*] [*ceph-node*...]
+
+| **ceph-deploy** **purgedata** [*ceph-node*] [*ceph-node*...]
+
+| **ceph-deploy** **forgetkeys**
+
+Description
+===========
+
+:program:`ceph-deploy` is a tool which allows easy and quick deployment of a
+Ceph cluster without involving complex and detailed manual configuration. It
+uses ssh to gain access to other Ceph nodes from the admin node and sudo for
+administrator privileges on them, and its underlying Python scripts automate
+the manual process of Ceph installation on each node from the admin node itself.
+It can be easily run on a workstation and doesn't require servers, databases or
+any other automated tools. With :program:`ceph-deploy`, it is really easy to set
+up and take down a cluster. However, it is not a generic deployment tool. It is
+a specific tool which is designed for those who want to get Ceph up and running
+quickly with only the unavoidable initial configuration settings and without the
+overhead of installing other tools like ``Chef``, ``Puppet`` or ``Juju``. Those
+who want to customize security settings, partitions or directory locations and
+want to set up a cluster following detailed manual steps should use other tools,
+such as ``Chef``, ``Puppet``, ``Juju`` or ``Crowbar``.
+
+With :program:`ceph-deploy`, you can install Ceph packages on remote nodes,
+create a cluster, add monitors, gather/forget keys, add OSDs and metadata
+servers, configure admin hosts or take down the cluster.
+
+Commands
+========
+
+new
+---
+
+Start deploying a new cluster and write a configuration file and keyring for it.
+It tries to copy ssh keys from admin node to gain passwordless ssh to monitor
+node(s), validates host IP, creates a cluster with a new initial monitor node or
+nodes for monitor quorum, a ceph configuration file, a monitor secret keyring and
+a log file for the new cluster. It populates the newly created Ceph configuration
+file with ``fsid`` of cluster, hostnames and IP addresses of initial monitor
+members under ``[global]`` section.
+
+Usage::
+
+ ceph-deploy new [MON][MON...]
+
+Here, [MON] is the initial monitor hostname (the short hostname, i.e., ``hostname -s``).
+
+Other options like :option:`--no-ssh-copykey`, :option:`--fsid`,
+:option:`--cluster-network` and :option:`--public-network` can also be used with
+this command.
+
+If more than one network interface is used, the ``public network`` setting has to be
+added under the ``[global]`` section of the Ceph configuration file. If the public subnet
+is given, the ``new`` command will choose the one IP from the remote host that exists
+within the subnet range. The public network can also be added at runtime using the
+:option:`--public-network` option with the command as mentioned above.
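+
+As an illustrative sketch (the hostnames and subnet are hypothetical)::
+
+ ceph-deploy new --public-network 192.168.1.0/24 mon1 mon2 mon3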
+
+
+install
+-------
+
+Install Ceph packages on remote hosts. As a first step it installs
+``yum-plugin-priorities`` in admin and other nodes using passwordless ssh and sudo
+so that Ceph packages from upstream repository get more priority. It then detects
+the platform and distribution for the hosts and installs Ceph normally by
+downloading distro compatible packages if adequate repo for Ceph is already added.
+``--release`` flag is used to get the latest release for installation. During
+detection of platform and distribution before installation, if it finds the
+``distro.init`` to be ``sysvinit`` (Fedora, CentOS/RHEL etc), it doesn't allow
+installation with a custom cluster name and uses the default name ``ceph`` for the
+cluster.
+
+If the user explicitly specifies a custom repo url with :option:`--repo-url` for
+installation, anything detected from the configuration will be overridden and
+the custom repository location will be used for installation of Ceph packages.
+If required, valid custom repositories are also detected and installed. In case
+of installation from a custom repo a boolean is used to determine the logic
+needed to proceed with a custom repo installation. A custom repo install helper
+is used that goes through config checks to retrieve repos (and any extra repos
+defined) and installs them. ``cd_conf`` is the object built from ``argparse``
+that holds the flags and information needed to determine what metadata from the
+configuration is to be used.
+
+A user can also opt to install only the repository without installing Ceph and
+its dependencies by using :option:`--repo` option.
+
+Usage::
+
+ ceph-deploy install [HOST][HOST...]
+
+Here, [HOST] is/are the host node(s) where Ceph is to be installed.
+
+An option ``--release`` is used to install a release known as CODENAME
+(default: firefly).
+
+Other options like :option:`--testing`, :option:`--dev`, :option:`--adjust-repos`,
+:option:`--no-adjust-repos`, :option:`--repo`, :option:`--local-mirror`,
+:option:`--repo-url` and :option:`--gpg-url` can also be used with this command.
+
+
+mds
+---
+
+Deploy Ceph mds on remote hosts. A metadata server is needed to use CephFS and
+the ``mds`` command is used to create one on the desired host node. It uses the
+subcommand ``create`` to do so. ``create`` first gets the hostname and distro
+information of the desired mds host. It then tries to read the ``bootstrap-mds``
+key for the cluster and deploy it in the desired host. The key generally has a
+format of ``{cluster}.bootstrap-mds.keyring``. If it doesn't find a keyring,
+it runs ``gatherkeys`` to get the keyring. It then creates an mds on the desired
+host under the path ``/var/lib/ceph/mds/`` in ``/var/lib/ceph/mds/{cluster}-{name}``
+format and a bootstrap keyring under ``/var/lib/ceph/bootstrap-mds/`` in
+``/var/lib/ceph/bootstrap-mds/{cluster}.keyring`` format. It then runs appropriate
+commands based on ``distro.init`` to start the ``mds``.
+
+Usage::
+
+ ceph-deploy mds create [HOST[:DAEMON-NAME]] [HOST[:DAEMON-NAME]...]
+
+The [DAEMON-NAME] is optional.
+
+
+mon
+---
+
+Deploy Ceph monitor on remote hosts. ``mon`` makes use of certain subcommands
+to deploy Ceph monitors on other nodes.
+
+Subcommand ``create-initial`` deploys monitors defined in
+``mon initial members`` under the ``[global]`` section of the Ceph configuration file,
+waits until they form quorum and then gathers keys, reporting the monitor status
+along the way. If the monitors don't form quorum the command will eventually
+time out.
+
+Usage::
+
+ ceph-deploy mon create-initial
+
+Subcommand ``create`` is used to deploy Ceph monitors by explicitly specifying
+the hosts which are desired to be made monitors. If no hosts are specified it
+will default to use the ``mon initial members`` defined under ``[global]``
+section of Ceph configuration file. ``create`` first detects platform and distro
+for desired hosts and checks if hostname is compatible for deployment. It then
+uses the monitor keyring initially created using the ``new`` command and deploys the
+monitor on the desired host. If multiple hosts were specified during the ``new`` command,
+i.e., if there are multiple hosts in ``mon initial members`` and multiple keyrings
+were created, then a concatenated keyring is used for deployment of monitors. In
+this process a keyring parser is used which looks for ``[entity]`` sections in
+monitor keyrings and returns a list of those sections. A helper is then used to
+collect all keyrings into a single blob that is injected into the monitors
+with :option:`--mkfs` on remote nodes. All keyring files are concatenated to be
+in a directory ending with ``.keyring``. During this process the helper uses the list
+of sections returned by the keyring parser to check if an entity is already present
+in a keyring and, if not, adds it. The concatenated keyring is used for deployment
+of monitors to desired multiple hosts.
+
+Usage::
+
+ ceph-deploy mon create [HOST] [HOST...]
+
+Here, [HOST] is hostname of desired monitor host(s).
+
+Subcommand ``add`` is used to add a monitor to an existing cluster. It first
+detects platform and distro for desired host and checks if hostname is compatible
+for deployment. It then uses the monitor keyring, ensures configuration for new
+monitor host and adds the monitor to the cluster. If the section for the monitor
+exists and defines a monitor address, that address will be used; otherwise it will
+fall back to resolving the hostname to an IP. If :option:`--address` is used it will override
+all other options. After adding the monitor to the cluster, it gives it some time
+to start. It then looks for any monitor errors and checks monitor status. Monitor
+errors arise if the monitor is not added in ``mon initial members``, if it doesn't
+exist in ``monmap`` and if neither ``public_addr`` nor ``public_network`` keys
+were defined for monitors. Under such conditions, monitors may not be able to
+form quorum. Monitor status tells if the monitor is up and running normally. The
+status is checked by running ``ceph daemon mon.hostname mon_status`` on remote
+end which provides the output and returns a boolean status of what is going on.
+``False`` means the monitor is not healthy even if it is up and running, while
+``True`` means the monitor is up and running correctly.
+
+Usage::
+
+ ceph-deploy mon add [HOST]
+
+ ceph-deploy mon add [HOST] --address [IP]
+
+Here, [HOST] is the hostname and [IP] is the IP address of the desired monitor
+node. Please note, unlike other ``mon`` subcommands, only one node can be
+specified at a time.
+
+Subcommand ``destroy`` is used to completely remove monitors on remote hosts.
+It takes hostnames as arguments. It stops the monitor, verifies if ``ceph-mon``
+daemon really stopped, creates an archive directory ``mon-remove`` under
+``/var/lib/ceph/``, archives old monitor directory in
+``{cluster}-{hostname}-{stamp}`` format in it and removes the monitor from
+cluster by running ``ceph remove...`` command.
+
+Usage::
+
+ ceph-deploy mon destroy [HOST] [HOST...]
+
+Here, [HOST] is hostname of monitor that is to be removed.
+
+
+gatherkeys
+----------
+
+Gather authentication keys for provisioning new nodes. It takes hostnames as
+arguments. It checks for and fetches ``client.admin`` keyring, monitor keyring
+and ``bootstrap-mds/bootstrap-osd`` keyring from monitor host. These
+authentication keys are used when new ``monitors/OSDs/MDS`` are added to the
+cluster.
+
+Usage::
+
+ ceph-deploy gatherkeys [HOST] [HOST...]
+
+Here, [HOST] is hostname of the monitor from where keys are to be pulled.
+
+
+disk
+----
+
+Manage disks on a remote host. It actually triggers the ``ceph-volume`` utility
+and its subcommands to manage disks.
+
+Subcommand ``list`` lists disk partitions and Ceph OSDs.
+
+Usage::
+
+ ceph-deploy disk list HOST
+
+
+Subcommand ``zap`` zaps/erases/destroys a device's partition table and
+contents. It actually uses ``ceph-volume lvm zap`` remotely, alternatively
+allowing someone to remove the Ceph metadata from the logical volume.
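+
+A sketch of the usage, assuming ``zap`` takes the host followed by one or more
+device paths::
+
+ ceph-deploy disk zap HOST DEVICE [DEVICE...]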
+
+osd
+---
+
+Manage OSDs by preparing data disk on remote host. ``osd`` makes use of certain
+subcommands for managing OSDs.
+
+Subcommand ``create`` prepares a device for Ceph OSD. It first checks against
+multiple OSDs getting created and warns about the possibility of more than the
+recommended number, which would cause issues with the maximum allowed PIDs on a system. It then
+reads the bootstrap-osd key for the cluster or writes the bootstrap key if not
+found.
+It then uses :program:`ceph-volume` utility's ``lvm create`` subcommand to
+prepare the disk (and the journal if using filestore) and deploy the OSD on the desired host.
+Once prepared, it gives some time to the OSD to start and checks for any
+possible errors and if found, reports to the user.
+
+Bluestore Usage::
+
+ ceph-deploy osd create --data DISK HOST
+
+Filestore Usage::
+
+ ceph-deploy osd create --data DISK --journal JOURNAL HOST
+
+
+.. note:: For other flags available, please see the man page or the --help menu
+ on ceph-deploy osd create
+
+Subcommand ``list`` lists devices associated to Ceph as part of an OSD.
+It uses the ``ceph-volume lvm list`` output, which is rich in detail, mapping
+OSDs to devices and other interesting information about the OSD setup.
+
+Usage::
+
+ ceph-deploy osd list HOST
+
+
+admin
+-----
+
+Push configuration and ``client.admin`` key to a remote host. It takes
+the ``{cluster}.client.admin.keyring`` from admin node and writes it under
+``/etc/ceph`` directory of desired node.
+
+Usage::
+
+ ceph-deploy admin [HOST] [HOST...]
+
+Here, [HOST] is desired host to be configured for Ceph administration.
+
+
+config
+------
+
+Push/pull a configuration file to/from a remote host. It uses the ``push`` subcommand
+to take the configuration file from the admin host and write it to the remote host under
+the ``/etc/ceph`` directory. It uses the ``pull`` subcommand to do the opposite, i.e., pull
+the configuration file under the ``/etc/ceph`` directory of the remote host to the admin node.
+
+Usage::
+
+ ceph-deploy config push [HOST] [HOST...]
+
+ ceph-deploy config pull [HOST] [HOST...]
+
+Here, [HOST] is the hostname of the node where config file will be pushed to or
+pulled from.
+
+
+uninstall
+---------
+
+Remove Ceph packages from remote hosts. It detects the platform and distro of
+selected host and uninstalls Ceph packages from it. However, some dependencies
+like ``librbd1`` and ``librados2`` will not be removed because they can cause
+issues with ``qemu-kvm``.
+
+Usage::
+
+ ceph-deploy uninstall [HOST] [HOST...]
+
+Here, [HOST] is hostname of the node from where Ceph will be uninstalled.
+
+
+purge
+-----
+
+Remove Ceph packages from remote hosts and purge all data. It detects the
+platform and distro of selected host, uninstalls Ceph packages and purges all
+data. However, some dependencies like ``librbd1`` and ``librados2`` will not be
+removed because they can cause issues with ``qemu-kvm``.
+
+Usage::
+
+ ceph-deploy purge [HOST] [HOST...]
+
+Here, [HOST] is hostname of the node from where Ceph will be purged.
+
+
+purgedata
+---------
+
+Purge (delete, destroy, discard, shred) any Ceph data from ``/var/lib/ceph``.
+Once it detects the platform and distro of the desired host, it first checks if Ceph
+is still installed on the selected host and, if installed, it won't purge data
+from it. If Ceph is already uninstalled from the host, it tries to remove the
+contents of ``/var/lib/ceph``. If that fails, OSDs are probably still mounted
+and need to be unmounted to continue. It unmounts the OSDs, tries to remove
+the contents of ``/var/lib/ceph`` again and checks for errors. It also removes
+the contents of ``/etc/ceph``. Once all steps are successfully completed, all the
+Ceph data from the selected host is removed.
+
+Usage::
+
+ ceph-deploy purgedata [HOST] [HOST...]
+
+Here, [HOST] is hostname of the node from where Ceph data will be purged.
+
+
+forgetkeys
+----------
+
+Remove authentication keys from the local directory. It removes all the
+authentication keys, i.e., the monitor keyring, client.admin keyring, bootstrap-osd
+and bootstrap-mds keyrings, from the node.
+
+Usage::
+
+ ceph-deploy forgetkeys
+
+
+pkg
+---
+
+Manage packages on remote hosts. It is used for installing or removing packages
+from remote hosts. The package names for installation or removal are to be
+specified after the command. Two options :option:`--install` and
+:option:`--remove` are used for this purpose.
+
+Usage::
+
+ ceph-deploy pkg --install [PKGs] [HOST] [HOST...]
+
+ ceph-deploy pkg --remove [PKGs] [HOST] [HOST...]
+
+Here, [PKGs] is a comma-separated list of package names and [HOST] is the hostname of the
+remote node where packages are to be installed or removed.
+
+
+Options
+=======
+
+.. option:: --address
+
+ IP address of the host node to be added to the cluster.
+
+.. option:: --adjust-repos
+
+ Install packages modifying source repos.
+
+.. option:: --ceph-conf
+
+ Use (or reuse) a given ``ceph.conf`` file.
+
+.. option:: --cluster
+
+ Name of the cluster.
+
+.. option:: --dev
+
+ Install a bleeding-edge build from a Git branch or tag (default: master).
+
+.. option:: --cluster-network
+
+ Specify the (internal) cluster network.
+
+.. option:: --dmcrypt
+
+ Encrypt [data-path] and/or journal devices with ``dm-crypt``.
+
+.. option:: --dmcrypt-key-dir
+
+ Directory where ``dm-crypt`` keys are stored.
+
+.. option:: --install
+
+ Comma-separated package(s) to install on remote hosts.
+
+.. option:: --fs-type
+
+ Filesystem to use to format disk ``(xfs, btrfs or ext4)``. Note that support for btrfs and ext4 is no longer tested or recommended; please use xfs.
+
+.. option:: --fsid
+
+ Provide an alternate FSID for ``ceph.conf`` generation.
+
+.. option:: --gpg-url
+
+ Specify a GPG key url to be used with custom repos (defaults to ceph.com).
+
+.. option:: --keyrings
+
+ Concatenate multiple keyrings to be seeded on new monitors.
+
+.. option:: --local-mirror
+
+ Fetch packages and push them to hosts for a local repo mirror.
+
+.. option:: --mkfs
+
+ Inject keys to MONs on remote nodes.
+
+.. option:: --no-adjust-repos
+
+ Install packages without modifying source repos.
+
+.. option:: --no-ssh-copykey
+
+ Do not attempt to copy ssh keys.
+
+.. option:: --overwrite-conf
+
+ Overwrite an existing conf file on remote host (if present).
+
+.. option:: --public-network
+
+ Specify the public network for a cluster.
+
+.. option:: --remove
+
+ Comma-separated package(s) to remove from remote hosts.
+
+.. option:: --repo
+
+ Install repo files only (skips package installation).
+
+.. option:: --repo-url
+
+ Specify a repo url that mirrors/contains Ceph packages.
+
+.. option:: --testing
+
+ Install the latest development release.
+
+.. option:: --username
+
+ The username to connect to the remote host.
+
+.. option:: --version
+
+ The current installed version of :program:`ceph-deploy`.
+
+.. option:: --zap-disk
+
+ Destroy the partition table and content of a disk.
+
+
+Availability
+============
+
+:program:`ceph-deploy` is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
+the documentation at https://ceph.com/ceph-deploy/docs for more information.
+
+
+See also
+========
+
+:doc:`ceph-mon <ceph-mon>`\(8),
+:doc:`ceph-osd <ceph-osd>`\(8),
+:doc:`ceph-volume <ceph-volume>`\(8),
+:doc:`ceph-mds <ceph-mds>`\(8)
diff --git a/doc/man/8/ceph-diff-sorted.rst b/doc/man/8/ceph-diff-sorted.rst
new file mode 100644
index 000000000..99e958336
--- /dev/null
+++ b/doc/man/8/ceph-diff-sorted.rst
@@ -0,0 +1,71 @@
+:orphan:
+
+==========================================================
+ ceph-diff-sorted -- compare two sorted files line by line
+==========================================================
+
+.. program:: ceph-diff-sorted
+
+Synopsis
+========
+
+| **ceph-diff-sorted** *file1* *file2*
+
+Description
+===========
+
+:program:`ceph-diff-sorted` is a simplified *diff* utility optimized
+for comparing two files with lines that are lexically sorted.
+
+The output is simplified in comparison to that of the standard `diff`
+tool available in POSIX systems. Angle brackets ('<' and '>') are used
+to show lines that appear in one file but not the other. The output is
+not compatible with the `patch` tool.
+
+This tool was created in order to perform diffs of large files (e.g.,
+containing billions of lines) that the standard `diff` tool cannot
+handle efficiently. Knowing that the lines are sorted allows this to
+be done efficiently with minimal memory overhead.
+
+The sorting of each file needs to be done lexically. Most POSIX
+systems use the *LANG* environment variable to determine the `sort`
+tool's sorting order. To sort lexically we would need something such
+as::
+
+ $ LANG=C sort some-file.txt >some-file-sorted.txt
+
+Examples
+========
+
+Compare two files::
+
+ $ ceph-diff-sorted fileA.txt fileB.txt
+
+Exit Status
+===========
+
+When complete, the exit status will be set to one of the following:
+
+0
+ files same
+1
+ files different
+2
+ usage problem (e.g., wrong number of command-line arguments)
+3
+ problem opening input file
+4
+ bad file content (e.g., unsorted order or empty lines)
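+
+A sketch of acting on the exit status from a shell script (the file names are
+illustrative)::
+
+ ceph-diff-sorted fileA.txt fileB.txt > differences.txt
+ case $? in
+     0) echo "files are the same" ;;
+     1) echo "files differ; see differences.txt" ;;
+     *) echo "error from ceph-diff-sorted" >&2 ;;
+ esac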
+
+
+Availability
+============
+
+:program:`ceph-diff-sorted` is part of Ceph, a massively scalable,
+open-source, distributed storage system. Please refer to the Ceph
+documentation at http://ceph.com/docs for more information.
+
+See also
+========
+
+:doc:`rgw-orphan-list <rgw-orphan-list>`\(8)
diff --git a/doc/man/8/ceph-fuse.rst b/doc/man/8/ceph-fuse.rst
new file mode 100644
index 000000000..f230626b8
--- /dev/null
+++ b/doc/man/8/ceph-fuse.rst
@@ -0,0 +1,85 @@
+:orphan:
+
+=========================================
+ ceph-fuse -- FUSE-based client for ceph
+=========================================
+
+.. program:: ceph-fuse
+
+Synopsis
+========
+
+| **ceph-fuse** [-n *client.username*] [ -m *monaddr*:*port* ] *mountpoint* [ *fuse options* ]
+
+
+Description
+===========
+
+**ceph-fuse** is a FUSE ("Filesystem in USErspace") client for the Ceph
+distributed file system. It will mount a Ceph file system specified via the -m
+option, or as described by ceph.conf (see below), at the specified mount point. See
+`Mount CephFS using FUSE`_ for detailed information.
+
+The file system can be unmounted with::
+
+ fusermount -u mountpoint
+
+or by sending ``SIGINT`` to the ``ceph-fuse`` process.
+
+
+Options
+=======
+
+Any options not recognized by ceph-fuse will be passed on to libfuse.
+
+.. option:: -o opt,[opt...]
+
+ Mount options.
+
+.. option:: -d
+
+ Run in foreground, send all log output to stderr and enable FUSE debugging (-o debug).
+
+.. option:: -c ceph.conf, --conf=ceph.conf
+
+ Use *ceph.conf* configuration file instead of the default
+ ``/etc/ceph/ceph.conf`` to determine monitor addresses during startup.
+
+.. option:: -m monaddress[:port]
+
+ Connect to specified monitor (instead of looking through ceph.conf).
+
+.. option:: -n client.{cephx-username}
+
+ Pass the name of the CephX user whose secret key is to be used for mounting.
+
+.. option:: -k <path-to-keyring>
+
+ Provide path to keyring; useful when it's absent in standard locations.
+
+.. option:: --client_mountpoint/-r root_directory
+
+ Use root_directory as the mounted root, rather than the full Ceph tree.
+
+.. option:: -f
+
+ Foreground: do not daemonize after startup (run in foreground). Do not generate a pid file.
+
+.. option:: -s
+
+ Disable multi-threaded operation.
+
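+For example, to mount the file system at a hypothetical mount point
+``/mnt/cephfs`` as the CephX user ``client.foo`` (a sketch; adjust the name and
+path to your environment)::
+
+ ceph-fuse -n client.foo /mnt/cephfs
+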
+Availability
+============
+
+**ceph-fuse** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
+the Ceph documentation at http://ceph.com/docs for more information.
+
+
+See also
+========
+
+fusermount(8),
+:doc:`ceph <ceph>`\(8)
+
+.. _Mount CephFS using FUSE: ../../../cephfs/mount-using-fuse/
diff --git a/doc/man/8/ceph-immutable-object-cache.rst b/doc/man/8/ceph-immutable-object-cache.rst
new file mode 100644
index 000000000..2e971abdc
--- /dev/null
+++ b/doc/man/8/ceph-immutable-object-cache.rst
@@ -0,0 +1,76 @@
+:orphan:
+
+======================================================================
+ ceph-immutable-object-cache -- Ceph daemon for immutable object cache
+======================================================================
+
+.. program:: ceph-immutable-object-cache
+
+Synopsis
+========
+
+| **ceph-immutable-object-cache**
+
+
+Description
+===========
+
+:program:`ceph-immutable-object-cache` is a daemon that caches RADOS
+objects locally for Ceph clusters. It promotes objects to a local directory
+upon promote requests, and future reads will be serviced from these cached
+objects.
+
+It connects to local clusters via the RADOS protocol, relying on
+default search paths to find ceph.conf files, monitor addresses and
+authentication information for them, i.e. ``/etc/ceph/$cluster.conf``,
+``/etc/ceph/$cluster.keyring``, and
+``/etc/ceph/$cluster.$name.keyring``, where ``$cluster`` is the
+human-friendly name of the cluster, and ``$name`` is the rados user to
+connect as, e.g. ``client.ceph-immutable-object-cache``.
+
+
+Options
+=======
+
+.. option:: -c ceph.conf, --conf=ceph.conf
+
+ Use ``ceph.conf`` configuration file instead of the default
+ ``/etc/ceph/ceph.conf`` to determine monitor addresses during startup.
+
+.. option:: -m monaddress[:port]
+
+ Connect to specified monitor (instead of looking through ``ceph.conf``).
+
+.. option:: -i ID, --id ID
+
+ Set the ID portion of name for ceph-immutable-object-cache
+
+.. option:: -n TYPE.ID, --name TYPE.ID
+
+ Set the rados user name for the daemon (e.g., client.ceph-immutable-object-cache)
+
+.. option:: --cluster NAME
+
+ Set the cluster name (default: ceph)
+
+.. option:: -d
+
+ Run in foreground, log to stderr
+
+.. option:: -f
+
+ Run in foreground, log to usual location
+
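+A minimal sketch of starting the daemon in the foreground as the
+``client.ceph-immutable-object-cache`` user mentioned above (the cluster and
+user names are illustrative)::
+
+ ceph-immutable-object-cache -f --cluster ceph -n client.ceph-immutable-object-cache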
+
+Availability
+============
+
+:program:`ceph-immutable-object-cache` is part of Ceph, a massively scalable, open-source, distributed
+storage system. Please refer to the Ceph documentation at http://ceph.com/docs for
+more information.
+
+
+See also
+========
+
+:doc:`rbd <rbd>`\(8)
diff --git a/doc/man/8/ceph-kvstore-tool.rst b/doc/man/8/ceph-kvstore-tool.rst
new file mode 100644
index 000000000..1eb99c030
--- /dev/null
+++ b/doc/man/8/ceph-kvstore-tool.rst
@@ -0,0 +1,98 @@
+:orphan:
+
+=====================================================
+ ceph-kvstore-tool -- ceph kvstore manipulation tool
+=====================================================
+
+.. program:: ceph-kvstore-tool
+
+Synopsis
+========
+
+| **ceph-kvstore-tool** <leveldb|rocksdb|bluestore-kv> <store path> *command* [args...]
+
+
+Description
+===========
+
+:program:`ceph-kvstore-tool` is a kvstore manipulation tool. It allows users to manipulate
+leveldb/rocksdb data (such as an OSD's omap) offline.
+
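+For example, the key/value pairs of a stopped monitor's RocksDB store could be
+listed offline like this (the store path below is an illustrative assumption;
+adjust it to the daemon being inspected)::
+
+    ceph-kvstore-tool rocksdb /var/lib/ceph/mon/ceph-a/store.db list
+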
+Commands
+========
+
+The :program:`ceph-kvstore-tool` utility provides the following commands for
+debugging purposes:
+
+:command:`list [prefix]`
+ Print key of all KV pairs stored with the URL encoded prefix.
+
+:command:`list-crc [prefix]`
+ Print CRC of all KV pairs stored with the URL encoded prefix.
+
+:command:`dump [prefix]`
+ Print key and value of all KV pairs stored with the URL encoded prefix.
+
+:command:`exists <prefix> [key]`
+ Check if there is any KV pair stored with the URL encoded prefix. If key
+ is also specified, check for the key with the prefix instead.
+
+:command:`get <prefix> <key> [out <file>]`
+ Get the value of the KV pair stored with the URL encoded prefix and key.
+ If file is also specified, write the value to the file.
+
+:command:`crc <prefix> <key>`
+ Get the CRC of the KV pair stored with the URL encoded prefix and key.
+
+:command:`get-size [<prefix> <key>]`
+ Get estimated store size or size of value specified by prefix and key.
+
+:command:`set <prefix> <key> [ver <N>|in <file>]`
+ Set the value of the KV pair stored with the URL encoded prefix and key.
+ The value could be *version_t* or text.
+
+:command:`rm <prefix> <key>`
+ Remove the KV pair stored with the URL encoded prefix and key.
+
+:command:`rm-prefix <prefix>`
+ Remove all KV pairs stored with the URL encoded prefix.
+
+:command:`store-copy <path> [num-keys-per-tx]`
+ Copy all KV pairs to another directory specified by ``path``.
+ [num-keys-per-tx] is the number of KV pairs copied for a transaction.
+
+:command:`store-crc <path>`
+ Store CRC of all KV pairs to a file specified by ``path``.
+
+:command:`compact`
+ Subcommand ``compact`` is used to compact all data of kvstore. It will open
+ the database, and trigger a database's compaction. After compaction, some
+ disk space may be released.
+
+:command:`compact-prefix <prefix>`
+ Compact all entries specified by the URL encoded prefix.
+
+:command:`compact-range <prefix> <start> <end>`
+ Compact some entries specified by the URL encoded prefix and range.
+
+:command:`destructive-repair`
+ Make a (potentially destructive) effort to recover a corrupted database.
+ Note that in the case of rocksdb this may corrupt an otherwise uncorrupted
+ database--use this only as a last resort!
+
+:command:`stats`
+ Prints statistics from underlying key-value database. This is only for informative purposes.
+ Format and information content may vary between releases. For RocksDB information includes
+ compactions stats, performance counters, memory usage and internal RocksDB stats.
+
+Availability
+============
+
+**ceph-kvstore-tool** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
+the Ceph documentation at http://ceph.com/docs for more information.
+
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8)
diff --git a/doc/man/8/ceph-mds.rst b/doc/man/8/ceph-mds.rst
new file mode 100644
index 000000000..a07b9105f
--- /dev/null
+++ b/doc/man/8/ceph-mds.rst
@@ -0,0 +1,82 @@
+:orphan:
+
+=========================================
+ ceph-mds -- ceph metadata server daemon
+=========================================
+
+.. program:: ceph-mds
+
+Synopsis
+========
+
+| **ceph-mds** -i <*ID*> [flags]
+
+
+Description
+===========
+
+**ceph-mds** is the metadata server daemon for the Ceph distributed file
+system. One or more instances of ceph-mds collectively manage the file
+system namespace, coordinating access to the shared OSD cluster.
+
+Each ceph-mds daemon instance should have a unique name. The name is used
+to identify daemon instances in the ceph.conf.
+
+Once the daemon has started, the monitor cluster will normally assign
+it a logical rank, or put it in a standby pool to take over for
+another daemon that crashes. Some of the specified options can cause
+other behaviors.
+
+
+Options
+=======
+
+.. option:: -f, --foreground
+
+ Foreground: do not daemonize after startup (run in foreground). Do
+ not generate a pid file. Useful when run via :doc:`ceph-run
+ <ceph-run>`\(8).
+
+.. option:: -d
+
+ Debug mode: like ``-f``, but also send all log output to stderr.
+
+.. option:: --setuser userorgid
+
+ Set uid after starting. If a username is specified, the user
+   record is looked up to get a uid and a gid, and the gid is also set,
+   unless --setgroup is also specified.
+
+.. option:: --setgroup grouporgid
+
+ Set gid after starting. If a group name is specified the group
+ record is looked up to get a gid.
+
+.. option:: -c ceph.conf, --conf=ceph.conf
+
+ Use *ceph.conf* configuration file instead of the default
+ ``/etc/ceph/ceph.conf`` to determine monitor addresses during
+ startup.
+
+.. option:: -m monaddress[:port]
+
+ Connect to specified monitor (instead of looking through
+ ``ceph.conf``).
+
+.. option:: --id/-i ID
+
+ Set ID portion of the MDS name.
+
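+For example, a metadata server named ``mds.a`` could be run in the foreground
+for debugging like this (the ID ``a`` is an arbitrary example)::
+
+    ceph-mds -i a -f
+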
+Availability
+============
+
+**ceph-mds** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to the Ceph documentation at
+http://ceph.com/docs for more information.
+
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8),
+:doc:`ceph-mon <ceph-mon>`\(8),
+:doc:`ceph-osd <ceph-osd>`\(8)
diff --git a/doc/man/8/ceph-mon.rst b/doc/man/8/ceph-mon.rst
new file mode 100644
index 000000000..aeb6a38fc
--- /dev/null
+++ b/doc/man/8/ceph-mon.rst
@@ -0,0 +1,99 @@
+:orphan:
+
+=================================
+ ceph-mon -- ceph monitor daemon
+=================================
+
+.. program:: ceph-mon
+
+Synopsis
+========
+
+| **ceph-mon** -i *monid* [ --mon-data *mondatapath* ]
+
+
+Description
+===========
+
+**ceph-mon** is the cluster monitor daemon for the Ceph distributed
+file system. One or more instances of **ceph-mon** form a Paxos
+part-time parliament cluster that provides extremely reliable and
+durable storage of cluster membership, configuration, and state.
+
+The *mondatapath* refers to a directory on a local file system storing
+monitor data. It is normally specified via the ``mon data`` option in
+the configuration file.
+
+Options
+=======
+
+.. option:: -f, --foreground
+
+ Foreground: do not daemonize after startup (run in foreground). Do
+ not generate a pid file. Useful when run via :doc:`ceph-run <ceph-run>`\(8).
+
+.. option:: -d
+
+ Debug mode: like ``-f``, but also send all log output to stderr.
+
+.. option:: --setuser userorgid
+
+ Set uid after starting. If a username is specified, the user
+   record is looked up to get a uid and a gid, and the gid is also set,
+   unless --setgroup is also specified.
+
+.. option:: --setgroup grouporgid
+
+ Set gid after starting. If a group name is specified the group
+ record is looked up to get a gid.
+
+.. option:: -c ceph.conf, --conf=ceph.conf
+
+ Use *ceph.conf* configuration file instead of the default
+ ``/etc/ceph/ceph.conf`` to determine monitor addresses during
+ startup.
+
+.. option:: --mkfs
+
+   Initialize the ``mon data`` directory with seed information to form an
+   initial Ceph file system or to join an existing monitor cluster (see the
+   example at the end of this section). Three pieces of information must be
+   provided:
+
+ - The cluster fsid. This can come from a monmap (``--monmap <path>``) or
+ explicitly via ``--fsid <uuid>``.
+ - A list of monitors and their addresses. This list of monitors
+ can come from a monmap (``--monmap <path>``), the ``mon host``
+ configuration value (in *ceph.conf* or via ``-m
+ host1,host2,...``), or (for backward compatibility) the deprecated ``mon addr`` lines in *ceph.conf*. If this
+ monitor is to be part of the initial monitor quorum for a new
+ Ceph cluster, then it must be included in the initial list,
+ matching either the name or address of a monitor in the list.
+ When matching by address, either the ``public addr`` or ``public
+ subnet`` options may be used.
+ - The monitor secret key ``mon.``. This must be included in the
+ keyring provided via ``--keyring <path>``.
+
+.. option:: --keyring
+
+ Specify a keyring for use with ``--mkfs``.
+
+.. option:: --no-config-file
+
+ Signal that we don't want to rely on a *ceph.conf*, either user provided
+ or the default, to run the daemon. This will entail providing all
+ necessary options to the daemon as arguments.
+
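+For illustration, a new monitor's data directory might be initialized with
+``--mkfs`` and the daemon then started as follows (the monitor ID ``a`` and the
+monmap/keyring paths are placeholder values for this sketch)::
+
+    ceph-mon --mkfs -i a --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring
+    ceph-mon -i a
+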
+Availability
+============
+
+**ceph-mon** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer
+to the Ceph documentation at http://ceph.com/docs for more
+information.
+
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8),
+:doc:`ceph-mds <ceph-mds>`\(8),
+:doc:`ceph-osd <ceph-osd>`\(8)
diff --git a/doc/man/8/ceph-objectstore-tool.rst b/doc/man/8/ceph-objectstore-tool.rst
new file mode 100644
index 000000000..19acc5913
--- /dev/null
+++ b/doc/man/8/ceph-objectstore-tool.rst
@@ -0,0 +1,488 @@
+:orphan:
+
+==============================================================
+ceph-objectstore-tool -- modify or examine the state of an OSD
+==============================================================
+
+Synopsis
+========
+
+
+| **ceph-objectstore-tool** --data-path *path to osd* [--op *list* ]
+
+
+
+Possible object operations:
+
+* (get|set)-bytes [file]
+* set-(attr|omap) [file]
+* (get|rm)-(attr|omap)
+* get-omaphdr
+* set-omaphdr [file]
+* list-attrs
+* list-omap
+* remove|removeall
+* dump
+* set-size
+* clear-data-digest
+* remove-clone-metadata
+
+
+Description
+===========
+
+**ceph-objectstore-tool** is a tool for modifying the state of an OSD. It facilitates manipulating an object's content, removing an object, listing the omap, manipulating the omap header, manipulating the omap key, listing object attributes, and manipulating object attribute keys.
+
+**ceph-objectstore-tool** provides two main modes: (1) a mode that specifies the "--op" argument (for example, **ceph-objectstore-tool** --data-path $PATH_TO_OSD --op $SELECT_OPERATION [--pgid $PGID] [--dry-run]), and (2) a mode for positional object operations. If the second mode is used, the object can be specified by ID or by the JSON output of the --op list.
+
+| **ceph-objectstore-tool** --data-path *path to osd* [--pgid *$PG_ID* ][--op *command*]
+| **ceph-objectstore-tool** --data-path *path to osd* [ --op *list $OBJECT_ID*]
+
+Possible ``--op`` commands:
+
+* info
+* log
+* remove
+* mkfs
+* fsck
+* repair
+* fuse
+* dup
+* export
+* export-remove
+* import
+* list
+* list-slow-omap
+* fix-lost
+* list-pgs
+* dump-journal
+* dump-super
+* meta-list
+* get-osdmap
+* set-osdmap
+* get-inc-osdmap
+* set-inc-osdmap
+* mark-complete
+* reset-last-complete
+* apply-layout-settings
+* update-mon-db
+* dump-export
+* trim-pg-log
+
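+For example, a placement group can be exported to a file on one OSD and
+imported on another with the ``export`` and ``import`` operations (the paths,
+PG id and file name below are placeholders)::
+
+    ceph-objectstore-tool --data-path $PATH_TO_OSD --pgid $PG_ID --op export --file /tmp/$PG_ID.export
+    ceph-objectstore-tool --data-path $PATH_TO_OTHER_OSD --op import --file /tmp/$PG_ID.export
+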
+Installation
+============
+
+The `ceph-osd` package provides **ceph-objectstore-tool**.
+
+
+Examples
+========
+
+Modifying Objects
+-----------------
+These commands modify the state of an OSD. The OSD must not be running when ceph-objectstore-tool is used.
+
+Listing Objects and Placement Groups
+------------------------------------
+
+Make sure that the target OSD is down::
+
+ systemctl status ceph-osd@$OSD_NUMBER
+
+Identify all objects within an OSD::
+
+ ceph-objectstore-tool --data-path $PATH_TO_OSD --op list
+
+Identify all objects within a placement group::
+
+ ceph-objectstore-tool --data-path $PATH_TO_OSD --pgid $PG_ID --op list
+
+Identify the placement group (PG) that an object belongs to::
+
+ ceph-objectstore-tool --data-path $PATH_TO_OSD --op list $OBJECT_ID
+
+
+Fixing Lost Objects
+-------------------
+
+Make sure the OSD is down::
+
+ systemctl status ceph-osd@OSD_NUMBER
+
+Fix all lost objects::
+
+ ceph-objectstore-tool --data-path $PATH_TO_OSD --op fix-lost
+
+Fix all the lost objects within a specified placement group::
+
+ ceph-objectstore-tool --data-path $PATH_TO_OSD --pgid $PG_ID --op fix-lost
+
+Fix a lost object by its identifier::
+
+ ceph-objectstore-tool --data-path $PATH_TO_OSD --op fix-lost $OBJECT_ID
+
+Fix legacy lost objects::
+
+ ceph-objectstore-tool --data-path $PATH_TO_OSD --op fix-lost
+
+
+Manipulating an object's content
+--------------------------------
+
+1. Make sure that the target OSD is down::
+
+ systemctl status ceph-osd@$OSD_NUMBER
+
+2. Find the object by listing the objects of the OSD or placement group.
+
+3. Before setting the bytes on the object, make a backup and a working copy of the object. Here is the syntactic form of that command::
+
+ ceph-objectstore-tool --data-path $PATH_TO_OSD --pgid $PG_ID $OBJECT get-bytes > $OBJECT_FILE_NAME
+
+For example::
+
+ [root@osd ~]# ceph-objectstore-tool --data-path /var/lib/ceph/osd/ceph-0 --pgid 0.1c '{"oid":"zone_info.default","key":"","snapid":-2,"hash":235010478,"max":0,"pool":11,"namespace":""}' get-bytes > zone_info.default.backup
+
+ [root@osd ~]# ceph-objectstore-tool --data-path /var/lib/ceph/osd/ceph-0 --pgid 0.1c '{"oid":"zone_info.default","key":"","snapid":-2,"hash":235010478,"max":0,"pool":11,"namespace":""}' get-bytes > zone_info.default.working-copy
+
+The first command creates the back-up copy, and the second command creates the working copy.
+
+4. Edit the working copy object file.
+
+5. Set the bytes of the object::
+
+ ceph-objectstore-tool --data-path $PATH_TO_OSD --pgid $PG_ID $OBJECT set-bytes < $OBJECT_FILE_NAME
+
+For example::
+
+ [root@osd ~]# ceph-objectstore-tool --data-path /var/lib/ceph/osd/ceph-0 --pgid 0.1c '{"oid":"zone_info.default","key":"","snapid":-2,"hash":235010478,"max":0,"pool":11,"namespace":""}' set-bytes < zone_info.default.working-copy
+
+
+Removing an Object
+------------------
+
+Use **ceph-objectstore-tool** to remove objects. When an object is removed, its contents and references are removed from the placement group (PG).
+
+Remove an object (syntax)::
+
+ ceph-objectstore-tool --data-path $PATH_TO_OSD --pgid $PG_ID $OBJECT remove
+
+Remove an object (example)::
+
+    [root@osd ~]# ceph-objectstore-tool --data-path /var/lib/ceph/osd/ceph-0 --pgid 0.1c '{"oid":"zone_info.default","key":"","snapid":-2,"hash":235010478,"max":0,"pool":11,"namespace":""}' remove
+
+
+Listing the Object Map
+----------------------
+
+Use the ceph-objectstore-tool to list the contents of the object map (OMAP). The output is a list of keys.
+
+
+1. Verify the appropriate OSD is down:
+
+ Syntax::
+
+ systemctl status ceph-osd@$OSD_NUMBER
+
+ Example::
+
+ [root@osd ~]# systemctl status ceph-osd@1
+
+2. List the object map:
+
+ Syntax::
+
+ ceph-objectstore-tool --data-path $PATH_TO_OSD --pgid $PG_ID $OBJECT list-omap
+
+ Example::
+
+ [root@osd ~]# ceph-objectstore-tool --data-path /var/lib/ceph/osd/ceph-0 --pgid 0.1c '{"oid":"zone_info.default","key":"","snapid":-2,"hash":235010478,"max":0,"pool":11,"namespace":""}' list-omap
+
+
+Manipulating the Object Map Header
+----------------------------------
+The **ceph-objectstore-tool** utility will output the object map (OMAP) header with the values associated with the object's keys.
+
+Note: If using FileStore as the OSD backend object store, then add the `--journal-path $PATH_TO_JOURNAL` argument when getting or setting the object map header, where the `$PATH_TO_JOURNAL` variable is the absolute path to the OSD journal; for example `/var/lib/ceph/osd/ceph-0/journal`.
+
+Prerequisites
+^^^^^^^^^^^^^
+
+ * Having root access to the Ceph OSD node.
+ * Stopping the ceph-osd daemon.
+
+Procedure
+^^^^^^^^^
+
+ Verify that the target OSD is down:
+
+ Syntax::
+
+ systemctl status ceph-osd@$OSD_NUMBER
+
+ Example::
+
+ [root@osd ~]# systemctl status ceph-osd@1
+
+ Get the object map header:
+
+ Syntax::
+
+ ceph-objectstore-tool --data-path $PATH_TO_OSD --pgid $PG_ID $OBJECT get-omaphdr > $OBJECT_MAP_FILE_NAME
+
+ Example::
+
+ [root@osd ~]# ceph-objectstore-tool --data-path /var/lib/ceph/osd/ceph-0 --pgid 0.1c '{"oid":"zone_info.default","key":"","snapid":-2,"hash":235010478,"max":0,"pool":11,"namespace":""}' get-omaphdr > zone_info.default.omaphdr.txt
+
+ Set the object map header:
+
+ Syntax::
+
+      ceph-objectstore-tool --data-path $PATH_TO_OSD --pgid $PG_ID $OBJECT set-omaphdr < $OBJECT_MAP_FILE_NAME
+
+ Example::
+
+ [root@osd ~]# ceph-objectstore-tool --data-path /var/lib/ceph/osd/ceph-0 --pgid 0.1c '{"oid":"zone_info.default","key":"","snapid":-2,"hash":235010478,"max":0,"pool":11,"namespace":""}' set-omaphdr < zone_info.default.omaphdr.txt
+
+
+Manipulating the Object Map Key
+-------------------------------
+
+Use the **ceph-objectstore-tool** utility to change the object map (OMAP) key. You need to provide the data path, the placement group identifier (PG ID), the object, and the key in the OMAP.
+
+Note: If using FileStore as the OSD backend object store, then add the `--journal-path $PATH_TO_JOURNAL` argument when getting, setting or removing the object map key, where the `$PATH_TO_JOURNAL` variable is the absolute path to the OSD journal; for example `/var/lib/ceph/osd/ceph-0/journal`.
+
+Prerequisites
+^^^^^^^^^^^^^
+
+ * Having root access to the Ceph OSD node.
+ * Stopping the ceph-osd daemon.
+
+Procedure
+^^^^^^^^^
+
+ Get the object map key:
+
+ Syntax::
+
+ ceph-objectstore-tool --data-path $PATH_TO_OSD --pgid $PG_ID $OBJECT get-omap $KEY > $OBJECT_MAP_FILE_NAME
+
+ Example::
+
+ [root@osd ~]# ceph-objectstore-tool --data-path /var/lib/ceph/osd/ceph-0 --pgid 0.1c '{"oid":"zone_info.default","key":"","snapid":-2,"hash":235010478,"max":0,"pool":11,"namespace":""}' get-omap "" > zone_info.default.omap.txt
+
+ Set the object map key:
+
+ Syntax::
+
+ ceph-objectstore-tool --data-path $PATH_TO_OSD --pgid $PG_ID $OBJECT set-omap $KEY < $OBJECT_MAP_FILE_NAME
+
+ Example::
+
+ [root@osd ~]# ceph-objectstore-tool --data-path /var/lib/ceph/osd/ceph-0 --pgid 0.1c '{"oid":"zone_info.default","key":"","snapid":-2,"hash":235010478,"max":0,"pool":11,"namespace":""}' set-omap "" < zone_info.default.omap.txt
+
+ Remove the object map key:
+
+ Syntax::
+
+ ceph-objectstore-tool --data-path $PATH_TO_OSD --pgid $PG_ID $OBJECT rm-omap $KEY
+
+ Example::
+
+ [root@osd ~]# ceph-objectstore-tool --data-path /var/lib/ceph/osd/ceph-0 --pgid 0.1c '{"oid":"zone_info.default","key":"","snapid":-2,"hash":235010478,"max":0,"pool":11,"namespace":""}' rm-omap ""
+
+
+Listing an Object's Attributes
+-------------------------------
+
+Use the **ceph-objectstore-tool** utility to list an object's attributes. The output provides you with the object's keys and values.
+
+Note: If you are using FileStore as the OSD backend object store and the journal is on a different disk, you must add the `--journal-path $PATH_TO_JOURNAL` argument when listing an object's attributes, where the `$PATH_TO_JOURNAL` variable is the absolute path to the OSD journal; for example `/var/lib/ceph/osd/ceph-0/journal`.
+
+Prerequisites
+^^^^^^^^^^^^^
+
+ * Having root access to the Ceph OSD node.
+ * Stopping the ceph-osd daemon.
+
+Procedure
+^^^^^^^^^
+
+ Verify that the target OSD is down:
+
+ Syntax::
+
+ systemctl status ceph-osd@$OSD_NUMBER
+
+ Example::
+
+ [root@osd ~]# systemctl status ceph-osd@1
+
+ List the object's attributes:
+
+ Syntax::
+
+ ceph-objectstore-tool --data-path $PATH_TO_OSD --pgid $PG_ID $OBJECT list-attrs
+
+ Example::
+
+ [root@osd ~]# ceph-objectstore-tool --data-path /var/lib/ceph/osd/ceph-0 --pgid 0.1c '{"oid":"zone_info.default","key":"","snapid":-2,"hash":235010478,"max":0,"pool":11,"namespace":""}' list-attrs
+
+
+Manipulating the Object Attribute Key
+-------------------------------------
+
+Use the **ceph-objectstore-tool** utility to change an object's attributes. To manipulate the object's attributes you need the data and journal paths, the placement group identifier (PG ID), the object, and the key in the object's attribute.
+
+Note: If you are using FileStore as the OSD backend object store and the journal is on a different disk, you must add the `--journal-path $PATH_TO_JOURNAL` argument when getting, setting or removing the object's attributes, where the `$PATH_TO_JOURNAL` variable is the absolute path to the OSD journal; for example `/var/lib/ceph/osd/ceph-0/journal`.
+
+Prerequisites
+^^^^^^^^^^^^^
+
+ * Having root access to the Ceph OSD node.
+ * Stopping the ceph-osd daemon.
+
+Procedure
+^^^^^^^^^
+
+ Verify that the target OSD is down.
+
+ Syntax::
+
+ systemctl status ceph-osd@$OSD_NUMBER
+
+ Example::
+
+ [root@osd ~]# systemctl status ceph-osd@1
+
+ Get the object's attributes:
+
+ Syntax::
+
+      ceph-objectstore-tool --data-path $PATH_TO_OSD --pgid $PG_ID $OBJECT get-attr $KEY > $OBJECT_ATTRS_FILE_NAME
+
+ Example::
+
+      [root@osd ~]# ceph-objectstore-tool --data-path /var/lib/ceph/osd/ceph-0 --pgid 0.1c '{"oid":"zone_info.default","key":"","snapid":-2,"hash":235010478,"max":0,"pool":11,"namespace":""}' get-attr "oid" > zone_info.default.attr.txt
+
+ Set an object's attributes:
+
+ Syntax::
+
+      ceph-objectstore-tool --data-path $PATH_TO_OSD --pgid $PG_ID $OBJECT set-attr $KEY < $OBJECT_ATTRS_FILE_NAME
+
+ Example::
+
+      [root@osd ~]# ceph-objectstore-tool --data-path /var/lib/ceph/osd/ceph-0 --pgid 0.1c '{"oid":"zone_info.default","key":"","snapid":-2,"hash":235010478,"max":0,"pool":11,"namespace":""}' set-attr "oid" < zone_info.default.attr.txt
+
+ Remove an object's attributes:
+
+ Syntax::
+
+      ceph-objectstore-tool --data-path $PATH_TO_OSD --pgid $PG_ID $OBJECT rm-attr $KEY
+
+ Example::
+
+      [root@osd ~]# ceph-objectstore-tool --data-path /var/lib/ceph/osd/ceph-0 --pgid 0.1c '{"oid":"zone_info.default","key":"","snapid":-2,"hash":235010478,"max":0,"pool":11,"namespace":""}' rm-attr "oid"
+
+
+Options
+=======
+
+.. option:: --help
+
+ produce help message
+
+.. option:: --type arg
+
+ Arg is one of [bluestore (default), filestore, memstore]. This option is needed only if the tool can't tell the type from --data-path.
+
+.. option:: --data-path arg
+
+ path to object store, mandatory
+
+.. option:: --journal-path arg
+
+ path to journal, use if tool can't find it
+
+.. option:: --pgid arg
+
+ PG id, mandatory for info, log, remove, export, export-remove, mark-complete, trim-pg-log, and mandatory for apply-layout-settings if --pool is not specified
+
+.. option:: --pool arg
+
+ Pool name, mandatory for apply-layout-settings if --pgid is not specified
+
+.. option:: --op arg
+
+ Arg is one of [info, log, remove, mkfs, fsck, repair, fuse, dup, export, export-remove, import, list, fix-lost, list-pgs, dump-journal, dump-super, meta-list, get-osdmap, set-osdmap, get-inc-osdmap, set-inc-osdmap, mark-complete, reset-last-complete, apply-layout-settings, update-mon-db, dump-export, trim-pg-log]
+
+.. option:: --epoch arg
+
+ epoch# for get-osdmap and get-inc-osdmap, the current epoch in use if not specified
+
+.. option:: --file arg
+
+ path of file to export, export-remove, import, get-osdmap, set-osdmap, get-inc-osdmap or set-inc-osdmap
+
+.. option:: --mon-store-path arg
+
+ path of monstore to update-mon-db
+
+.. option:: --fsid arg
+
+ fsid for new store created by mkfs
+
+.. option:: --target-data-path arg
+
+ path of target object store (for --op dup)
+
+.. option:: --mountpoint arg
+
+ fuse mountpoint
+
+.. option:: --format arg (=json-pretty)
+
+ Output format which may be json, json-pretty, xml, xml-pretty
+
+.. option:: --debug
+
+ Enable diagnostic output to stderr
+
+.. option:: --force
+
+ Ignore some types of errors and proceed with operation - USE WITH CAUTION: CORRUPTION POSSIBLE NOW OR IN THE FUTURE
+
+.. option:: --skip-journal-replay
+
+ Disable journal replay
+
+.. option:: --skip-mount-omap
+
+ Disable mounting of omap
+
+.. option:: --head
+
+ Find head/snapdir when searching for objects by name
+
+.. option:: --dry-run
+
+ Don't modify the objectstore
+
+.. option:: --namespace arg
+
+ Specify namespace when searching for objects
+
+.. option:: --rmtype arg
+
+ Specify corrupting object removal 'snapmap' or 'nosnapmap' - TESTING USE ONLY
+
+
+
+Error Codes
+===========
+"Mount failed with '(11) Resource temporarily unavailable" - This might mean that you have attempted to run **ceph-objectstore-tool** on a running OSD.
+
+Availability
+============
+
+**ceph-objectstore-tool** is part of Ceph, a massively scalable, open-source, distributed storage system. **ceph-objectstore-tool** is provided by the package `ceph-osd`. Refer to the Ceph documentation at http://ceph.com/docs for more information.
diff --git a/doc/man/8/ceph-osd.rst b/doc/man/8/ceph-osd.rst
new file mode 100644
index 000000000..2e9424b11
--- /dev/null
+++ b/doc/man/8/ceph-osd.rst
@@ -0,0 +1,140 @@
+:orphan:
+
+.. _ceph_osd-daemon:
+
+========================================
+ ceph-osd -- ceph object storage daemon
+========================================
+
+.. program:: ceph-osd
+
+Synopsis
+========
+
+| **ceph-osd** -i *osdnum* [ --osd-data *datapath* ] [ --osd-journal
+ *journal* ] [ --mkfs ] [ --mkjournal ] [--flush-journal] [--check-allows-journal] [--check-wants-journal] [--check-needs-journal] [ --mkkey ] [ --osdspec-affinity ]
+
+
+Description
+===========
+
+**ceph-osd** is the object storage daemon for the Ceph distributed file
+system. It is responsible for storing objects on a local file system
+and providing access to them over the network.
+
+The datapath argument should be a directory on an xfs file system
+where the object data resides. The journal is optional, and is only
+useful performance-wise when it resides on a different disk than
+datapath with low latency (ideally, an NVRAM device).
+
+
+Options
+=======
+
+.. option:: -f, --foreground
+
+ Foreground: do not daemonize after startup (run in foreground). Do
+ not generate a pid file. Useful when run via :doc:`ceph-run <ceph-run>`\(8).
+
+.. option:: -d
+
+ Debug mode: like ``-f``, but also send all log output to stderr.
+
+.. option:: --setuser userorgid
+
+ Set uid after starting. If a username is specified, the user
+   record is looked up to get a uid and a gid, and the gid is also set,
+   unless --setgroup is also specified.
+
+.. option:: --setgroup grouporgid
+
+ Set gid after starting. If a group name is specified the group
+ record is looked up to get a gid.
+
+.. option:: --osd-data osddata
+
+ Use object store at *osddata*.
+
+.. option:: --osd-journal journal
+
+ Journal updates to *journal*.
+
+.. option:: --check-wants-journal
+
+ Check whether a journal is desired.
+
+.. option:: --check-allows-journal
+
+ Check whether a journal is allowed.
+
+.. option:: --check-needs-journal
+
+ Check whether a journal is required.
+
+.. option:: --mkfs
+
+ Create an empty object repository. This also initializes the journal
+ (if one is defined).
+
+.. option:: --mkkey
+
+ Generate a new secret key. This is normally used in combination
+ with ``--mkfs`` as it is more convenient than generating a key by
+ hand with :doc:`ceph-authtool <ceph-authtool>`\(8).
+
+.. option:: --mkjournal
+
+ Create a new journal file to match an existing object repository.
+ This is useful if the journal device or file is wiped out due to a
+ disk or file system failure.
+
+.. option:: --flush-journal
+
+ Flush the journal to permanent store. This runs in the foreground
+ so you know when it's completed. This can be useful if you want to
+ resize the journal or need to otherwise destroy it: this guarantees
+ you won't lose data.
+
+.. option:: --get-cluster-fsid
+
+ Print the cluster fsid (uuid) and exit.
+
+.. option:: --get-osd-fsid
+
+ Print the OSD's fsid and exit. The OSD's uuid is generated at
+ --mkfs time and is thus unique to a particular instantiation of
+ this OSD.
+
+.. option:: --get-journal-fsid
+
+ Print the journal's uuid. The journal fsid is set to match the OSD
+ fsid at --mkfs time.
+
+.. option:: -c ceph.conf, --conf=ceph.conf
+
+ Use *ceph.conf* configuration file instead of the default
+ ``/etc/ceph/ceph.conf`` for runtime configuration options.
+
+.. option:: -m monaddress[:port]
+
+ Connect to specified monitor (instead of looking through
+ ``ceph.conf``).
+
+.. option:: --osdspec-affinity
+
+ Set an affinity to a certain OSDSpec.
+ This option can only be used in conjunction with --mkfs.
+
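+For example, a new OSD is typically initialized once with ``--mkfs`` and can
+then be run in the foreground for debugging (the OSD id ``0`` is a placeholder
+value)::
+
+    ceph-osd -i 0 --mkfs --mkkey
+    ceph-osd -i 0 -f
+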
+Availability
+============
+
+**ceph-osd** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
+the Ceph documentation at http://ceph.com/docs for more information.
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8),
+:doc:`ceph-mds <ceph-mds>`\(8),
+:doc:`ceph-mon <ceph-mon>`\(8),
+:doc:`ceph-authtool <ceph-authtool>`\(8)
diff --git a/doc/man/8/ceph-post-file.rst b/doc/man/8/ceph-post-file.rst
new file mode 100644
index 000000000..7e4899f5a
--- /dev/null
+++ b/doc/man/8/ceph-post-file.rst
@@ -0,0 +1,71 @@
+:orphan:
+
+==================================================
+ ceph-post-file -- post files for ceph developers
+==================================================
+
+.. program:: ceph-post-file
+
+Synopsis
+========
+
+| **ceph-post-file** [-d *description*] [-u *user*] *file or dir* ...
+
+
+Description
+===========
+
+**ceph-post-file** will upload files or directories to ceph.com for
+later analysis by Ceph developers.
+
+Each invocation uploads files or directories to a separate directory
+with a unique tag. That tag can be passed to a developer or
+referenced in a bug report (http://tracker.ceph.com/). Once the
+upload completes, the directory is marked non-readable and
+non-writeable to prevent access or modification by other users.
+
+Warning
+=======
+
+Basic measures are taken to make posted data be visible only to
+developers with access to ceph.com infrastructure. However, users
+should think twice and/or take appropriate precautions before
+posting potentially sensitive data (for example, logs or data
+directories that contain Ceph secrets).
+
+
+Options
+=======
+
+.. option:: -d *description*, --description *description*
+
+ Add a short description for the upload. This is a good opportunity
+ to reference a bug number. There is no default value.
+
+.. option:: -u *user*
+
+ Set the user metadata for the upload. This defaults to `whoami`@`hostname -f`.
+
+Examples
+========
+
+To upload a single log::
+
+ ceph-post-file /var/log/ceph/ceph-mon.`hostname`.log
+
+To upload several directories::
+
+ ceph-post-file -d 'mon data directories' /var/log/ceph/mon/*
+
+
+Availability
+============
+
+**ceph-post-file** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
+the Ceph documentation at http://ceph.com/docs for more information.
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8),
+:doc:`ceph-debugpack <ceph-debugpack>`\(8)
diff --git a/doc/man/8/ceph-rbdnamer.rst b/doc/man/8/ceph-rbdnamer.rst
new file mode 100644
index 000000000..d31f61658
--- /dev/null
+++ b/doc/man/8/ceph-rbdnamer.rst
@@ -0,0 +1,36 @@
+:orphan:
+
+==================================================
+ ceph-rbdnamer -- udev helper to name RBD devices
+==================================================
+
+.. program:: ceph-rbdnamer
+
+
+Synopsis
+========
+
+| **ceph-rbdnamer** *num*
+
+
+Description
+===========
+
+**ceph-rbdnamer** prints the pool, namespace, image and snapshot names
+for a given RBD device to stdout. It is used by the `udev` device manager
+to set up RBD device symlinks. The appropriate `udev` rules are
+provided in a file named `50-rbd.rules`.
+
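+For example, for the device ``/dev/rbd0`` the udev rules would invoke the
+helper with the device number (shown here only as an illustration; the rules
+file normally does this automatically)::
+
+    ceph-rbdnamer 0
+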
+Availability
+============
+
+**ceph-rbdnamer** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
+refer to the Ceph documentation at http://ceph.com/docs for more
+information.
+
+
+See also
+========
+
+:doc:`rbd <rbd>`\(8),
+:doc:`ceph <ceph>`\(8)
diff --git a/doc/man/8/ceph-run.rst b/doc/man/8/ceph-run.rst
new file mode 100644
index 000000000..ed76c2848
--- /dev/null
+++ b/doc/man/8/ceph-run.rst
@@ -0,0 +1,45 @@
+:orphan:
+
+=========================================
+ ceph-run -- restart daemon on core dump
+=========================================
+
+.. program:: ceph-run
+
+Synopsis
+========
+
+| **ceph-run** *command* ...
+
+
+Description
+===========
+
+**ceph-run** is a simple wrapper that will restart a daemon if it exits
+with a signal indicating it crashed and possibly core dumped (that is,
+signals 3, 4, 5, 6, 8, or 11).
+
+The command should run the daemon in the foreground. For Ceph daemons,
+that means the ``-f`` option.
+
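+For example, an OSD could be kept running under the wrapper like this (the OSD
+id ``0`` is a placeholder value)::
+
+    ceph-run ceph-osd -i 0 -f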
+
+Options
+=======
+
+None
+
+
+Availability
+============
+
+**ceph-run** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
+the Ceph documentation at http://ceph.com/docs for more information.
+
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8),
+:doc:`ceph-mon <ceph-mon>`\(8),
+:doc:`ceph-mds <ceph-mds>`\(8),
+:doc:`ceph-osd <ceph-osd>`\(8)
diff --git a/doc/man/8/ceph-syn.rst b/doc/man/8/ceph-syn.rst
new file mode 100644
index 000000000..a30c460cb
--- /dev/null
+++ b/doc/man/8/ceph-syn.rst
@@ -0,0 +1,99 @@
+:orphan:
+
+===============================================
+ ceph-syn -- ceph synthetic workload generator
+===============================================
+
+.. program:: ceph-syn
+
+Synopsis
+========
+
+| **ceph-syn** [ -m *monaddr*:*port* ] --syn *command* *...*
+
+
+Description
+===========
+
+**ceph-syn** is a simple synthetic workload generator for the Ceph
+distributed file system. It uses the userspace client library to
+generate simple workloads against a currently running file system. The
+file system need not be mounted via ceph-fuse(8) or the kernel client.
+
+One or more ``--syn`` command arguments specify the particular
+workload, as documented below.
+
+
+Options
+=======
+
+.. option:: -d
+
+ Detach from console and daemonize after startup.
+
+.. option:: -c ceph.conf, --conf=ceph.conf
+
+ Use *ceph.conf* configuration file instead of the default
+ ``/etc/ceph/ceph.conf`` to determine monitor addresses during
+ startup.
+
+.. option:: -m monaddress[:port]
+
+ Connect to specified monitor (instead of looking through
+ ``ceph.conf``).
+
+.. option:: --num_client num
+
+ Run num different clients, each in a separate thread.
+
+.. option:: --syn workloadspec
+
+ Run the given workload. May be specified as many times as
+ needed. Workloads will normally run sequentially.
+
+
+Workloads
+=========
+
+Each workload should be preceded by ``--syn`` on the command
+line. This is not a complete list.
+
+:command:`mknap` *path* *snapname*
+ Create a snapshot called *snapname* on *path*.
+
+:command:`rmsnap` *path* *snapname*
+ Delete snapshot called *snapname* on *path*.
+
+:command:`rmfile` *path*
+ Delete/unlink *path*.
+
+:command:`writefile` *sizeinmb* *blocksize*
+ Create a file, named after our client id, that is *sizeinmb* MB by
+ writing *blocksize* chunks.
+
+:command:`readfile` *sizeinmb* *blocksize*
+   Read a file, named after our client id, that is *sizeinmb* MB, in
+   *blocksize* chunks.
+
+:command:`rw` *sizeinmb* *blocksize*
+ Write file, then read it back, as above.
+
+:command:`makedirs` *numsubdirs* *numfiles* *depth*
+ Create a hierarchy of directories that is *depth* levels deep. Give
+ each directory *numsubdirs* subdirectories and *numfiles* files.
+
+:command:`walk`
+ Recursively walk the file system (like find).
+
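+For example, the following illustrative invocation runs two client threads,
+each creating a small directory tree and then walking it::
+
+    ceph-syn --num_client 2 --syn makedirs 3 10 2 --syn walk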
+
+Availability
+============
+
+**ceph-syn** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
+the Ceph documentation at http://ceph.com/docs for more information.
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8),
+:doc:`ceph-fuse <ceph-fuse>`\(8)
diff --git a/doc/man/8/ceph-volume-systemd.rst b/doc/man/8/ceph-volume-systemd.rst
new file mode 100644
index 000000000..b8578796c
--- /dev/null
+++ b/doc/man/8/ceph-volume-systemd.rst
@@ -0,0 +1,55 @@
+:orphan:
+
+=======================================================
+ ceph-volume-systemd -- systemd ceph-volume helper tool
+=======================================================
+
+.. program:: ceph-volume-systemd
+
+Synopsis
+========
+
+| **ceph-volume-systemd** *systemd instance name*
+
+
+Description
+===========
+:program:`ceph-volume-systemd` is a systemd helper tool that receives input
+from (dynamically created) systemd units so that activation of OSDs can
+proceed.
+
+It translates the input into a call to ``ceph-volume`` for activation purposes
+only.
+
+
+Examples
+========
+Its input is the ``systemd instance name`` (represented by ``%i`` in a systemd
+unit), and it should be in the following format::
+
+ <ceph-volume subcommand>-<extra metadata>
+
+In the case of ``lvm`` a call could look like::
+
+ /usr/bin/ceph-volume-systemd lvm-0-8715BEB4-15C5-49DE-BA6F-401086EC7B41
+
+Which in turn will call ``ceph-volume`` in the following way::
+
+ ceph-volume lvm trigger 0-8715BEB4-15C5-49DE-BA6F-401086EC7B41
+
+Any other subcommand will need to have implemented a ``trigger`` command that
+can consume the extra metadata in this format.
+
+
+Availability
+============
+
+:program:`ceph-volume-systemd` is part of Ceph, a massively scalable,
+open-source, distributed storage system. Please refer to the documentation at
+http://docs.ceph.com/ for more information.
+
+
+See also
+========
+
+:doc:`ceph-osd <ceph-osd>`\(8),
diff --git a/doc/man/8/ceph-volume.rst b/doc/man/8/ceph-volume.rst
new file mode 100644
index 000000000..5a28695ae
--- /dev/null
+++ b/doc/man/8/ceph-volume.rst
@@ -0,0 +1,425 @@
+:orphan:
+
+=======================================================
+ ceph-volume -- Ceph OSD deployment and inspection tool
+=======================================================
+
+.. program:: ceph-volume
+
+Synopsis
+========
+
+| **ceph-volume** [-h] [--cluster CLUSTER] [--log-level LOG_LEVEL]
+| [--log-path LOG_PATH]
+
+| **ceph-volume** **inventory**
+
+| **ceph-volume** **lvm** [ *trigger* | *create* | *activate* | *prepare*
+| *zap* | *list* | *batch* | *new-wal* | *new-db* | *migrate* ]
+
+| **ceph-volume** **simple** [ *trigger* | *scan* | *activate* ]
+
+
+Description
+===========
+
+:program:`ceph-volume` is a single purpose command line tool to deploy logical
+volumes as OSDs, trying to maintain a similar API to ``ceph-disk`` when
+preparing, activating, and creating OSDs.
+
+It deviates from ``ceph-disk`` by not interacting with or relying on the udev
+rules that come installed for Ceph. These rules allow automatic detection of
+previously set up devices that are in turn fed into ``ceph-disk`` to activate
+them.
+
+
+Commands
+========
+
+inventory
+---------
+
+This subcommand provides information about a host's physical disk inventory and
+reports metadata about these disks. Among this metadata one can find
+disk-specific data items (like model, size, rotational or solid state) as well
+as Ceph-specific data items, such as whether a device is available for use
+with Ceph or whether logical volumes are present.
+
+Examples::
+
+ ceph-volume inventory
+ ceph-volume inventory /dev/sda
+ ceph-volume inventory --format json-pretty
+
+Optional arguments:
+
+* [-h, --help] show the help message and exit
+* [--format] report format, valid values are ``plain`` (default),
+ ``json`` and ``json-pretty``
+
+lvm
+---
+
+By making use of LVM tags, the ``lvm`` sub-command is able to store and later
+re-discover and query devices associated with OSDs so that they can later be
+activated.
+
+Subcommands:
+
+**batch**
+Creates OSDs from a list of devices using a ``filestore``
+or ``bluestore`` (default) setup. It will create all necessary volume groups
+and logical volumes required to have a working OSD.
+
+Example usage with three devices::
+
+ ceph-volume lvm batch --bluestore /dev/sda /dev/sdb /dev/sdc
+
+Optional arguments:
+
+* [-h, --help] show the help message and exit
+* [--bluestore] Use the bluestore objectstore (default)
+* [--filestore] Use the filestore objectstore
+* [--yes] Skip the report and prompt to continue provisioning
+* [--prepare] Only prepare OSDs, do not activate
+* [--dmcrypt] Enable encryption for the underlying OSD devices
+* [--crush-device-class] Define a CRUSH device class to assign the OSD to
+* [--no-systemd] Do not enable or create any systemd units
+* [--osds-per-device] Provision more than 1 (the default) OSD per device
+* [--report] Report what the potential outcome would be for the current input (requires devices to be passed in)
+* [--format] Output format when reporting (used along with --report), can be one of 'pretty' (default) or 'json'
+* [--block-db-size] Set (or override) the "bluestore_block_db_size" value, in bytes
+* [--journal-size] Override the "osd_journal_size" value, in megabytes
+
+Required positional arguments:
+
+* <DEVICE> Full path to a raw device, like ``/dev/sda``. Multiple
+ ``<DEVICE>`` paths can be passed in.
+
+
+**activate**
+Enables a systemd unit that persists the OSD ID and its UUID (also called
+``fsid`` in Ceph CLI tools), so that at boot time it can understand what OSD is
+enabled and needs to be mounted.
+
+Usage::
+
+ ceph-volume lvm activate --bluestore <osd id> <osd fsid>
+
+Optional Arguments:
+
+* [-h, --help] show the help message and exit
+* [--auto-detect-objectstore] Automatically detect the objectstore by inspecting
+ the OSD
+* [--bluestore] bluestore objectstore (default)
+* [--filestore] filestore objectstore
+* [--all] Activate all OSDs found in the system
+* [--no-systemd] Skip creating and enabling systemd units and starting of OSD
+ services
+
+Multiple OSDs can be activated at once by using the (idempotent) ``--all`` flag::
+
+ ceph-volume lvm activate --all
+
+
+**prepare**
+Prepares a logical volume to be used as an OSD and journal using a ``filestore``
+or ``bluestore`` (default) setup. It will not create or modify the logical volumes
+except for adding extra metadata.
+
+Usage::
+
+ ceph-volume lvm prepare --filestore --data <data lv> --journal <journal device>
+
+Optional arguments:
+
+* [-h, --help] show the help message and exit
+* [--journal JOURNAL] A logical group name, path to a logical volume, or path to a device
+* [--bluestore] Use the bluestore objectstore (default)
+* [--block.wal] Path to a bluestore block.wal logical volume or partition
+* [--block.db] Path to a bluestore block.db logical volume or partition
+* [--filestore] Use the filestore objectstore
+* [--dmcrypt] Enable encryption for the underlying OSD devices
+* [--osd-id OSD_ID] Reuse an existing OSD id
+* [--osd-fsid OSD_FSID] Reuse an existing OSD fsid
+* [--crush-device-class] Define a CRUSH device class to assign the OSD to
+
+Required arguments:
+
+* --data A logical group name or a path to a logical volume
+
+For encrypting an OSD, the ``--dmcrypt`` flag must be added when preparing
+(also supported in the ``create`` sub-command).
+
+
+**create**
+Wraps the two-step process to provision a new OSD (calling ``prepare`` first
+and then ``activate``) into a single step. The reason to prefer ``prepare``
+followed by ``activate`` is to introduce new OSDs into a cluster gradually and
+to avoid rebalancing large amounts of data at once.
+
+The single-call process unifies exactly what ``prepare`` and ``activate`` do,
+with the convenience of doing it all at once. Flags and general usage are
+equivalent to those of the ``prepare`` and ``activate`` subcommand.
+
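+For instance, a bluestore OSD could be created in one step from a single raw
+device like this (the device path is a placeholder)::
+
+    ceph-volume lvm create --bluestore --data /dev/sdb
+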
+**trigger**
+This subcommand is not meant to be used directly, and it is used by systemd so
+that it proxies input to ``ceph-volume lvm activate`` by parsing the
+input from systemd, detecting the UUID and ID associated with an OSD.
+
+Usage::
+
+ ceph-volume lvm trigger <SYSTEMD-DATA>
+
+The systemd "data" is expected to be in the format of::
+
+ <OSD ID>-<OSD UUID>
+
+The lvs associated with the OSD need to have been prepared previously,
+so that all needed tags and metadata exist.
+
+Positional arguments:
+
+* <SYSTEMD_DATA> Data from a systemd unit containing ID and UUID of the OSD.
+
+**list**
+List devices or logical volumes associated with Ceph. An association is
+determined if a device has information relating to an OSD. This is
+verified by querying LVM's metadata and correlating it with devices.
+
+The lvs associated with the OSD need to have been prepared previously by
+ceph-volume so that all needed tags and metadata exist.
+
+Usage::
+
+ ceph-volume lvm list
+
+List a particular device, reporting all metadata about it::
+
+ ceph-volume lvm list /dev/sda1
+
+List a logical volume, along with all its metadata (vg is a volume
+group, and lv the logical volume name)::
+
+ ceph-volume lvm list {vg/lv}
+
+Positional arguments:
+
+* <DEVICE> Either in the form of ``vg/lv`` for logical volumes,
+ ``/path/to/sda1`` or ``/path/to/sda`` for regular devices.
+
+
+**zap**
+Zaps the given logical volume or partition. If given a path to a logical
+volume it must be in the format of vg/lv. Any file systems present
+on the given lv or partition will be removed and all data will be purged.
+
+However, the lv or partition will be kept intact.
+
+Usage, for logical volumes::
+
+ ceph-volume lvm zap {vg/lv}
+
+Usage, for logical partitions::
+
+ ceph-volume lvm zap /dev/sdc1
+
+For full removal of the device use the ``--destroy`` flag (allowed for all
+device types)::
+
+ ceph-volume lvm zap --destroy /dev/sdc1
+
+Multiple devices can be removed by specifying the OSD ID and/or the OSD FSID::
+
+ ceph-volume lvm zap --destroy --osd-id 1
+ ceph-volume lvm zap --destroy --osd-id 1 --osd-fsid C9605912-8395-4D76-AFC0-7DFDAC315D59
+
+
+Positional arguments:
+
+* <DEVICE> Either in the form of ``vg/lv`` for logical volumes,
+ ``/path/to/sda1`` or ``/path/to/sda`` for regular devices.
+
+
+new-wal
+^^^^^^^
+
+Attaches the given logical volume to the OSD as a WAL device. The logical
+volume name format is vg/lv. Fails if the OSD already has a WAL attached.
+
+Usage::
+
+ ceph-volume lvm new-wal --osd-id OSD_ID --osd-fsid OSD_FSID --target <target lv>
+
+Optional arguments:
+
+.. option:: -h, --help
+
+ show the help message and exit
+
+.. option:: --no-systemd
+
+ Skip checking OSD systemd unit
+
+Required arguments:
+
+.. option:: --target
+
+ logical volume name to attach as WAL
+
+new-db
+^^^^^^
+
+Attaches the given logical volume to the OSD as a DB device. The logical
+volume name format is vg/lv. Fails if the OSD already has a DB attached.
+
+Usage::
+
+ ceph-volume lvm new-db --osd-id OSD_ID --osd-fsid OSD_FSID --target <target lv>
+
+Optional arguments:
+
+.. option:: -h, --help
+
+ show the help message and exit
+
+.. option:: --no-systemd
+
+ Skip checking OSD systemd unit
+
+Required arguments:
+
+.. option:: --target
+
+ logical volume name to attach as DB
+
+migrate
+^^^^^^^
+
+Moves BlueFS data from the source volume(s) to the target volume; source
+volumes (other than the main, i.e. data or block, volume) are removed on
+success. Only LVM volumes are permitted as the target, either an already
+attached one or a new one. In the latter case it is attached to the OSD,
+replacing one of the source devices. The following replacement rules apply
+(in order of precedence; stop at the first match):
+
+ - if the source list has a DB volume, the target device replaces it.
+ - if the source list has a WAL volume, the target device replaces it.
+ - if the source list has only a slow volume, the operation is not permitted
+   and requires explicit allocation via the new-db/new-wal command.
+
+Usage::
+
+ ceph-volume lvm migrate --osd-id OSD_ID --osd-fsid OSD_FSID --target <target lv> --from {data|db|wal} [{data|db|wal} ...]
+
+Optional arguments:
+
+.. option:: -h, --help
+
+ show the help message and exit
+
+.. option:: --no-systemd
+
+ Skip checking OSD systemd unit
+
+Required arguments:
+
+.. option:: --from
+
+ list of source device type names
+
+.. option:: --target
+
+ logical volume to move data to
+
+simple
+------
+
+Scan legacy OSD directories or data devices that may have been created by
+ceph-disk, or manually.
+
+Subcommands:
+
+**activate**
+Enables a systemd unit that persists the OSD ID and its UUID (also called
+``fsid`` in Ceph CLI tools), so that at boot time it can understand what OSD is
+enabled and needs to be mounted, while reading information that was previously
+created and persisted at ``/etc/ceph/osd/`` in JSON format.
+
+Usage::
+
+ ceph-volume simple activate --bluestore <osd id> <osd fsid>
+
+Optional Arguments:
+
+* [-h, --help] show the help message and exit
+* [--bluestore] bluestore objectstore (default)
+* [--filestore] filestore objectstore
+
+Note: It requires a matching JSON file with the following format::
+
+ /etc/ceph/osd/<osd id>-<osd fsid>.json
+
+
+**scan**
+Scan a running OSD or a data device for OSD metadata that can later be used to
+activate and manage the OSD with ceph-volume. The scan method will create
+a JSON file with the required information plus anything else found in the OSD
+directory.
+
+Optionally, the JSON blob can be sent to stdout for further inspection.
+
+Usage on all running OSDs::
+
+ ceph-volume simple scan
+
+Usage on data devices::
+
+ ceph-volume simple scan <data device>
+
+Running OSD directories::
+
+ ceph-volume simple scan <path to osd dir>
+
+
+Optional arguments:
+
+* [-h, --help] show the help message and exit
+* [--stdout] Send the JSON blob to stdout
+* [--force] If the JSON file exists at destination, overwrite it
+
+Optional Positional arguments:
+
+* <DATA DEVICE or OSD DIR> Actual data partition or a path to the running OSD
+
+**trigger**
+This subcommand is not meant to be used directly, and it is used by systemd so
+that it proxies input to ``ceph-volume simple activate`` by parsing the
+input from systemd, detecting the UUID and ID associated with an OSD.
+
+Usage::
+
+ ceph-volume simple trigger <SYSTEMD-DATA>
+
+The systemd "data" is expected to be in the format of::
+
+ <OSD ID>-<OSD UUID>
+
+The JSON file associated with the OSD needs to have been persisted previously
+by a scan (or manually), so that all needed metadata can be used.
+
+Positional arguments:
+
+* <SYSTEMD_DATA> Data from a systemd unit containing ID and UUID of the OSD.
+
+
+Availability
+============
+
+:program:`ceph-volume` is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
+the documentation at http://docs.ceph.com/ for more information.
+
+
+See also
+========
+
+:doc:`ceph-osd <ceph-osd>`\(8),
diff --git a/doc/man/8/ceph.rst b/doc/man/8/ceph.rst
new file mode 100644
index 000000000..7fefe58d6
--- /dev/null
+++ b/doc/man/8/ceph.rst
@@ -0,0 +1,1649 @@
+:orphan:
+
+==================================
+ ceph -- ceph administration tool
+==================================
+
+.. program:: ceph
+
+Synopsis
+========
+
+| **ceph** **auth** [ *add* \| *caps* \| *del* \| *export* \| *get* \| *get-key* \| *get-or-create* \| *get-or-create-key* \| *import* \| *list* \| *print-key* \| *print_key* ] ...
+
+| **ceph** **compact**
+
+| **ceph** **config** [ *dump* | *ls* | *help* | *get* | *show* | *show-with-defaults* | *set* | *rm* | *log* | *reset* | *assimilate-conf* | *generate-minimal-conf* ] ...
+
+| **ceph** **config-key** [ *rm* | *exists* | *get* | *ls* | *dump* | *set* ] ...
+
+| **ceph** **daemon** *<name>* \| *<path>* *<command>* ...
+
+| **ceph** **daemonperf** *<name>* \| *<path>* [ *interval* [ *count* ] ]
+
+| **ceph** **df** *{detail}*
+
+| **ceph** **fs** [ *ls* \| *new* \| *reset* \| *rm* \| *authorize* ] ...
+
+| **ceph** **fsid**
+
+| **ceph** **health** *{detail}*
+
+| **ceph** **injectargs** *<injectedargs>* [ *<injectedargs>*... ]
+
+| **ceph** **log** *<logtext>* [ *<logtext>*... ]
+
+| **ceph** **mds** [ *compat* \| *fail* \| *rm* \| *rmfailed* \| *set_state* \| *stat* \| *repaired* ] ...
+
+| **ceph** **mon** [ *add* \| *dump* \| *getmap* \| *remove* \| *stat* ] ...
+
+| **ceph** **osd** [ *blocklist* \| *blocked-by* \| *create* \| *new* \| *deep-scrub* \| *df* \| *down* \| *dump* \| *erasure-code-profile* \| *find* \| *getcrushmap* \| *getmap* \| *getmaxosd* \| *in* \| *ls* \| *lspools* \| *map* \| *metadata* \| *ok-to-stop* \| *out* \| *pause* \| *perf* \| *pg-temp* \| *force-create-pg* \| *primary-affinity* \| *primary-temp* \| *repair* \| *reweight* \| *reweight-by-pg* \| *rm* \| *destroy* \| *purge* \| *safe-to-destroy* \| *scrub* \| *set* \| *setcrushmap* \| *setmaxosd* \| *stat* \| *tree* \| *unpause* \| *unset* ] ...
+
+| **ceph** **osd** **crush** [ *add* \| *add-bucket* \| *create-or-move* \| *dump* \| *get-tunable* \| *link* \| *move* \| *remove* \| *rename-bucket* \| *reweight* \| *reweight-all* \| *reweight-subtree* \| *rm* \| *rule* \| *set* \| *set-tunable* \| *show-tunables* \| *tunables* \| *unlink* ] ...
+
+| **ceph** **osd** **pool** [ *create* \| *delete* \| *get* \| *get-quota* \| *ls* \| *mksnap* \| *rename* \| *rmsnap* \| *set* \| *set-quota* \| *stats* ] ...
+
+| **ceph** **osd** **pool** **application** [ *disable* \| *enable* \| *get* \| *rm* \| *set* ] ...
+
+| **ceph** **osd** **tier** [ *add* \| *add-cache* \| *cache-mode* \| *remove* \| *remove-overlay* \| *set-overlay* ] ...
+
+| **ceph** **pg** [ *debug* \| *deep-scrub* \| *dump* \| *dump_json* \| *dump_pools_json* \| *dump_stuck* \| *getmap* \| *ls* \| *ls-by-osd* \| *ls-by-pool* \| *ls-by-primary* \| *map* \| *repair* \| *scrub* \| *stat* ] ...
+
+| **ceph** **quorum_status**
+
+| **ceph** **report** { *<tags>* [ *<tags>...* ] }
+
+| **ceph** **status**
+
+| **ceph** **sync** **force** {--yes-i-really-mean-it} {--i-know-what-i-am-doing}
+
+| **ceph** **tell** *<name (type.id)> <command> [options...]*
+
+| **ceph** **version**
+
+Description
+===========
+
+:program:`ceph` is a control utility which is used for manual deployment and maintenance
+of a Ceph cluster. It provides a diverse set of commands that allow deployment of
+monitors, OSDs, placement groups and MDS daemons, as well as overall maintenance
+and administration of the cluster.
+
+Commands
+========
+
+auth
+----
+
+Manage authentication keys. It is used for adding, removing, exporting
+or updating authentication keys for a particular entity such as a monitor or
+OSD. It uses several additional subcommands.
+
+Subcommand ``add`` adds authentication info for a particular entity from an
+input file, or generates a random key if no input is given, along with any caps
+specified in the command.
+
+Usage::
+
+ ceph auth add <entity> {<caps> [<caps>...]}
+
+Subcommand ``caps`` updates caps for **name** from caps specified in the command.
+
+Usage::
+
+ ceph auth caps <entity> <caps> [<caps>...]
+
+Subcommand ``del`` deletes all caps for ``name``.
+
+Usage::
+
+ ceph auth del <entity>
+
+Subcommand ``export`` writes keyring for requested entity, or master keyring if
+none given.
+
+Usage::
+
+ ceph auth export {<entity>}
+
+Subcommand ``get`` writes keyring file with requested key.
+
+Usage::
+
+ ceph auth get <entity>
+
+Subcommand ``get-key`` displays requested key.
+
+Usage::
+
+ ceph auth get-key <entity>
+
+Subcommand ``get-or-create`` adds authentication info for a particular entity
+from an input file, or generates a random key if no input is given, along with
+any caps specified in the command.
+
+Usage::
+
+ ceph auth get-or-create <entity> {<caps> [<caps>...]}
+
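+For instance, a client key restricted to a single pool might be created like
+this (the entity name ``client.foo`` and pool name ``foo`` are examples only)::
+
+    ceph auth get-or-create client.foo mon 'allow r' osd 'allow rw pool=foo'
+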
+Subcommand ``get-or-create-key`` gets or adds key for ``name`` from system/caps
+pairs specified in the command. If key already exists, any given caps must match
+the existing caps for that key.
+
+Usage::
+
+ ceph auth get-or-create-key <entity> {<caps> [<caps>...]}
+
+Subcommand ``import`` reads keyring from input file.
+
+Usage::
+
+ ceph auth import
+
+Subcommand ``ls`` lists authentication state.
+
+Usage::
+
+ ceph auth ls
+
+Subcommand ``print-key`` displays requested key.
+
+Usage::
+
+ ceph auth print-key <entity>
+
+Subcommand ``print_key`` displays requested key.
+
+Usage::
+
+ ceph auth print_key <entity>
+
+
+compact
+-------
+
+Causes compaction of monitor's leveldb storage.
+
+Usage::
+
+ ceph compact
+
+
+config
+------
+
+Configure the cluster. By default, Ceph daemons and clients retrieve their
+configuration options from the monitor when they start, and are updated if any
+of the tracked options is changed at run time. It uses the following additional
+subcommands.
+
+Subcommand ``dump`` to dump all options for the cluster
+
+Usage::
+
+ ceph config dump
+
+Subcommand ``ls`` to list all option names for the cluster
+
+Usage::
+
+ ceph config ls
+
+Subcommand ``help`` to describe the specified configuration option
+
+Usage::
+
+ ceph config help <option>
+
+Subcommand ``get`` to dump the option(s) for the specified entity.
+
+Usage::
+
+ ceph config get <who> {<option>}
+
+Subcommand ``show`` to display the running configuration of the specified
+entity. Please note, unlike ``get``, which only shows the options managed
+by the monitor, ``show`` displays all the configuration options being actively
+used. These options are pulled from several sources, for instance, the
+compiled-in default value, the monitor's configuration database, and the
+``ceph.conf`` file on the host. The options can even be overridden at runtime.
+So, there is a chance
+that the configuration options in the output of ``show`` could be different
+from those in the output of ``get``.
+
+Usage::
+
+ ceph config show {<who>}
+
+Subcommand ``show-with-defaults`` to display the running configuration along with the compiled-in defaults of the specified entity
+
+Usage::
+
+   ceph config show-with-defaults {<who>}
+
+Subcommand ``set`` to set an option for one or more specified entities
+
+Usage::
+
+ ceph config set <who> <option> <value> {--force}
+
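+For example, the debug level of all OSDs could be raised like this (the option
+and value are examples only)::
+
+    ceph config set osd debug_osd 10
+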
+Subcommand ``rm`` to clear an option for one or more entities
+
+Usage::
+
+ ceph config rm <who> <option>
+
+Subcommand ``log`` to show recent history of config changes. If the ``count``
+option is omitted it defaults to 10.
+
+Usage::
+
+ ceph config log {<count>}
+
+Subcommand ``reset`` to revert configuration to the specified historical version
+
+Usage::
+
+ ceph config reset <version>
+
+
+Subcommand ``assimilate-conf`` to assimilate options from stdin, and return a
+new, minimal conf file
+
+Usage::
+
+ ceph config assimilate-conf -i <input-config-path> > <output-config-path>
+ ceph config assimilate-conf < <input-config-path>
+
+Subcommand ``generate-minimal-conf`` to generate a minimal ``ceph.conf`` file,
+which can be used for bootstrapping a daemon or a client.
+
+Usage::
+
+ ceph config generate-minimal-conf > <minimal-config-path>
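+
+For example, a common workflow is to absorb an existing ``ceph.conf`` into the
+monitors' configuration database and then write back a minimal stub (the paths
+are illustrative)::
+
+    ceph config assimilate-conf -i /etc/ceph/ceph.conf
+    ceph config generate-minimal-conf > /etc/ceph/ceph.conf.minimal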
+
+
+config-key
+----------
+
+Manage configuration keys. Config-key is a general purpose key/value service
+offered by the monitors. This service is mainly used by Ceph tools and daemons
+for persisting various settings; among other things, ceph-mgr modules use it for
+storing their options. It uses some additional subcommands.
+
+Subcommand ``rm`` deletes configuration key.
+
+Usage::
+
+ ceph config-key rm <key>
+
+Subcommand ``exists`` checks for a configuration key's existence.
+
+Usage::
+
+ ceph config-key exists <key>
+
+Subcommand ``get`` gets the configuration key.
+
+Usage::
+
+ ceph config-key get <key>
+
+Subcommand ``ls`` lists configuration keys.
+
+Usage::
+
+ ceph config-key ls
+
+Subcommand ``dump`` dumps configuration keys and values.
+
+Usage::
+
+ ceph config-key dump
+
+Subcommand ``set`` puts configuration key and value.
+
+Usage::
+
+ ceph config-key set <key> {<val>}
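+
+For example, to store an arbitrary value under a hypothetical key (both key and
+value are placeholders)::
+
+    ceph config-key set example/setting 42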
+
+
+daemon
+------
+
+Submit admin-socket commands.
+
+Usage::
+
+ ceph daemon {daemon_name|socket_path} {command} ...
+
+Example::
+
+ ceph daemon osd.0 help
+
+
+daemonperf
+----------
+
+Watch performance counters from a Ceph daemon.
+
+Usage::
+
+ ceph daemonperf {daemon_name|socket_path} [{interval} [{count}]]
+
+
+df
+--
+
+Show cluster's free space status.
+
+Usage::
+
+ ceph df {detail}
+
+.. _ceph features:
+
+features
+--------
+
+Show the releases and features of all daemons and clients connected to the
+cluster, along with a count of each in buckets grouped by the corresponding
+features/releases. Each release of Ceph supports a different set of features,
+expressed by the features bitmask. New cluster features require that clients
+support the feature, or else they are not allowed to connect to the cluster.
+As new features or capabilities are enabled after an upgrade, older clients
+are prevented from connecting.
+
+Usage::
+
+ ceph features
+
+fs
+--
+
+Manage CephFS file systems. It uses some additional subcommands.
+
+Subcommand ``ls`` to list file systems
+
+Usage::
+
+ ceph fs ls
+
+Subcommand ``new`` to make a new file system using named pools <metadata> and <data>
+
+Usage::
+
+ ceph fs new <fs_name> <metadata> <data>
+
+Subcommand ``reset`` is used for disaster recovery only: reset to a single-MDS map
+
+Usage::
+
+ ceph fs reset <fs_name> {--yes-i-really-mean-it}
+
+Subcommand ``rm`` to disable the named file system
+
+Usage::
+
+ ceph fs rm <fs_name> {--yes-i-really-mean-it}
+
+Subcommand ``authorize`` creates a new client that will be authorized for the
+given path in ``<fs_name>``. Pass ``/`` to authorize for the entire FS.
+``<perms>`` below can be ``r``, ``rw`` or ``rwp``.
+
+Usage::
+
+ ceph fs authorize <fs_name> client.<client_id> <path> <perms> [<path> <perms>...]
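+
+For example, assuming a file system named ``cephfs``, the following might grant
+one hypothetical client read/write access to the whole file system and another
+client read-only access to a subdirectory::
+
+    ceph fs authorize cephfs client.foo / rw
+    ceph fs authorize cephfs client.bar /data r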
+
+fsid
+----
+
+Show cluster's FSID/UUID.
+
+Usage::
+
+ ceph fsid
+
+
+health
+------
+
+Show cluster's health.
+
+Usage::
+
+ ceph health {detail}
+
+
+heap
+----
+
+Show heap usage info (available only if compiled with tcmalloc).
+
+Usage::
+
+ ceph tell <name (type.id)> heap dump|start_profiler|stop_profiler|stats
+
+Subcommand ``release`` makes TCMalloc release no-longer-used memory back to the kernel at once.
+
+Usage::
+
+ ceph tell <name (type.id)> heap release
+
+Subcommand ``(get|set)_release_rate`` gets or sets the TCMalloc memory release rate.
+TCMalloc releases no-longer-used memory back to the kernel gradually; the rate controls
+how quickly this happens. Increase this setting to make TCMalloc return unused memory
+more frequently; 0 means never return memory to the system, and 1 means wait for 1000
+pages after releasing a page to the system. The default is ``1.0``.
+
+Usage::
+
+ ceph tell <name (type.id)> heap get_release_rate|set_release_rate {<val>}
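+
+For example, to ask a hypothetical ``osd.0`` to return unused memory to the
+kernel more aggressively than the default rate::
+
+    ceph tell osd.0 heap set_release_rate 2.0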
+
+injectargs
+----------
+
+Inject configuration arguments into monitor.
+
+Usage::
+
+ ceph injectargs <injected_args> [<injected_args>...]
+
+
+log
+---
+
+Log supplied text to the monitor log.
+
+Usage::
+
+ ceph log <logtext> [<logtext>...]
+
+
+mds
+---
+
+Manage metadata server configuration and administration. It uses some
+additional subcommands.
+
+Subcommand ``compat`` manages compatible features. It uses some additional
+subcommands.
+
+Subcommand ``rm_compat`` removes compatible feature.
+
+Usage::
+
+ ceph mds compat rm_compat <int[0-]>
+
+Subcommand ``rm_incompat`` removes incompatible feature.
+
+Usage::
+
+ ceph mds compat rm_incompat <int[0-]>
+
+Subcommand ``show`` shows mds compatibility settings.
+
+Usage::
+
+ ceph mds compat show
+
+Subcommand ``fail`` forces an MDS to the failed state.
+
+Usage::
+
+ ceph mds fail <role|gid>
+
+Subcommand ``rm`` removes an inactive MDS.
+
+Usage::
+
+    ceph mds rm <int[0-]> <name (type.id)>
+
+Subcommand ``rmfailed`` removes failed mds.
+
+Usage::
+
+ ceph mds rmfailed <int[0-]>
+
+Subcommand ``set_state`` sets mds state of <gid> to <numeric-state>.
+
+Usage::
+
+ ceph mds set_state <int[0-]> <int[0-20]>
+
+Subcommand ``stat`` shows MDS status.
+
+Usage::
+
+ ceph mds stat
+
+Subcommand ``repaired`` marks a damaged MDS rank as no longer damaged.
+
+Usage::
+
+ ceph mds repaired <role>
+
+mon
+---
+
+Manage monitor configuration and administration. It uses some additional
+subcommands.
+
+Subcommand ``add`` adds new monitor named <name> at <addr>.
+
+Usage::
+
+ ceph mon add <name> <IPaddr[:port]>
+
+Subcommand ``dump`` dumps formatted monmap (optionally from epoch)
+
+Usage::
+
+ ceph mon dump {<int[0-]>}
+
+Subcommand ``getmap`` gets monmap.
+
+Usage::
+
+ ceph mon getmap {<int[0-]>}
+
+Subcommand ``remove`` removes monitor named <name>.
+
+Usage::
+
+ ceph mon remove <name>
+
+Subcommand ``stat`` summarizes monitor status.
+
+Usage::
+
+ ceph mon stat
+
+mgr
+---
+
+Ceph manager daemon configuration and management.
+
+Subcommand ``dump`` dumps the latest MgrMap, which describes the active
+and standby manager daemons.
+
+Usage::
+
+ ceph mgr dump
+
+Subcommand ``fail`` will mark a manager daemon as failed, removing it
+from the manager map. If it is the active manager daemon a standby
+will take its place.
+
+Usage::
+
+ ceph mgr fail <name>
+
+Subcommand ``module ls`` will list currently enabled manager modules (plugins).
+
+Usage::
+
+ ceph mgr module ls
+
+Subcommand ``module enable`` will enable a manager module. Available modules are included in MgrMap and visible via ``mgr dump``.
+
+Usage::
+
+ ceph mgr module enable <module>
+
+Subcommand ``module disable`` will disable an active manager module.
+
+Usage::
+
+ ceph mgr module disable <module>
+
+Subcommand ``metadata`` will report metadata about all manager daemons or, if the name is specified, a single manager daemon.
+
+Usage::
+
+ ceph mgr metadata [name]
+
+Subcommand ``versions`` will report a count of running daemon versions.
+
+Usage::
+
+ ceph mgr versions
+
+Subcommand ``count-metadata`` will report a count of any daemon metadata field.
+
+Usage::
+
+ ceph mgr count-metadata <field>
+
+.. _ceph-admin-osd:
+
+osd
+---
+
+Manage OSD configuration and administration. It uses some additional
+subcommands.
+
+Subcommand ``blocklist`` manages blocklisted clients. It uses some additional
+subcommands.
+
+Subcommand ``add`` adds <addr> to the blocklist (optionally until <expire> seconds
+from now).
+
+Usage::
+
+ ceph osd blocklist add <EntityAddr> {<float[0.0-]>}
+
+Subcommand ``ls`` shows blocklisted clients.
+
+Usage::
+
+ ceph osd blocklist ls
+
+Subcommand ``rm`` removes <addr> from the blocklist.
+
+Usage::
+
+ ceph osd blocklist rm <EntityAddr>
+
+Subcommand ``blocked-by`` prints a histogram of which OSDs are blocking their peers
+
+Usage::
+
+ ceph osd blocked-by
+
+Subcommand ``create`` creates new osd (with optional UUID and ID).
+
+This command is DEPRECATED as of the Luminous release, and will be removed in
+a future release.
+
+Subcommand ``new`` should instead be used.
+
+Usage::
+
+ ceph osd create {<uuid>} {<id>}
+
+Subcommand ``new`` can be used to create a new OSD or to recreate a previously
+destroyed OSD with a specific *id*. The new OSD will have the specified *uuid*,
+and the command expects a JSON file containing the base64 cephx key for auth
+entity *client.osd.<id>*, as well as an optional base64 cephx key for dm-crypt
+lockbox access and a dm-crypt key. Specifying a dm-crypt key requires specifying
+the accompanying lockbox cephx key.
+
+Usage::
+
+ ceph osd new {<uuid>} {<id>} -i {<params.json>}
+
+The parameters JSON file is optional, but if provided, it is expected to take
+one of the following forms::
+
+ {
+ "cephx_secret": "AQBWtwhZdBO5ExAAIDyjK2Bh16ZXylmzgYYEjg==",
+ "crush_device_class": "myclass"
+ }
+
+Or::
+
+ {
+ "cephx_secret": "AQBWtwhZdBO5ExAAIDyjK2Bh16ZXylmzgYYEjg==",
+ "cephx_lockbox_secret": "AQDNCglZuaeVCRAAYr76PzR1Anh7A0jswkODIQ==",
+ "dmcrypt_key": "<dm-crypt key>",
+ "crush_device_class": "myclass"
+ }
+
+Or::
+
+ {
+ "crush_device_class": "myclass"
+ }
+
+The "crush_device_class" property is optional. If specified, it will set the
+initial CRUSH device class for the new OSD.
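+
+For example, assuming one of the JSON snippets above has been saved to a file
+(the file name and the use of ``uuidgen`` here are illustrative)::
+
+    ceph osd new $(uuidgen) -i params.json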
+
+
+Subcommand ``crush`` is used for CRUSH management. It uses some additional
+subcommands.
+
+Subcommand ``add`` adds or updates crushmap position and weight for <name> with
+<weight> and location <args>.
+
+Usage::
+
+ ceph osd crush add <osdname (id|osd.id)> <float[0.0-]> <args> [<args>...]
+
+Subcommand ``add-bucket`` adds no-parent (probably root) crush bucket <name> of
+type <type>.
+
+Usage::
+
+ ceph osd crush add-bucket <name> <type>
+
+Subcommand ``create-or-move`` creates entry or moves existing entry for <name>
+<weight> at/to location <args>.
+
+Usage::
+
+ ceph osd crush create-or-move <osdname (id|osd.id)> <float[0.0-]> <args>
+ [<args>...]
+
+Subcommand ``dump`` dumps crush map.
+
+Usage::
+
+ ceph osd crush dump
+
+Subcommand ``get-tunable`` gets the crush tunable ``straw_calc_version``.
+
+Usage::
+
+ ceph osd crush get-tunable straw_calc_version
+
+Subcommand ``link`` links existing entry for <name> under location <args>.
+
+Usage::
+
+ ceph osd crush link <name> <args> [<args>...]
+
+Subcommand ``move`` moves existing entry for <name> to location <args>.
+
+Usage::
+
+ ceph osd crush move <name> <args> [<args>...]
+
+Subcommand ``remove`` removes <name> from crush map (everywhere, or just at
+<ancestor>).
+
+Usage::
+
+ ceph osd crush remove <name> {<ancestor>}
+
+Subcommand ``rename-bucket`` renames bucket <srcname> to <dstname>
+
+Usage::
+
+ ceph osd crush rename-bucket <srcname> <dstname>
+
+Subcommand ``reweight`` changes <name>'s weight to <weight> in the crush map.
+
+Usage::
+
+ ceph osd crush reweight <name> <float[0.0-]>
+
+Subcommand ``reweight-all`` recalculates the weights for the tree to
+ensure that they sum correctly.
+
+Usage::
+
+ ceph osd crush reweight-all
+
+Subcommand ``reweight-subtree`` changes all leaf items beneath <name>
+to <weight> in crush map
+
+Usage::
+
+ ceph osd crush reweight-subtree <name> <weight>
+
+Subcommand ``rm`` removes <name> from crush map (everywhere, or just at
+<ancestor>).
+
+Usage::
+
+ ceph osd crush rm <name> {<ancestor>}
+
+Subcommand ``rule`` is used for creating crush rules. It uses some additional
+subcommands.
+
+Subcommand ``create-erasure`` creates crush rule <name> for erasure coded pool
+created with <profile> (default default).
+
+Usage::
+
+ ceph osd crush rule create-erasure <name> {<profile>}
+
+Subcommand ``create-simple`` creates crush rule <name> to start from <root>,
+replicate across buckets of type <type>, using a choose mode of <firstn|indep>
+(default firstn; indep best for erasure pools).
+
+Usage::
+
+ ceph osd crush rule create-simple <name> <root> <type> {firstn|indep}
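+
+For example, the following might create a rule that replicates across hosts
+under a root bucket named ``default`` (the rule and bucket names are placeholders)::
+
+    ceph osd crush rule create-simple replicate_by_host default host firstn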
+
+Subcommand ``dump`` dumps crush rule <name> (default all).
+
+Usage::
+
+ ceph osd crush rule dump {<name>}
+
+Subcommand ``ls`` lists crush rules.
+
+Usage::
+
+ ceph osd crush rule ls
+
+Subcommand ``rm`` removes crush rule <name>.
+
+Usage::
+
+ ceph osd crush rule rm <name>
+
+Subcommand ``set``, used alone, sets the crush map from the input file.
+
+Usage::
+
+ ceph osd crush set
+
+Subcommand ``set`` with an osdname/osd.id updates the crushmap position and weight
+for <name> to <weight> with location <args>.
+
+Usage::
+
+ ceph osd crush set <osdname (id|osd.id)> <float[0.0-]> <args> [<args>...]
+
+Subcommand ``set-tunable`` sets the crush tunable <tunable> to <value>. The only
+tunable that can be set is straw_calc_version.
+
+Usage::
+
+ ceph osd crush set-tunable straw_calc_version <value>
+
+Subcommand ``show-tunables`` shows current crush tunables.
+
+Usage::
+
+ ceph osd crush show-tunables
+
+Subcommand ``tree`` shows the crush buckets and items in a tree view.
+
+Usage::
+
+ ceph osd crush tree
+
+Subcommand ``tunables`` sets crush tunables values to <profile>.
+
+Usage::
+
+ ceph osd crush tunables legacy|argonaut|bobtail|firefly|hammer|optimal|default
+
+Subcommand ``unlink`` unlinks <name> from crush map (everywhere, or just at
+<ancestor>).
+
+Usage::
+
+ ceph osd crush unlink <name> {<ancestor>}
+
+Subcommand ``df`` shows OSD utilization
+
+Usage::
+
+ ceph osd df {plain|tree}
+
+Subcommand ``deep-scrub`` initiates deep scrub on specified osd.
+
+Usage::
+
+ ceph osd deep-scrub <who>
+
+Subcommand ``down`` sets osd(s) <id> [<id>...] down.
+
+Usage::
+
+ ceph osd down <ids> [<ids>...]
+
+Subcommand ``dump`` prints summary of OSD map.
+
+Usage::
+
+ ceph osd dump {<int[0-]>}
+
+Subcommand ``erasure-code-profile`` is used for managing the erasure code
+profiles. It uses some additional subcommands.
+
+Subcommand ``get`` gets erasure code profile <name>.
+
+Usage::
+
+ ceph osd erasure-code-profile get <name>
+
+Subcommand ``ls`` lists all erasure code profiles.
+
+Usage::
+
+ ceph osd erasure-code-profile ls
+
+Subcommand ``rm`` removes erasure code profile <name>.
+
+Usage::
+
+ ceph osd erasure-code-profile rm <name>
+
+Subcommand ``set`` creates erasure code profile <name> with [<key[=value]> ...]
+pairs. Add ``--force`` at the end to override an existing profile (this is risky).
+
+Usage::
+
+ ceph osd erasure-code-profile set <name> {<profile> [<profile>...]}
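+
+For example, a hypothetical profile using the default plugin with k=4 data
+chunks, m=2 coding chunks and a host failure domain might be created with::
+
+    ceph osd erasure-code-profile set myprofile k=4 m=2 crush-failure-domain=host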
+
+Subcommand ``find`` finds osd <id> in the CRUSH map and shows its location.
+
+Usage::
+
+ ceph osd find <int[0-]>
+
+Subcommand ``getcrushmap`` gets CRUSH map.
+
+Usage::
+
+ ceph osd getcrushmap {<int[0-]>}
+
+Subcommand ``getmap`` gets OSD map.
+
+Usage::
+
+ ceph osd getmap {<int[0-]>}
+
+Subcommand ``getmaxosd`` shows largest OSD id.
+
+Usage::
+
+ ceph osd getmaxosd
+
+Subcommand ``in`` sets osd(s) <id> [<id>...] in.
+
+Usage::
+
+ ceph osd in <ids> [<ids>...]
+
+Subcommand ``lost`` marks osd as permanently lost. THIS DESTROYS DATA IF NO
+MORE REPLICAS EXIST, BE CAREFUL.
+
+Usage::
+
+ ceph osd lost <int[0-]> {--yes-i-really-mean-it}
+
+Subcommand ``ls`` shows all OSD ids.
+
+Usage::
+
+ ceph osd ls {<int[0-]>}
+
+Subcommand ``lspools`` lists pools.
+
+Usage::
+
+ ceph osd lspools {<int>}
+
+Subcommand ``map`` finds pg for <object> in <pool>.
+
+Usage::
+
+ ceph osd map <poolname> <objectname>
+
+Subcommand ``metadata`` fetches metadata for osd <id>.
+
+Usage::
+
+ ceph osd metadata {int[0-]} (default all)
+
+Subcommand ``out`` sets osd(s) <id> [<id>...] out.
+
+Usage::
+
+ ceph osd out <ids> [<ids>...]
+
+Subcommand ``ok-to-stop`` checks whether the list of OSD(s) can be
+stopped without immediately making data unavailable. That is, all
+data should remain readable and writeable, although data redundancy
+may be reduced as some PGs may end up in a degraded (but active)
+state. It will return a success code if it is okay to stop the
+OSD(s), or an error code and informative message if it is not or if no
+conclusion can be drawn at the current time. When ``--max <num>`` is
+provided, up to <num> OSD IDs that can all be stopped simultaneously will
+be returned (including the provided OSDs). This allows larger sets
+of stoppable OSDs to be generated easily by providing a single
+starting OSD and a max. Additional OSDs are drawn from adjacent locations
+in the CRUSH hierarchy.
+
+Usage::
+
+ ceph osd ok-to-stop <id> [<ids>...] [--max <num>]
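+
+For example, to look for a batch of up to four OSDs that can be stopped
+together, starting from a hypothetical ``osd.3``::
+
+    ceph osd ok-to-stop 3 --max 4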
+
+Subcommand ``pause`` pauses osd.
+
+Usage::
+
+ ceph osd pause
+
+Subcommand ``perf`` prints dump of OSD perf summary stats.
+
+Usage::
+
+ ceph osd perf
+
+Subcommand ``pg-temp`` sets the pg_temp mapping pgid:[<id> [<id>...]] (developers
+only).
+
+Usage::
+
+ ceph osd pg-temp <pgid> {<id> [<id>...]}
+
+Subcommand ``force-create-pg`` forces creation of pg <pgid>.
+
+Usage::
+
+ ceph osd force-create-pg <pgid>
+
+
+Subcommand ``pool`` is used for managing data pools. It uses some additional
+subcommands.
+
+Subcommand ``create`` creates pool.
+
+Usage::
+
+ ceph osd pool create <poolname> {<int[0-]>} {<int[0-]>} {replicated|erasure}
+ {<erasure_code_profile>} {<rule>} {<int>} {--autoscale-mode=<on,off,warn>}
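+
+For example, a replicated pool with 128 placement groups might be created with
+the following (the pool name and PG count are illustrative)::
+
+    ceph osd pool create mypool 128 128 replicated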
+
+Subcommand ``delete`` deletes pool.
+
+Usage::
+
+ ceph osd pool delete <poolname> {<poolname>} {--yes-i-really-really-mean-it}
+
+Subcommand ``get`` gets pool parameter <var>.
+
+Usage::
+
+ ceph osd pool get <poolname> size|min_size|pg_num|pgp_num|crush_rule|write_fadvise_dontneed
+
+Only for tiered pools::
+
+ ceph osd pool get <poolname> hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|
+ target_max_objects|target_max_bytes|cache_target_dirty_ratio|cache_target_dirty_high_ratio|
+ cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|
+ min_read_recency_for_promote|hit_set_grade_decay_rate|hit_set_search_last_n
+
+Only for erasure coded pools::
+
+ ceph osd pool get <poolname> erasure_code_profile
+
+Use ``all`` to get all pool parameters that apply to the pool's type::
+
+ ceph osd pool get <poolname> all
+
+Subcommand ``get-quota`` obtains object or byte limits for pool.
+
+Usage::
+
+ ceph osd pool get-quota <poolname>
+
+Subcommand ``ls`` lists pools.
+
+Usage::
+
+ ceph osd pool ls {detail}
+
+Subcommand ``mksnap`` makes snapshot <snap> in <pool>.
+
+Usage::
+
+ ceph osd pool mksnap <poolname> <snap>
+
+Subcommand ``rename`` renames <srcpool> to <destpool>.
+
+Usage::
+
+ ceph osd pool rename <poolname> <poolname>
+
+Subcommand ``rmsnap`` removes snapshot <snap> from <pool>.
+
+Usage::
+
+ ceph osd pool rmsnap <poolname> <snap>
+
+Subcommand ``set`` sets pool parameter <var> to <val>.
+
+Usage::
+
+ ceph osd pool set <poolname> size|min_size|pg_num|
+ pgp_num|crush_rule|hashpspool|nodelete|nopgchange|nosizechange|
+ hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|debug_fake_ec_pool|
+ target_max_bytes|target_max_objects|cache_target_dirty_ratio|
+ cache_target_dirty_high_ratio|
+ cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|
+ min_read_recency_for_promote|write_fadvise_dontneed|hit_set_grade_decay_rate|
+ hit_set_search_last_n
+ <val> {--yes-i-really-mean-it}
+
+Subcommand ``set-quota`` sets object or byte limit on pool.
+
+Usage::
+
+ ceph osd pool set-quota <poolname> max_objects|max_bytes <val>
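+
+For example, to cap a hypothetical pool at roughly 10 GiB (the value is given
+in bytes)::
+
+    ceph osd pool set-quota mypool max_bytes 10737418240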
+
+Subcommand ``stats`` obtains stats from all pools, or from the specified pool.
+
+Usage::
+
+ ceph osd pool stats {<name>}
+
+Subcommand ``application`` is used for adding an annotation to the given
+pool. By default, the possible applications are object, block, and file
+storage (corresponding app-names are "rgw", "rbd", and "cephfs"). However,
+there might be other applications as well. Based on the application, there
+may or may not be some processing conducted.
+
+Subcommand ``disable`` disables the given application on the given pool.
+
+Usage::
+
+ ceph osd pool application disable <pool-name> <app> {--yes-i-really-mean-it}
+
+Subcommand ``enable`` adds an annotation to the given pool for the mentioned
+application.
+
+Usage::
+
+ ceph osd pool application enable <pool-name> <app> {--yes-i-really-mean-it}
+
+Subcommand ``get`` displays the value for the given key that is associated
+with the given application of the given pool. Not passing the optional
+arguments would display all key-value pairs for all applications for all
+pools.
+
+Usage::
+
+ ceph osd pool application get {<pool-name>} {<app>} {<key>}
+
+Subcommand ``rm`` removes the key-value pair for the given key in the given
+application of the given pool.
+
+Usage::
+
+ ceph osd pool application rm <pool-name> <app> <key>
+
+Subcommand ``set`` associates or updates, if it already exists, a key-value
+pair with the given application for the given pool.
+
+Usage::
+
+ ceph osd pool application set <pool-name> <app> <key> <value>
+
+Subcommand ``primary-affinity`` adjusts the osd primary-affinity in the range
+0.0 <= <weight> <= 1.0.
+
+Usage::
+
+ ceph osd primary-affinity <osdname (id|osd.id)> <float[0.0-1.0]>
+
+Subcommand ``primary-temp`` sets primary_temp mapping pgid:<id>|-1 (developers
+only).
+
+Usage::
+
+ ceph osd primary-temp <pgid> <id>
+
+Subcommand ``repair`` initiates repair on a specified osd.
+
+Usage::
+
+ ceph osd repair <who>
+
+Subcommand ``reweight`` reweights osd to 0.0 < <weight> < 1.0.
+
+Usage::
+
+    ceph osd reweight <int[0-]> <float[0.0-1.0]>
+
+Subcommand ``reweight-by-pg`` reweights OSDs by PG distribution
+[overload-percentage-for-consideration, default 120].
+
+Usage::
+
+ ceph osd reweight-by-pg {<int[100-]>} {<poolname> [<poolname...]}
+ {--no-increasing}
+
+Subcommand ``reweight-by-utilization`` reweights OSDs by utilization. It only reweights
+outlier OSDs whose utilization exceeds the average, e.g. the default 120%
+limits reweighting to those OSDs that are more than 20% over the average.
+[overload-threshold, default 120 [max_weight_change, default 0.05 [max_osds_to_adjust, default 4]]]
+
+Usage::
+
+ ceph osd reweight-by-utilization {<int[100-]> {<float[0.0-]> {<int[0-]>}}}
+ {--no-increasing}
+
+Subcommand ``rm`` removes osd(s) <id> [<id>...] from the OSD map.
+
+Usage::
+
+ ceph osd rm <ids> [<ids>...]
+
+Subcommand ``destroy`` marks OSD *id* as *destroyed*, removing its cephx
+entity's keys and all of its dm-crypt and daemon-private config key
+entries.
+
+This command will not remove the OSD from crush, nor will it remove the
+OSD from the OSD map. Instead, once the command successfully completes,
+the OSD will be shown as *destroyed*.
+
+In order to mark an OSD as destroyed, the OSD must first be marked as
+**lost**.
+
+Usage::
+
+ ceph osd destroy <id> {--yes-i-really-mean-it}
+
+
+Subcommand ``purge`` performs a combination of ``osd destroy``,
+``osd rm`` and ``osd crush remove``.
+
+Usage::
+
+ ceph osd purge <id> {--yes-i-really-mean-it}
+
+Subcommand ``safe-to-destroy`` checks whether it is safe to remove or
+destroy an OSD without reducing overall data redundancy or durability.
+It will return a success code if it is definitely safe, or an error
+code and informative message if it is not or if no conclusion can be
+drawn at the current time.
+
+Usage::
+
+ ceph osd safe-to-destroy <id> [<ids>...]
+
+Subcommand ``scrub`` initiates scrub on specified osd.
+
+Usage::
+
+ ceph osd scrub <who>
+
+Subcommand ``set`` sets cluster-wide <flag> by updating the OSD map.
+The ``full`` flag is no longer honored as of the Mimic release, and
+``ceph osd set full`` is not supported in the Octopus release.
+
+Usage::
+
+ ceph osd set pause|noup|nodown|noout|noin|nobackfill|
+ norebalance|norecover|noscrub|nodeep-scrub|notieragent
+
+Subcommand ``setcrushmap`` sets crush map from input file.
+
+Usage::
+
+ ceph osd setcrushmap
+
+Subcommand ``setmaxosd`` sets new maximum osd value.
+
+Usage::
+
+ ceph osd setmaxosd <int[0-]>
+
+Subcommand ``set-require-min-compat-client`` ensures that the cluster remains
+backward compatible with the specified client version. This subcommand prevents
+you from making any changes (e.g., crush tunables, or using new features) that
+would violate the current setting. Please note that this subcommand will fail if
+any connected daemon or client is not compatible with the features offered by
+the given <version>. To see the features and releases of all clients connected
+to the cluster, please see `ceph features`_.
+
+Usage::
+
+ ceph osd set-require-min-compat-client <version>
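+
+For example, the cluster might be restricted to clients that understand the
+Luminous feature set::
+
+    ceph osd set-require-min-compat-client luminous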
+
+Subcommand ``stat`` prints summary of OSD map.
+
+Usage::
+
+ ceph osd stat
+
+Subcommand ``tier`` is used for managing tiers. It uses some additional
+subcommands.
+
+Subcommand ``add`` adds the tier <tierpool> (the second one) to base pool <pool>
+(the first one).
+
+Usage::
+
+ ceph osd tier add <poolname> <poolname> {--force-nonempty}
+
+Subcommand ``add-cache`` adds a cache <tierpool> (the second one) of size <size>
+to existing pool <pool> (the first one).
+
+Usage::
+
+ ceph osd tier add-cache <poolname> <poolname> <int[0-]>
+
+Subcommand ``cache-mode`` specifies the caching mode for cache tier <pool>.
+
+Usage::
+
+ ceph osd tier cache-mode <poolname> writeback|readproxy|readonly|none
+
+Subcommand ``remove`` removes the tier <tierpool> (the second one) from base pool
+<pool> (the first one).
+
+Usage::
+
+ ceph osd tier remove <poolname> <poolname>
+
+Subcommand ``remove-overlay`` removes the overlay pool for base pool <pool>.
+
+Usage::
+
+ ceph osd tier remove-overlay <poolname>
+
+Subcommand ``set-overlay`` sets the overlay pool for base pool <pool> to be
+<overlaypool>.
+
+Usage::
+
+ ceph osd tier set-overlay <poolname> <poolname>
+
+Subcommand ``tree`` prints OSD tree.
+
+Usage::
+
+ ceph osd tree {<int[0-]>}
+
+Subcommand ``unpause`` unpauses osd.
+
+Usage::
+
+ ceph osd unpause
+
+Subcommand ``unset`` unsets cluster-wide <flag> by updating OSD map.
+
+Usage::
+
+ ceph osd unset pause|noup|nodown|noout|noin|nobackfill|
+ norebalance|norecover|noscrub|nodeep-scrub|notieragent
+
+
+pg
+--
+
+Manage the placement groups in OSDs. It uses some
+additional subcommands.
+
+Subcommand ``debug`` shows debug info about pgs.
+
+Usage::
+
+ ceph pg debug unfound_objects_exist|degraded_pgs_exist
+
+Subcommand ``deep-scrub`` starts deep-scrub on <pgid>.
+
+Usage::
+
+ ceph pg deep-scrub <pgid>
+
+Subcommand ``dump`` shows human-readable versions of pg map (only 'all' valid
+with plain).
+
+Usage::
+
+ ceph pg dump {all|summary|sum|delta|pools|osds|pgs|pgs_brief} [{all|summary|sum|delta|pools|osds|pgs|pgs_brief...]}
+
+Subcommand ``dump_json`` shows human-readable version of pg map in json only.
+
+Usage::
+
+ ceph pg dump_json {all|summary|sum|delta|pools|osds|pgs|pgs_brief} [{all|summary|sum|delta|pools|osds|pgs|pgs_brief...]}
+
+Subcommand ``dump_pools_json`` shows pg pools info in json only.
+
+Usage::
+
+ ceph pg dump_pools_json
+
+Subcommand ``dump_stuck`` shows information about stuck pgs.
+
+Usage::
+
+ ceph pg dump_stuck {inactive|unclean|stale|undersized|degraded [inactive|unclean|stale|undersized|degraded...]}
+ {<int>}
+
+Subcommand ``getmap`` gets binary pg map to -o/stdout.
+
+Usage::
+
+ ceph pg getmap
+
+Subcommand ``ls`` lists pg with specific pool, osd, state
+
+Usage::
+
+ ceph pg ls {<int>} {<pg-state> [<pg-state>...]}
+
+Subcommand ``ls-by-osd`` lists pg on osd [osd]
+
+Usage::
+
+ ceph pg ls-by-osd <osdname (id|osd.id)> {<int>}
+ {<pg-state> [<pg-state>...]}
+
+Subcommand ``ls-by-pool`` lists pg with pool = [poolname]
+
+Usage::
+
+ ceph pg ls-by-pool <poolstr> {<int>} {<pg-state> [<pg-state>...]}
+
+Subcommand ``ls-by-primary`` lists pg with primary = [osd]
+
+Usage::
+
+ ceph pg ls-by-primary <osdname (id|osd.id)> {<int>}
+ {<pg-state> [<pg-state>...]}
+
+Subcommand ``map`` shows mapping of pg to osds.
+
+Usage::
+
+ ceph pg map <pgid>
+
+Subcommand ``repair`` starts repair on <pgid>.
+
+Usage::
+
+ ceph pg repair <pgid>
+
+Subcommand ``scrub`` starts scrub on <pgid>.
+
+Usage::
+
+ ceph pg scrub <pgid>
+
+Subcommand ``stat`` shows placement group status.
+
+Usage::
+
+ ceph pg stat
+
+
+quorum
+------
+
+Causes a specific MON to enter or exit quorum.
+
+Usage::
+
+ ceph tell mon.<id> quorum enter|exit
+
+quorum_status
+-------------
+
+Reports status of monitor quorum.
+
+Usage::
+
+ ceph quorum_status
+
+
+report
+------
+
+Reports the full status of the cluster. Optional title tag strings may be appended.
+
+Usage::
+
+ ceph report {<tags> [<tags>...]}
+
+
+status
+------
+
+Shows cluster status.
+
+Usage::
+
+ ceph status
+
+
+tell
+----
+
+Sends a command to a specific daemon.
+
+Usage::
+
+ ceph tell <name (type.id)> <command> [options...]
+
+
+List all available commands.
+
+Usage::
+
+ ceph tell <name (type.id)> help
+
+version
+-------
+
+Show mon daemon version
+
+Usage::
+
+ ceph version
+
+Options
+=======
+
+.. option:: -i infile
+
+ will specify an input file to be passed along as a payload with the
+ command to the monitor cluster. This is only used for specific
+ monitor commands.
+
+.. option:: -o outfile
+
+ will write any payload returned by the monitor cluster with its
+ reply to outfile. Only specific monitor commands (e.g. osd getmap)
+ return a payload.
+
+.. option:: --setuser user
+
+ will apply the appropriate user ownership to the file specified by
+ the option '-o'.
+
+.. option:: --setgroup group
+
+ will apply the appropriate group ownership to the file specified by
+ the option '-o'.
+
+.. option:: -c ceph.conf, --conf=ceph.conf
+
+ Use ceph.conf configuration file instead of the default
+ ``/etc/ceph/ceph.conf`` to determine monitor addresses during startup.
+
+.. option:: --id CLIENT_ID, --user CLIENT_ID
+
+ Client id for authentication.
+
+.. option:: --name CLIENT_NAME, -n CLIENT_NAME
+
+ Client name for authentication.
+
+.. option:: --cluster CLUSTER
+
+ Name of the Ceph cluster.
+
+.. option:: --admin-daemon ADMIN_SOCKET, daemon DAEMON_NAME
+
+ Submit admin-socket commands via admin sockets in /var/run/ceph.
+
+.. option:: --admin-socket ADMIN_SOCKET_NOPE
+
+ You probably mean --admin-daemon
+
+.. option:: -s, --status
+
+ Show cluster status.
+
+.. option:: -w, --watch
+
+ Watch live cluster changes on the default 'cluster' channel
+
+.. option:: -W, --watch-channel
+
+ Watch live cluster changes on any channel (cluster, audit, cephadm, or * for all)
+
+.. option:: --watch-debug
+
+ Watch debug events.
+
+.. option:: --watch-info
+
+ Watch info events.
+
+.. option:: --watch-sec
+
+ Watch security events.
+
+.. option:: --watch-warn
+
+ Watch warning events.
+
+.. option:: --watch-error
+
+ Watch error events.
+
+.. option:: --version, -v
+
+ Display version.
+
+.. option:: --verbose
+
+ Make verbose.
+
+.. option:: --concise
+
+ Make less verbose.
+
+.. option:: -f {json,json-pretty,xml,xml-pretty,plain}, --format
+
+ Format of output.
+
+.. option:: --connect-timeout CLUSTER_TIMEOUT
+
+ Set a timeout for connecting to the cluster.
+
+.. option:: --no-increasing
+
+   ``--no-increasing`` is off by default, so increasing the osd weight is allowed
+   when using the ``reweight-by-utilization`` or ``test-reweight-by-utilization``
+   commands. If this option is used with these commands, it prevents the osd weight
+   from being increased even if the osd is underutilized.
+
+.. option:: --block
+
+ block until completion (scrub and deep-scrub only)
+
+Availability
+============
+
+:program:`ceph` is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
+the Ceph documentation at http://ceph.com/docs for more information.
+
+
+See also
+========
+
+:doc:`ceph-mon <ceph-mon>`\(8),
+:doc:`ceph-osd <ceph-osd>`\(8),
+:doc:`ceph-mds <ceph-mds>`\(8)
diff --git a/doc/man/8/cephadm.rst b/doc/man/8/cephadm.rst
new file mode 100644
index 000000000..ebcc3a6a2
--- /dev/null
+++ b/doc/man/8/cephadm.rst
@@ -0,0 +1,526 @@
+:orphan:
+
+=========================================
+ cephadm -- manage the local cephadm host
+=========================================
+
+.. program:: cephadm
+
+Synopsis
+========
+
+| **cephadm** [-h] [--image IMAGE] [--docker] [--data-dir DATA_DIR]
+| [--log-dir LOG_DIR] [--logrotate-dir LOGROTATE_DIR]
+| [--unit-dir UNIT_DIR] [--verbose] [--timeout TIMEOUT]
+| [--retry RETRY] [--no-container-init]
+| {version,pull,inspect-image,ls,list-networks,adopt,rm-daemon,rm-cluster,run,shell,enter,ceph-volume,unit,logs,bootstrap,deploy,check-host,prepare-host,add-repo,rm-repo,install}
+| ...
+
+
+| **cephadm** **pull**
+
+| **cephadm** **inspect-image**
+
+| **cephadm** **ls** [-h] [--no-detail] [--legacy-dir LEGACY_DIR]
+
+| **cephadm** **list-networks**
+
+| **cephadm** **adopt** [-h] --name NAME --style STYLE [--cluster CLUSTER]
+| [--legacy-dir LEGACY_DIR] [--config-json CONFIG_JSON]
+| [--skip-firewalld] [--skip-pull]
+
+| **cephadm** **rm-daemon** [-h] --name NAME --fsid FSID [--force]
+| [--force-delete-data]
+
+| **cephadm** **rm-cluster** [-h] --fsid FSID [--force]
+
+| **cephadm** **run** [-h] --name NAME --fsid FSID
+
+| **cephadm** **shell** [-h] [--fsid FSID] [--name NAME] [--config CONFIG]
+ [--keyring KEYRING] --mount [MOUNT [MOUNT ...]] [--env ENV]
+ [--] [command [command ...]]
+
+| **cephadm** **enter** [-h] [--fsid FSID] --name NAME [command [command ...]]
+
+| **cephadm** **ceph-volume** [-h] [--fsid FSID] [--config-json CONFIG_JSON]
+ [--config CONFIG] [--keyring KEYRING]
+ command [command ...]
+
+| **cephadm** **unit** [-h] [--fsid FSID] --name NAME command
+
+| **cephadm** **logs** [-h] [--fsid FSID] --name NAME [command [command ...]]
+
+| **cephadm** **bootstrap** [-h] [--config CONFIG] [--mon-id MON_ID]
+| [--mon-addrv MON_ADDRV] [--mon-ip MON_IP]
+| [--mgr-id MGR_ID] [--fsid FSID]
+| [--log-to-file] [--single-host-defaults]
+| [--output-dir OUTPUT_DIR]
+| [--output-keyring OUTPUT_KEYRING]
+| [--output-config OUTPUT_CONFIG]
+| [--output-pub-ssh-key OUTPUT_PUB_SSH_KEY]
+| [--skip-ssh]
+| [--initial-dashboard-user INITIAL_DASHBOARD_USER]
+| [--initial-dashboard-password INITIAL_DASHBOARD_PASSWORD]
+| [--ssl-dashboard-port SSL_DASHBOARD_PORT]
+| [--dashboard-key DASHBOARD_KEY]
+| [--dashboard-crt DASHBOARD_CRT]
+| [--ssh-config SSH_CONFIG]
+| [--ssh-private-key SSH_PRIVATE_KEY]
+| [--ssh-public-key SSH_PUBLIC_KEY]
+| [--ssh-user SSH_USER] [--skip-mon-network]
+| [--skip-dashboard] [--dashboard-password-noupdate]
+| [--no-minimize-config] [--skip-ping-check]
+| [--skip-pull] [--skip-firewalld] [--allow-overwrite]
+| [--allow-fqdn-hostname] [--skip-prepare-host]
+| [--orphan-initial-daemons] [--skip-monitoring-stack]
+| [--apply-spec APPLY_SPEC]
+| [--registry-url REGISTRY_URL]
+| [--registry-username REGISTRY_USERNAME]
+| [--registry-password REGISTRY_PASSWORD]
+| [--registry-json REGISTRY_JSON]
+
+
+
+| **cephadm** **deploy** [-h] --name NAME --fsid FSID [--config CONFIG]
+| [--config-json CONFIG_JSON] [--keyring KEYRING]
+| [--key KEY] [--osd-fsid OSD_FSID] [--skip-firewalld]
+| [--tcp-ports TCP_PORTS] [--reconfig] [--allow-ptrace]
+
+| **cephadm** **check-host** [-h] [--expect-hostname EXPECT_HOSTNAME]
+
+| **cephadm** **prepare-host**
+
+| **cephadm** **add-repo** [-h] [--release RELEASE] [--version VERSION]
+| [--dev DEV] [--dev-commit DEV_COMMIT]
+| [--gpg-url GPG_URL] [--repo-url REPO_URL]
+
+
+| **cephadm** **rm-repo**
+
+| **cephadm** **install** [-h] [packages [packages ...]]
+
+| **cephadm** **registry-login** [-h] [--registry-url REGISTRY_URL]
+| [--registry-username REGISTRY_USERNAME]
+| [--registry-password REGISTRY_PASSWORD]
+| [--registry-json REGISTRY_JSON] [--fsid FSID]
+
+
+
+Description
+===========
+
+:program:`cephadm` is a command line tool to manage the local host for the cephadm orchestrator.
+
+It provides commands to investigate and modify the state of the current host.
+
+:program:`cephadm` is not required on all hosts, but useful when investigating a particular
+daemon.
+
+Options
+=======
+
+.. option:: --image IMAGE
+
+ container image. Can also be set via the
+ "CEPHADM_IMAGE" env var (default: None)
+
+.. option:: --docker
+
+ use docker instead of podman (default: False)
+
+.. option:: --data-dir DATA_DIR
+
+ base directory for daemon data (default: /var/lib/ceph)
+
+.. option:: --log-dir LOG_DIR
+
+ base directory for daemon logs (default: /var/log/ceph)
+
+.. option:: --logrotate-dir LOGROTATE_DIR
+
+ location of logrotate configuration files (default: /etc/logrotate.d)
+
+.. option:: --unit-dir UNIT_DIR
+
+ base directory for systemd units (default: /etc/systemd/system)
+
+.. option:: --verbose, -v
+
+ Show debug-level log messages (default: False)
+
+.. option:: --timeout TIMEOUT
+
+ timeout in seconds (default: None)
+
+.. option:: --retry RETRY
+
+ max number of retries (default: 10)
+
+.. option:: --no-container-init
+
+ do not run podman/docker with `--init` (default: False)
+
+
+Commands
+========
+
+add-repo
+--------
+
+configure local package repository to also include the ceph repository.
+
+Arguments:
+
+* [--release RELEASE] use latest version of a named release (e.g., octopus)
+* [--version VERSION] use specific upstream version (x.y.z)
+* [--dev DEV] use specified bleeding edge build from git branch or tag
+* [--dev-commit DEV_COMMIT] use specified bleeding edge build from git commit
+* [--gpg-url GPG_URL] specify alternative GPG key location
+* [--repo-url REPO_URL] specify alternative repo location
+
+
+adopt
+-----
+
+Adopt a daemon deployed with a different deployment tool.
+
+Arguments:
+
+* [--name NAME, -n NAME] daemon name (type.id)
+* [--style STYLE] deployment style (legacy, ...)
+* [--cluster CLUSTER] cluster name
+* [--legacy-dir LEGACY_DIR] base directory for legacy daemon data
+* [--config-json CONFIG_JSON] Additional configuration information in JSON format
+* [--skip-firewalld] Do not configure firewalld
+* [--skip-pull] do not pull the latest image before adopting
+
+
+bootstrap
+---------
+
+Bootstrap a cluster on the local host. It deploys a MON and a MGR and then also automatically
+deploys the monitoring stack on this host (see --skip-monitoring-stack) and calls
+``ceph orch host add $(hostname)`` (see --skip-ssh).
+
+Arguments:
+
+* [--config CONFIG, -c CONFIG] ceph conf file to incorporate
+* [--mon-id MON_ID] mon id (default: local hostname)
+* [--mon-addrv MON_ADDRV] mon IPs (e.g., [v2:localipaddr:3300,v1:localipaddr:6789])
+* [--mon-ip MON_IP] mon IP
+* [--mgr-id MGR_ID] mgr id (default: randomly generated)
+* [--fsid FSID] cluster FSID
+* [--log-to-file] configure cluster to log to traditional log files
+* [--single-host-defaults] configure cluster to run on a single host
+* [--output-dir OUTPUT_DIR] directory to write config, keyring, and pub key files
+* [--output-keyring OUTPUT_KEYRING] location to write keyring file with new cluster admin and mon keys
+* [--output-config OUTPUT_CONFIG] location to write conf file to connect to new cluster
+* [--output-pub-ssh-key OUTPUT_PUB_SSH_KEY] location to write the cluster's public SSH key
+* [--skip-ssh] skip setup of ssh key on local host
+* [--initial-dashboard-user INITIAL_DASHBOARD_USER] Initial user for the dashboard
+* [--initial-dashboard-password INITIAL_DASHBOARD_PASSWORD] Initial password for the initial dashboard user
+* [--ssl-dashboard-port SSL_DASHBOARD_PORT] Port number used to connect with dashboard using SSL
+* [--dashboard-key DASHBOARD_KEY] Dashboard key
+* [--dashboard-crt DASHBOARD_CRT] Dashboard certificate
+* [--ssh-config SSH_CONFIG] SSH config
+* [--ssh-private-key SSH_PRIVATE_KEY] SSH private key
+* [--ssh-public-key SSH_PUBLIC_KEY] SSH public key
+* [--ssh-user SSH_USER] set user for SSHing to cluster hosts; passwordless sudo will be needed for non-root users
+* [--skip-mon-network] set mon public_network based on bootstrap mon ip
+* [--skip-dashboard] do not enable the Ceph Dashboard
+* [--dashboard-password-noupdate] stop forced dashboard password change
+* [--no-minimize-config] do not assimilate and minimize the config file
+* [--skip-ping-check] do not verify that mon IP is pingable
+* [--skip-pull] do not pull the latest image before bootstrapping
+* [--skip-firewalld] Do not configure firewalld
+* [--allow-overwrite] allow overwrite of existing --output-* config/keyring/ssh files
+* [--allow-fqdn-hostname] allow hostname that is fully-qualified (contains ".")
+* [--skip-prepare-host] Do not prepare host
+* [--orphan-initial-daemons] Do not create initial mon, mgr, and crash service specs
+* [--skip-monitoring-stack] Do not automatically provision the monitoring stack (prometheus, grafana, alertmanager, node-exporter)
+* [--apply-spec APPLY_SPEC] Apply cluster spec after bootstrap (copy ssh key, add hosts and apply services)
+* [--registry-url REGISTRY_URL] url of custom registry to login to. e.g. docker.io, quay.io
+* [--registry-username REGISTRY_USERNAME] username of account to login to on custom registry
+* [--registry-password REGISTRY_PASSWORD] password of account to login to on custom registry
+* [--registry-json REGISTRY_JSON] JSON file containing registry login info (see registry-login command documentation)
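+
+For example, a minimal bootstrap of a new cluster on the local host usually
+only needs the monitor IP (the address shown here is a placeholder)::
+
+    cephadm bootstrap --mon-ip 192.168.0.10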
+
+
+ceph-volume
+-----------
+
+Run ceph-volume inside a container::
+
+ cephadm ceph-volume inventory
+
+Positional arguments:
+
+* [command] command
+
+Arguments:
+
+* [--fsid FSID] cluster FSID
+* [--config-json CONFIG_JSON] JSON file with config and (client.bootstrap-osd) key
+* [--config CONFIG, -c CONFIG] ceph conf file
+* [--keyring KEYRING, -k KEYRING] ceph.keyring to pass through to the container
+
+
+check-host
+----------
+
+check host configuration to be suitable for a Ceph cluster.
+
+Arguments:
+
+* [--expect-hostname EXPECT_HOSTNAME] Check that hostname matches an expected value
+
+
+deploy
+------
+
+deploy a daemon on the local host. Used by the orchestrator CLI::
+
+ cephadm shell -- ceph orch apply <type> ...
+
+Arguments:
+
+* [--name NAME] daemon name (type.id)
+* [--fsid FSID] cluster FSID
+* [--config CONFIG, -c CONFIG] config file for new daemon
+* [--config-json CONFIG_JSON] Additional configuration information in JSON format
+* [--keyring KEYRING] keyring for new daemon
+* [--key KEY] key for new daemon
+* [--osd-fsid OSD_FSID] OSD uuid, if creating an OSD container
+* [--skip-firewalld] Do not configure firewalld
+* [--tcp-ports TCP_PORTS] List of tcp ports to open in the host firewall
+* [--reconfig] Reconfigure a previously deployed daemon
+* [--allow-ptrace] Allow SYS_PTRACE on daemon container
+
+
+enter
+-----
+
+Run an interactive shell inside a running daemon container::
+
+ cephadm enter --name mgr.myhost.ysubfo
+
+Positional arguments:
+
+* [command] command
+
+Arguments:
+
+* [--fsid FSID] cluster FSID
+* [--name NAME, -n NAME] daemon name (type.id)
+
+install
+-------
+
+install ceph package(s)
+
+Positional arguments:
+
+* [packages] packages
+
+
+inspect-image
+-------------
+
+inspect local ceph container image.
+
+list-networks
+-------------
+
+list IP networks
+
+
+ls
+--
+
+list daemon instances known to cephadm on **this** host::
+
+ $ cephadm ls
+ [
+ {
+ "style": "cephadm:v1",
+ "name": "mgr.storage-14b-1.ysubfo",
+ "fsid": "5110cb22-8332-11ea-9148-0894ef7e8bdc",
+ "enabled": true,
+ "state": "running",
+ "container_id": "8562de72370a3836473ecfff8a22c9ccdd99815386b4692a2b30924fb5493c44",
+ "container_image_name": "docker.io/ceph/ceph:v15",
+ "container_image_id": "bc83a388465f0568dab4501fb7684398dca8b50ca12a342a57f21815721723c2",
+ "version": "15.2.1",
+ "started": "2020-04-21T01:16:41.831456",
+ "created": "2020-04-21T01:16:41.775024",
+ "deployed": "2020-04-21T01:16:41.415021",
+ "configured": "2020-04-21T01:16:41.775024"
+ },
+ ...
+
+Arguments:
+
+* [--no-detail] Do not include daemon status
+* [--legacy-dir LEGACY_DIR] Base directory for legacy daemon data
+
+logs
+----
+
+print journald logs for a daemon container::
+
+ cephadm logs --name mgr.myhost.ysubfo
+
+This is similar to::
+
+ journalctl -u mgr.myhost.ysubfo
+
+Can also specify additional journal arguments::
+
+ cephadm logs --name mgr.myhost.ysubfo -- -n 20 # last 20 lines
+ cephadm logs --name mgr.myhost.ysubfo -- -f # follow the log
+
+
+Positional arguments:
+
+* [command] command (optional)
+
+Arguments:
+
+* [--fsid FSID] cluster FSID
+* [--name NAME, -n NAME] daemon name (type.id)
+
+
+prepare-host
+------------
+
+prepare a host for cephadm use
+
+Arguments:
+
+* [--expect-hostname EXPECT_HOSTNAME] Set hostname
+
+
+pull
+----
+
+Pull the ceph image::
+
+ cephadm pull
+
+registry-login
+--------------
+
+Give cephadm login information for an authenticated registry (url, username and password).
+Cephadm will attempt to log the calling host into that registry::
+
+ cephadm registry-login --registry-url [REGISTRY_URL] --registry-username [USERNAME]
+ --registry-password [PASSWORD]
+
+Can also use a JSON file containing the login info formatted as::
+
+ {
+ "url":"REGISTRY_URL",
+ "username":"REGISTRY_USERNAME",
+ "password":"REGISTRY_PASSWORD"
+ }
+
+and pass it in with the command::
+
+ cephadm registry-login --registry-json [JSON FILE]
+
+Arguments:
+
+* [--registry-url REGISTRY_URL] url of registry to login to. e.g. docker.io, quay.io
+* [--registry-username REGISTRY_USERNAME] username of account to login to on registry
+* [--registry-password REGISTRY_PASSWORD] password of account to login to on registry
+* [--registry-json REGISTRY_JSON] JSON file containing login info for custom registry
+* [--fsid FSID] cluster FSID
+
+rm-daemon
+---------
+
+Remove a specific daemon instance
+
+Arguments:
+
+* [--name NAME, -n NAME] daemon name (type.id)
+* [--fsid FSID] cluster FSID
+* [--force] proceed, even though this may destroy valuable data
+* [--force-delete-data] delete valuable daemon data instead of making a backup
+
+
+rm-cluster
+----------
+
+remove all daemons for a cluster
+
+Arguments:
+
+* [--fsid FSID] cluster FSID
+* [--force] proceed, even though this may destroy valuable data
+
+rm-repo
+-------
+
+remove package repository configuration
+
+run
+---
+
+run a ceph daemon, in a container, in the foreground
+
+Arguments:
+
+* [--name NAME, -n NAME] daemon name (type.id)
+* [--fsid FSID] cluster FSID
+
+
+shell
+-----
+
+Run an interactive shell::
+
+ cephadm shell
+
+Or one specific command inside a container::
+
+ cephadm shell -- ceph orch ls
+
+
+Positional arguments:
+
+* [command] command (optional)
+
+Arguments:
+
+* [--fsid FSID] cluster FSID
+* [--name NAME, -n NAME] daemon name (type.id)
+* [--config CONFIG, -c CONFIG] ceph.conf to pass through to the container
+* [--keyring KEYRING, -k KEYRING] ceph.keyring to pass through to the container
+* [--mount MOUNT, -m MOUNT] mount a file or directory under /mnt in the container
+* [--env ENV, -e ENV] set environment variable
+
+
+unit
+----
+
+Operate on the daemon's systemd unit.
+
+Positional arguments:
+
+* [command] systemd command (start, stop, restart, enable, disable, ...)
+
+Arguments:
+
+* [--fsid FSID] cluster FSID
+* [--name NAME, -n NAME] daemon name (type.id)
+
+
+Availability
+============
+
+:program:`cephadm` is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
+the documentation at http://docs.ceph.com/ for more information.
+
+
+See also
+========
+
+:doc:`ceph-volume <ceph-volume>`\(8),
diff --git a/doc/man/8/cephfs-mirror.rst b/doc/man/8/cephfs-mirror.rst
new file mode 100644
index 000000000..3448dd047
--- /dev/null
+++ b/doc/man/8/cephfs-mirror.rst
@@ -0,0 +1,66 @@
+:orphan:
+
+============================================================
+ cephfs-mirror -- Ceph daemon for mirroring CephFS snapshots
+============================================================
+
+.. program:: cephfs-mirror
+
+Synopsis
+========
+
+| **cephfs-mirror**
+
+
+Description
+===========
+
+:program:`cephfs-mirror` is a daemon for asynchronous mirroring of Ceph
+Filesystem snapshots among Ceph clusters.
+
+It connects to remote clusters via libcephfs, relying on default search
+paths to find ceph.conf files, i.e. ``/etc/ceph/$cluster.conf`` where
+``$cluster`` is the human-friendly name of the cluster.
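+
+For example, the daemon might be run in the foreground with a hypothetical
+``client.mirror`` user against a cluster named ``site-a``::
+
+    cephfs-mirror --id mirror --cluster site-a -f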
+
+
+Options
+=======
+
+.. option:: -c ceph.conf, --conf=ceph.conf
+
+ Use ``ceph.conf`` configuration file instead of the default
+ ``/etc/ceph/ceph.conf`` to determine monitor addresses during startup.
+
+.. option:: -i ID, --id ID
+
+ Set the ID portion of name for cephfs-mirror
+
+.. option:: -n TYPE.ID, --name TYPE.ID
+
+   Set the rados user name (e.g. client.mirror)
+
+.. option:: --cluster NAME
+
+ Set the cluster name (default: ceph)
+
+.. option:: -d
+
+ Run in foreground, log to stderr
+
+.. option:: -f
+
+ Run in foreground, log to usual location
+
+
+Availability
+============
+
+:program:`cephfs-mirror` is part of Ceph, a massively scalable, open-source, distributed
+storage system. Please refer to the Ceph documentation at http://ceph.com/docs for
+more information.
+
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8)
diff --git a/doc/man/8/cephfs-top.rst b/doc/man/8/cephfs-top.rst
new file mode 100644
index 000000000..c3719cd36
--- /dev/null
+++ b/doc/man/8/cephfs-top.rst
@@ -0,0 +1,121 @@
+:orphan:
+
+==========================================
+ cephfs-top -- Ceph Filesystem Top Utility
+==========================================
+
+.. program:: cephfs-top
+
+Synopsis
+========
+
+| **cephfs-top** [flags]
+
+
+Description
+===========
+
+**cephfs-top** provides top(1)-like functionality for the Ceph Filesystem.
+Various client metrics are displayed and updated in realtime.
+
+Ceph Metadata Servers periodically send client metrics to Ceph Manager.
+The ``stats`` plugin in Ceph Manager provides an interface to fetch these metrics.
+
+Options
+=======
+
+.. option:: --cluster
+
+   Name of the Ceph cluster to connect to. Defaults to ``ceph``.
+
+.. option:: --id
+
+   ID of the client used to connect to the Ceph cluster. Defaults to ``fstop``.
+
+.. option:: --selftest
+
+ Perform a selftest. This mode performs a sanity check of ``stats`` module.
+
+Descriptions of fields
+======================
+
+.. describe:: chit
+
+ cap hit rate
+
+.. describe:: dlease
+
+ dentry lease rate
+
+.. describe:: ofiles
+
+ number of opened files
+
+.. describe:: oicaps
+
+ number of pinned caps
+
+.. describe:: oinodes
+
+ number of opened inodes
+
+.. describe:: rtio
+
+ total size of read IOs
+
+.. describe:: wtio
+
+ total size of write IOs
+
+.. describe:: raio
+
+ average size of read IOs
+
+.. describe:: waio
+
+ average size of write IOs
+
+.. describe:: rsp
+
+ speed of read IOs compared with the last refresh
+
+.. describe:: wsp
+
+ speed of write IOs compared with the last refresh
+
+.. describe:: rlatavg
+
+ average read latency
+
+.. describe:: rlatsd
+
+   standard deviation of read latency
+
+.. describe:: wlatavg
+
+ average write latency
+
+.. describe:: wlatsd
+
+   standard deviation of write latency
+
+.. describe:: mlatavg
+
+ average metadata latency
+
+.. describe:: mlatsd
+
+   standard deviation of metadata latency
+
+Availability
+============
+
+**cephfs-top** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to the Ceph documentation at
+http://ceph.com/ for more information.
+
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8),
+:doc:`ceph-mds <ceph-mds>`\(8)
diff --git a/doc/man/8/crushtool.rst b/doc/man/8/crushtool.rst
new file mode 100644
index 000000000..31bb8272e
--- /dev/null
+++ b/doc/man/8/crushtool.rst
@@ -0,0 +1,302 @@
+:orphan:
+
+==========================================
+ crushtool -- CRUSH map manipulation tool
+==========================================
+
+.. program:: crushtool
+
+Synopsis
+========
+
+| **crushtool** ( -d *map* | -c *map.txt* | --build --num_osds *numosds*
+ *layer1* *...* | --test ) [ -o *outfile* ]
+
+
+Description
+===========
+
+**crushtool** is a utility that lets you create, compile, decompile
+and test CRUSH map files.
+
+CRUSH is a pseudo-random data distribution algorithm that efficiently
+maps input values (which, in the context of Ceph, correspond to Placement
+Groups) across a heterogeneous, hierarchically structured device map.
+The algorithm was originally described in detail in the following paper
+(although it has evolved some since then)::
+
+ http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf
+
+The tool has four modes of operation.
+
+.. option:: --compile|-c map.txt
+
+ will compile a plaintext map.txt into a binary map file.
+
+.. option:: --decompile|-d map
+
+ will take the compiled map and decompile it into a plaintext source
+ file, suitable for editing.
+
+.. option:: --build --num_osds {num-osds} layer1 ...
+
+ will create map with the given layer structure. See below for a
+ detailed explanation.
+
+.. option:: --test
+
+ will perform a dry run of a CRUSH mapping for a range of input
+ values ``[--min-x,--max-x]`` (default ``[0,1023]``) which can be
+ thought of as simulated Placement Groups. See below for a more
+ detailed explanation.
+
+Unlike other Ceph tools, **crushtool** does not accept generic options
+such as **--debug-crush** from the command line. They can, however, be
+provided via the CEPH_ARGS environment variable. For instance, to
+silence all output from the CRUSH subsystem::
+
+ CEPH_ARGS="--debug-crush 0" crushtool ...
+
+
+Running tests with --test
+=========================
+
+The test mode will use the input crush map (as specified with **-i
+map**) and perform a dry run of CRUSH mapping or random placement
+(if **--simulate** is set). On completion, two kinds of reports can be
+created:
+
+1) The **--show-...** options output human readable information on stderr.
+2) The **--output-csv** option creates CSV files that are documented by
+   the **--help-output** option.
+
+Note: Each Placement Group (PG) has an integer ID which can be obtained
+from ``ceph pg dump`` (for example PG 2.2f means pool id 2, PG id 32).
+The pool and PG IDs are combined by a function to get a value which is
+given to CRUSH to map it to OSDs. crushtool does not know about PGs or
+pools; it only runs simulations by mapping values in the range
+``[--min-x,--max-x]``.
+
+
+.. option:: --show-statistics
+
+ Displays a summary of the distribution. For instance::
+
+ rule 1 (metadata) num_rep 5 result size == 5: 1024/1024
+
+ shows that rule **1** which is named **metadata** successfully
+ mapped **1024** values to **result size == 5** devices when trying
+ to map them to **num_rep 5** replicas. When it fails to provide the
+ required mapping, presumably because the number of **tries** must
+ be increased, a breakdown of the failures is displayed. For instance::
+
+ rule 1 (metadata) num_rep 10 result size == 8: 4/1024
+ rule 1 (metadata) num_rep 10 result size == 9: 93/1024
+ rule 1 (metadata) num_rep 10 result size == 10: 927/1024
+
+ shows that although **num_rep 10** replicas were required, **4**
+ out of **1024** values ( **4/1024** ) were mapped to **result size
+ == 8** devices only.
+
+.. option:: --show-mappings
+
+ Displays the mapping of each value in the range ``[--min-x,--max-x]``.
+ For instance::
+
+ CRUSH rule 1 x 24 [11,6]
+
+ shows that value **24** is mapped to devices **[11,6]** by rule
+ **1**.
+
+ One of the following is required when using the ``--show-mappings`` option:
+
+ (a) ``--num-rep``
+ (b) both ``--min-rep`` and ``--max-rep``
+
+   ``--num-rep`` stands for "number of replicas". It indicates the number of
+   replicas in a pool and is used to specify an exact number of replicas (for
+   example, ``--num-rep 5``). ``--min-rep`` and ``--max-rep`` are used together
+   to specify a range of replicas (for example, ``--min-rep 1 --max-rep 10``).
+
+.. option:: --show-bad-mappings
+
+ Displays which value failed to be mapped to the required number of
+ devices. For instance::
+
+ bad mapping rule 1 x 781 num_rep 7 result [8,10,2,11,6,9]
+
+ shows that when rule **1** was required to map **7** devices, it
+ could map only six : **[8,10,2,11,6,9]**.
+
+.. option:: --show-utilization
+
+ Displays the expected and actual utilization for each device, for
+ each number of replicas. For instance::
+
+ device 0: stored : 951 expected : 853.333
+ device 1: stored : 963 expected : 853.333
+ ...
+
+ shows that device **0** stored **951** values and was expected to store **853**.
+ Implies **--show-statistics**.
+
+.. option:: --show-utilization-all
+
+ Displays the same as **--show-utilization** but does not suppress
+ output when the weight of a device is zero.
+ Implies **--show-statistics**.
+
+.. option:: --show-choose-tries
+
+ Displays how many attempts were needed to find a device mapping.
+ For instance::
+
+ 0: 95224
+ 1: 3745
+ 2: 2225
+ ..
+
+   shows that **95224** mappings succeeded without retries, **3745**
+   mappings succeeded with one attempt, etc. There are as many rows
+   as the value of the **--set-choose-total-tries** option.
+
+.. option:: --output-csv
+
+ Creates CSV files (in the current directory) containing information
+ documented by **--help-output**. The files are named after the rule
+   used when collecting the statistics. For instance, if the rule
+   'metadata' is used, the CSV files will be::
+
+ metadata-absolute_weights.csv
+ metadata-device_utilization.csv
+ ...
+
+   The first line of the file briefly explains the column layout. For
+   instance::
+
+ metadata-absolute_weights.csv
+ Device ID, Absolute Weight
+ 0,1
+ ...
+
+.. option:: --output-name NAME
+
+ Prepend **NAME** to the file names generated when **--output-csv**
+ is specified. For instance **--output-name FOO** will create
+ files::
+
+ FOO-metadata-absolute_weights.csv
+ FOO-metadata-device_utilization.csv
+ ...
+
+The **--set-...** options can be used to modify the tunables of the
+input crush map. The input crush map is modified in
+memory. For example::
+
+ $ crushtool -i mymap --test --show-bad-mappings
+ bad mapping rule 1 x 781 num_rep 7 result [8,10,2,11,6,9]
+
+could be fixed by increasing the **choose-total-tries** as follows::
+
+ $ crushtool -i mymap --test \
+ --show-bad-mappings \
+ --set-choose-total-tries 500
+
+Building a map with --build
+===========================
+
+The build mode will generate hierarchical maps. The first argument
+specifies the number of devices (leaves) in the CRUSH hierarchy. Each
+layer describes how the layer (or devices) preceding it should be
+grouped.
+
+Each layer consists of::
+
+ bucket ( uniform | list | tree | straw | straw2 ) size
+
+The **bucket** is the type of the buckets in the layer
+(e.g. "rack"). Each bucket name will be built by appending a unique
+number to the **bucket** string (e.g. "rack0", "rack1"...).
+
+The second component is the type of bucket: **straw** should be used
+most of the time.
+
+The third component is the maximum size of the bucket. A size of zero
+means a bucket of infinite capacity.
+
+
+Example
+=======
+
+Suppose we have two rows with two racks each and 20 nodes per rack. Suppose
+each node contains 4 storage devices for Ceph OSD Daemons. This configuration
+allows us to deploy 320 Ceph OSD Daemons. Let's assume a 42U rack with 2U nodes,
+leaving an extra 2U for a rack switch.
+
+To reflect our hierarchy of devices, nodes, racks and rows, we would execute
+the following::
+
+ $ crushtool -o crushmap --build --num_osds 320 \
+ node straw 4 \
+ rack straw 20 \
+ row straw 2 \
+ root straw 0
+ # id weight type name reweight
+ -87 320 root root
+ -85 160 row row0
+ -81 80 rack rack0
+ -1 4 node node0
+ 0 1 osd.0 1
+ 1 1 osd.1 1
+ 2 1 osd.2 1
+ 3 1 osd.3 1
+ -2 4 node node1
+ 4 1 osd.4 1
+ 5 1 osd.5 1
+ ...
+
+CRUSH rules are created so the generated crushmap can be
+tested. They are the same rules as the ones created by default when
+creating a new Ceph cluster. They can be further edited with::
+
+ # decompile
+ crushtool -d crushmap -o map.txt
+
+ # edit
+ emacs map.txt
+
+ # recompile
+ crushtool -c map.txt -o crushmap
+
+Reclassify
+==========
+
+The *reclassify* function allows users to transition from older maps that
+maintain parallel hierarchies for OSDs of different types to a modern CRUSH
+map that makes use of the *device class* feature. For more information,
+see https://docs.ceph.com/en/latest/rados/operations/crush-map-edits/#migrating-from-a-legacy-ssd-rule-to-device-classes.
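+
+A typical invocation, along the lines of the linked guide, looks like the
+following (illustrative; the bucket names and device classes depend on your
+map)::
+
+    crushtool -i original --reclassify \
+        --set-subtree-class default hdd \
+        --reclassify-root default hdd \
+        --reclassify-bucket %-ssd ssd default \
+        -o adjusted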
+
+Example output from --test
+==========================
+
+See https://github.com/ceph/ceph/blob/master/src/test/cli/crushtool/set-choose.t
+for sample ``crushtool --test`` commands and output produced thereby.
+
+Availability
+============
+
+**crushtool** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
+refer to the Ceph documentation at http://ceph.com/docs for more
+information.
+
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8),
+:doc:`osdmaptool <osdmaptool>`\(8),
+
+Authors
+=======
+
+John Wilkins, Sage Weil, Loic Dachary
diff --git a/doc/man/8/librados-config.rst b/doc/man/8/librados-config.rst
new file mode 100644
index 000000000..940e8c2e4
--- /dev/null
+++ b/doc/man/8/librados-config.rst
@@ -0,0 +1,46 @@
+:orphan:
+
+=======================================================
+ librados-config -- display information about librados
+=======================================================
+
+.. program:: librados-config
+
+Synopsis
+========
+
+| **librados-config** [ --version ] [ --vernum ]
+
+
+Description
+===========
+
+**librados-config** is a utility that displays information about the
+installed ``librados``.
+
+
+Options
+=======
+
+.. option:: --version
+
+ Display ``librados`` version
+
+.. option:: --vernum
+
+ Display the ``librados`` version code
+
+
+Availability
+============
+
+**librados-config** is part of Ceph, a massively scalable, open-source, distributed storage system.
+Please refer to the Ceph documentation at http://ceph.com/docs for
+more information.
+
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8),
+:doc:`rados <rados>`\(8)
diff --git a/doc/man/8/monmaptool.rst b/doc/man/8/monmaptool.rst
new file mode 100644
index 000000000..f564f8f2e
--- /dev/null
+++ b/doc/man/8/monmaptool.rst
@@ -0,0 +1,140 @@
+:orphan:
+
+==========================================================
+ monmaptool -- ceph monitor cluster map manipulation tool
+==========================================================
+
+.. program:: monmaptool
+
+Synopsis
+========
+
+| **monmaptool** <action> [options] *mapfilename*
+
+
+Description
+===========
+
+**monmaptool** is a utility to create, view, and modify a monitor
+cluster map for the Ceph distributed storage system. The monitor map
+specifies the only fixed addresses in the Ceph distributed system.
+All other daemons bind to arbitrary addresses and register themselves
+with the monitors.
+
+When creating a map with --create, a new monitor map with a new,
+random UUID will be created. The --create option should be followed
+by one or more monitor addresses (specified with --add).
+
+The default Ceph monitor port for messenger protocol v1 is 6789, and
+3300 for protocol v2.
+
+Multiple actions can be performed per invocation.
+
+
+Options
+=======
+
+.. option:: --print
+
+ print a plaintext dump of the map, after any modifications are
+ made.
+
+.. option:: --feature-list [plain|parseable]
+
+ list the enabled features as well as the available ones.
+
+ By default, a human readable output is produced.
+
+.. option:: --create
+
+ create a new monitor map with a new UUID (and with it, a new,
+ empty Ceph cluster).
+
+.. option:: --clobber
+
+ allow monmaptool to create a new mapfilename in place of an existing map.
+
+ Only useful when *--create* is used.
+
+.. option:: --generate
+
+ generate a new monmap based on the values on the command line or specified
+ in the ceph configuration. This is, in order of preference,
+
+ #. ``--monmap filename`` to specify a monmap to load
+ #. ``--mon-host 'host1,ip2'`` to specify a list of hosts or ip addresses
+ #. ``[mon.foo]`` sections containing ``mon addr`` settings in the config. Note that this method is not recommended and support will be removed in a future release.
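+
+  For example, to generate a monmap from the monitors listed in a
+  configuration file (illustrative; this relies on the generic ``-c``/``--conf``
+  option to point at a configuration that lists the monitors)::
+
+    monmaptool --generate -c /etc/ceph/ceph.conf monmap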
+
+.. option:: --filter-initial-members
+
+ filter the initial monmap by applying the ``mon initial members``
+ setting. Monitors not present in that list will be removed, and
+ initial members not present in the map will be added with dummy
+ addresses.
+
+.. option:: --add name ip[:port]
+
+ add a monitor with the specified ip:port to the map.
+
+ If the *nautilus* feature is set, and the port is not, the monitor
+ will be added for both messenger protocols.
+
+.. option:: --addv name [protocol:ip:port[,...]]
+
+  add a monitor with the specified protocol:ip:port to the map.
+
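+  For example (illustrative), to add a monitor with explicit v2 and v1
+  addresses::
+
+    monmaptool --addv nodeA [v2:192.168.0.10:3300,v1:192.168.0.10:6789] monmap
+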
+.. option:: --rm name
+
+ remove the monitor with the specified name from the map.
+
+.. option:: --fsid uuid
+
+ set the fsid to the given uuid. If not specified with *--create*, a random fsid will be generated.
+
+.. option:: --feature-set value [--optional|--persistent]
+
+ enable a feature.
+
+.. option:: --feature-unset value [--optional|--persistent]
+
+ disable a feature.
+
+.. option:: --enable-all-features
+
+ enable all supported features.
+
+.. option:: --set-min-mon-release release
+
+ set the min_mon_release.
+
+Example
+=======
+
+To create a new map with three monitors (for a fresh Ceph cluster)::
+
+ monmaptool --create --add nodeA 192.168.0.10 --add nodeB 192.168.0.11 \
+ --add nodeC 192.168.0.12 --enable-all-features --clobber monmap
+
+To display the contents of the map::
+
+ monmaptool --print monmap
+
+To replace one monitor::
+
+ monmaptool --rm nodeA monmap
+ monmaptool --add nodeA 192.168.0.9 monmap
+
+
+Availability
+============
+
+**monmaptool** is part of Ceph, a massively scalable, open-source, distributed
+storage system. Please refer to the Ceph documentation at http://ceph.com/docs
+for more information.
+
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8),
+:doc:`crushtool <crushtool>`\(8),
diff --git a/doc/man/8/mount.ceph.rst b/doc/man/8/mount.ceph.rst
new file mode 100644
index 000000000..1c67a12f8
--- /dev/null
+++ b/doc/man/8/mount.ceph.rst
@@ -0,0 +1,256 @@
+:orphan:
+
+========================================
+ mount.ceph -- mount a Ceph file system
+========================================
+
+.. program:: mount.ceph
+
+Synopsis
+========
+
+| **mount.ceph** [*mon1_socket*\ ,\ *mon2_socket*\ ,...]:/[*subdir*] *dir* [
+ -o *options* ]
+
+
+Description
+===========
+
+**mount.ceph** is a helper for mounting the Ceph file system on a Linux host.
+It serves to resolve monitor hostname(s) into IP addresses and read
+authentication keys from disk; the Linux kernel client component does most of
+the real work. In fact, it is possible to mount a non-authenticated Ceph file
+system without mount.ceph by specifying monitor address(es) by IP::
+
+ mount -t ceph 1.2.3.4:/ /mnt/mycephfs
+
+The first argument is the device part of the mount command. It includes the
+host's socket and the path within CephFS that will be mounted at the mount
+point. The socket takes the form ip_address[:port]. If the port is not
+specified, the Ceph default of 6789 is assumed. Multiple monitor addresses can
+be passed, separated by commas. Only one monitor is needed to mount
+successfully; the client will learn about all monitors from any responsive
+monitor. However, it is a good idea to specify more than one in case the one
+specified happens to be down at the time of the mount.
+
+If the host portion of the device is left blank, then **mount.ceph** will
+attempt to determine monitor addresses using local configuration files
+and/or DNS SRV records. Similarly, if authentication is enabled on the Ceph
+cluster (which is done using CephX) and the options ``secret`` and ``secretfile``
+are not specified on the command line, the mount helper will spawn a child
+process that uses the standard Ceph library routines to find a keyring and
+fetch the secret from it.
+
+A sub-directory of the file system can be mounted by specifying the (absolute)
+path to the sub-directory right after ":" after the socket in the device part
+of the mount command.
+
+Mount helper application conventions dictate that the first two arguments are
+the device to be mounted and the mountpoint for that device. Options must be
+passed only after these fixed arguments.
+
+
+Options
+=======
+
+Basic
+-----
+
+:command:`conf`
+ Path to a ceph.conf file. This is used to initialize the Ceph context
+ for autodiscovery of monitor addresses and auth secrets. The default is
+ to use the standard search path for ceph.conf files.
+
+:command:`fs=<fs-name>`
+    Specify the non-default file system to be mounted. Not passing this
+    option mounts the default file system.
+
+:command:`mds_namespace=<fs-name>`
+    A synonym of "fs=" whose use is deprecated.
+
+:command:`mount_timeout`
+ int (seconds), Default: 60
+
+:command:`ms_mode=<legacy|crc|secure|prefer-crc|prefer-secure>`
+ Set the connection mode that the client uses for transport. The available
+ modes are:
+
+ - ``legacy``: use messenger v1 protocol to talk to the cluster
+
+ - ``crc``: use messenger v2, without on-the-wire encryption
+
+ - ``secure``: use messenger v2, with on-the-wire encryption
+
+ - ``prefer-crc``: crc mode, if denied agree to secure mode
+
+ - ``prefer-secure``: secure mode, if denied agree to crc mode
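+
+    For example (illustrative), to prefer the v2 protocol with CRC integrity
+    only::
+
+        mount -t ceph :/ /mnt/mycephfs -o name=fs_username,ms_mode=prefer-crc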
+
+:command:`name`
+ RADOS user to authenticate as when using CephX. Default: guest
+
+:command:`secret`
+ secret key for use with CephX. This option is insecure because it exposes
+ the secret on the command line. To avoid this, use the secretfile option.
+
+:command:`secretfile`
+ path to file containing the secret key to use with CephX
+
+:command:`recover_session=<no|clean>`
+ Set auto reconnect mode in the case where the client is blocklisted. The
+ available modes are ``no`` and ``clean``. The default is ``no``.
+
+ - ``no``: never attempt to reconnect when client detects that it has been
+ blocklisted. Blocklisted clients will not attempt to reconnect and
+ their operations will fail too.
+
+ - ``clean``: client reconnects to the Ceph cluster automatically when it
+ detects that it has been blocklisted. During reconnect, client drops
+ dirty data/metadata, invalidates page caches and writable file handles.
+ After reconnect, file locks become stale because the MDS loses track of
+ them. If an inode contains any stale file locks, read/write on the inode
+ is not allowed until applications release all stale file locks.
+
+Advanced
+--------
+:command:`cap_release_safety`
+ int, Default: calculated
+
+:command:`caps_wanted_delay_max`
+ int, cap release delay, Default: 60
+
+:command:`caps_wanted_delay_min`
+ int, cap release delay, Default: 5
+
+:command:`dirstat`
+ funky `cat dirname` for stats, Default: off
+
+:command:`nodirstat`
+ no funky `cat dirname` for stats
+
+:command:`ip`
+ my ip
+
+:command:`noasyncreaddir`
+ no dcache readdir
+
+:command:`nocrc`
+ no data crc on writes
+
+:command:`noshare`
+ create a new client instance, instead of sharing an existing instance of
+ a client mounting the same cluster
+
+:command:`osdkeepalive`
+ int, Default: 5
+
+:command:`osd_idle_ttl`
+ int (seconds), Default: 60
+
+:command:`rasize`
+ int (bytes), max readahead. Default: 8388608 (8192*1024)
+
+:command:`rbytes`
+ Report the recursive size of the directory contents for st_size on
+ directories. Default: off
+
+:command:`norbytes`
+ Do not report the recursive size of the directory contents for
+ st_size on directories.
+
+:command:`readdir_max_bytes`
+ int, Default: 524288 (512*1024)
+
+:command:`readdir_max_entries`
+ int, Default: 1024
+
+:command:`rsize`
+ int (bytes), max read size. Default: 16777216 (16*1024*1024)
+
+:command:`snapdirname`
+ string, set the name of the hidden snapdir. Default: .snap
+
+:command:`write_congestion_kb`
+ int (kb), max writeback in flight. scale with available
+ memory. Default: calculated from available memory
+
+:command:`wsize`
+ int (bytes), max write size. Default: 16777216 (16*1024*1024) (writeback
+ uses smaller of wsize and stripe unit)
+
+:command:`wsync`
+ Execute all namespace operations synchronously. This ensures that the
+ namespace operation will only complete after receiving a reply from
+ the MDS. This is the default.
+
+:command:`nowsync`
+ Allow the client to do namespace operations asynchronously. When this
+ option is enabled, a namespace operation may complete before the MDS
+ replies, if it has sufficient capabilities to do so.
+
+Examples
+========
+
+Mount the full file system::
+
+ mount.ceph :/ /mnt/mycephfs
+
+Assuming mount.ceph is installed properly, it should be automatically invoked
+by mount(8)::
+
+ mount -t ceph :/ /mnt/mycephfs
+
+Mount only part of the namespace/file system::
+
+ mount.ceph :/some/directory/in/cephfs /mnt/mycephfs
+
+Mount a non-default file system, in case the cluster has multiple file systems::
+
+    mount -t ceph :/ /mnt/mycephfs2 -o fs=mycephfs2
+
+or::
+
+    mount -t ceph :/ /mnt/mycephfs2 -o mds_namespace=mycephfs2  # This option name is deprecated.
+
+Optionally, pass the monitor host's IP address::
+
+ mount.ceph 192.168.0.1:/ /mnt/mycephfs
+
+Pass the port along with IP address if it's running on a non-standard port::
+
+ mount.ceph 192.168.0.1:7000:/ /mnt/mycephfs
+
+If there are multiple monitors, pass their addresses separated by commas::
+
+ mount.ceph 192.168.0.1,192.168.0.2,192.168.0.3:/ /mnt/mycephfs
+
+If authentication is enabled on Ceph cluster::
+
+ mount.ceph :/ /mnt/mycephfs -o name=fs_username
+
+Optionally, pass the secret key for the CephX user::
+
+ mount.ceph :/ /mnt/mycephfs -o name=fs_username,secret=AQATSKdNGBnwLhAAnNDKnH65FmVKpXZJVasUeQ==
+
+Pass a file containing the secret key, to avoid leaving the key in the shell's
+command history::
+
+ mount.ceph :/ /mnt/mycephfs -o name=fs_username,secretfile=/etc/ceph/fs_username.secret
+
+
+Availability
+============
+
+**mount.ceph** is part of Ceph, a massively scalable, open-source, distributed
+storage system. Please refer to the Ceph documentation at http://ceph.com/docs
+for more information.
+
+Feature Availability
+====================
+
+The ``recover_session=`` option was added to mainline Linux kernels in v5.4.
+``wsync`` and ``nowsync`` were added in v5.7.
+
+See also
+========
+
+:doc:`ceph-fuse <ceph-fuse>`\(8),
+:doc:`ceph <ceph>`\(8)
diff --git a/doc/man/8/mount.fuse.ceph.rst b/doc/man/8/mount.fuse.ceph.rst
new file mode 100644
index 000000000..0b3b2d67d
--- /dev/null
+++ b/doc/man/8/mount.fuse.ceph.rst
@@ -0,0 +1,71 @@
+:orphan:
+
+====================================================
+ mount.fuse.ceph -- mount ceph-fuse from /etc/fstab.
+====================================================
+
+.. program:: mount.fuse.ceph
+
+Synopsis
+========
+
+| **mount.fuse.ceph** [-h] [-o OPTIONS [*OPTIONS* ...]]
+ device [*device* ...]
+ mountpoint [*mountpoint* ...]
+
+Description
+===========
+
+**mount.fuse.ceph** is a helper for mounting ceph-fuse from
+``/etc/fstab``.
+
+To use mount.fuse.ceph, add an entry in ``/etc/fstab`` like::
+
+ DEVICE PATH TYPE OPTIONS
+ none /mnt/ceph fuse.ceph ceph.id=admin,_netdev,defaults 0 0
+ none /mnt/ceph fuse.ceph ceph.name=client.admin,_netdev,defaults 0 0
+ none /mnt/ceph fuse.ceph ceph.id=myuser,ceph.conf=/etc/ceph/foo.conf,_netdev,defaults 0 0
+
+ceph-fuse options are specified in the ``OPTIONS`` column and must begin
+with the '``ceph.``' prefix. This way, Ceph-related file system options are
+passed to ceph-fuse and all other options are ignored by it.
+
+Options
+=======
+
+.. option:: ceph.id=<username>
+
+  Specify that ceph-fuse will authenticate as the given user.
+
+.. option:: ceph.name=client.admin
+
+  Specify that ceph-fuse will authenticate as client.admin.
+
+.. option:: ceph.conf=/etc/ceph/foo.conf
+
+  Sets the 'conf' option to /etc/ceph/foo.conf via the ceph-fuse command line.
+
+
+Any valid ceph-fuse options can be passed this way.
+
+Additional Info
+===============
+
+The old format /etc/fstab entries are also supported::
+
+ DEVICE PATH TYPE OPTIONS
+ id=admin /mnt/ceph fuse.ceph defaults 0 0
+ id=myuser,conf=/etc/ceph/foo.conf /mnt/ceph fuse.ceph defaults 0 0
+
+Availability
+============
+
+**mount.fuse.ceph** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
+refer to the Ceph documentation at http://ceph.com/docs for more
+information.
+
+See also
+========
+
+:doc:`ceph-fuse <ceph-fuse>`\(8),
+:doc:`ceph <ceph>`\(8)
diff --git a/doc/man/8/osdmaptool.rst b/doc/man/8/osdmaptool.rst
new file mode 100644
index 000000000..92ad8039c
--- /dev/null
+++ b/doc/man/8/osdmaptool.rst
@@ -0,0 +1,331 @@
+:orphan:
+
+.. _osdmaptool:
+
+======================================================
+ osdmaptool -- ceph osd cluster map manipulation tool
+======================================================
+
+.. program:: osdmaptool
+
+Synopsis
+========
+
+| **osdmaptool** *mapfilename* [--print] [--createsimple *numosd*
+ [--pgbits *bitsperosd* ] ] [--clobber]
+| **osdmaptool** *mapfilename* [--import-crush *crushmap*]
+| **osdmaptool** *mapfilename* [--export-crush *crushmap*]
+| **osdmaptool** *mapfilename* [--upmap *file*] [--upmap-max *max-optimizations*]
+ [--upmap-deviation *max-deviation*] [--upmap-pool *poolname*]
+ [--save] [--upmap-active]
+| **osdmaptool** *mapfilename* [--upmap-cleanup] [--upmap *file*]
+
+
+Description
+===========
+
+**osdmaptool** is a utility that lets you create, view, and manipulate
+OSD cluster maps from the Ceph distributed storage system. Notably, it
+lets you extract the embedded CRUSH map or import a new CRUSH map.
+It can also simulate the upmap balancer mode so you can get a sense of
+what is needed to balance your PGs.
+
+
+Options
+=======
+
+.. option:: --print
+
+ will simply make the tool print a plaintext dump of the map, after
+ any modifications are made.
+
+.. option:: --dump <format>
+
+  displays the map in plain text when <format> is 'plain'; if the specified
+  format is not supported, 'json' is used. This is an alternative to the
+  --print option.
+
+.. option:: --clobber
+
+ will allow osdmaptool to overwrite mapfilename if changes are made.
+
+.. option:: --import-crush mapfile
+
+ will load the CRUSH map from mapfile and embed it in the OSD map.
+
+.. option:: --export-crush mapfile
+
+ will extract the CRUSH map from the OSD map and write it to
+ mapfile.
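+
+  For example (illustrative), extract the CRUSH map and decompile it for
+  editing::
+
+    osdmaptool osdmap --export-crush crushmap
+    crushtool -d crushmap -o map.txt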
+
+.. option:: --createsimple numosd [--pg-bits bitsperosd] [--pgp-bits bits]
+
+ will create a relatively generic OSD map with the numosd devices.
+ If --pg-bits is specified, the initial placement group counts will
+ be set with bitsperosd bits per OSD. That is, the pg_num map
+ attribute will be set to numosd shifted by bitsperosd.
+ If --pgp-bits is specified, then the pgp_num map attribute will
+ be set to numosd shifted by bits.
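+
+  For example (illustrative), ``--createsimple 16 --pg-bits 6`` results in
+  ``pg_num = 16 << 6 = 1024``::
+
+    osdmaptool --createsimple 16 --pg-bits 6 --pgp-bits 6 osdmap --clobber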
+
+.. option:: --create-from-conf
+
+ creates an osd map with default configurations.
+
+.. option:: --test-map-pgs [--pool poolid] [--range-first <first> --range-last <last>]
+
+ will print out the mappings from placement groups to OSDs.
+ If range is specified, then it iterates from first to last in the directory
+ specified by argument to osdmaptool.
+ Eg: **osdmaptool --test-map-pgs --range-first 0 --range-last 2 osdmap_dir**.
+ This will iterate through the files named 0,1,2 in osdmap_dir.
+
+.. option:: --test-map-pgs-dump [--pool poolid] [--range-first <first> --range-last <last>]
+
+ will print out the summary of all placement groups and the mappings from them to the mapped OSDs.
+ If range is specified, then it iterates from first to last in the directory
+ specified by argument to osdmaptool.
+ Eg: **osdmaptool --test-map-pgs-dump --range-first 0 --range-last 2 osdmap_dir**.
+ This will iterate through the files named 0,1,2 in osdmap_dir.
+
+.. option:: --test-map-pgs-dump-all [--pool poolid] [--range-first <first> --range-last <last>]
+
+ will print out the summary of all placement groups and the mappings
+ from them to all the OSDs.
+ If range is specified, then it iterates from first to last in the directory
+ specified by argument to osdmaptool.
+ Eg: **osdmaptool --test-map-pgs-dump-all --range-first 0 --range-last 2 osdmap_dir**.
+ This will iterate through the files named 0,1,2 in osdmap_dir.
+
+.. option:: --test-random
+
+ does a random mapping of placement groups to the OSDs.
+
+.. option:: --test-map-pg <pgid>
+
+  map a particular placement group (specified by pgid) to the OSDs.
+
+.. option:: --test-map-object <objectname> [--pool <poolid>]
+
+  map a particular object (specified by objectname) to its placement group and OSDs.
+
+.. option:: --test-crush [--range-first <first> --range-last <last>]
+
+ map placement groups to acting OSDs.
+ If range is specified, then it iterates from first to last in the directory
+ specified by argument to osdmaptool.
+ Eg: **osdmaptool --test-crush --range-first 0 --range-last 2 osdmap_dir**.
+ This will iterate through the files named 0,1,2 in osdmap_dir.
+
+.. option:: --mark-up-in
+
+ mark osds up and in (but do not persist).
+
+.. option:: --mark-out
+
+ mark an osd as out (but do not persist)
+
+.. option:: --mark-up <osdid>
+
+ mark an osd as up (but do not persist)
+
+.. option:: --mark-in <osdid>
+
+ mark an osd as in (but do not persist)
+
+.. option:: --tree
+
+ Displays a hierarchical tree of the map.
+
+.. option:: --clear-temp
+
+ clears pg_temp and primary_temp variables.
+
+.. option:: --clean-temps
+
+ clean pg_temps.
+
+.. option:: --health
+
+ dump health checks
+
+.. option:: --with-default-pool
+
+ include default pool when creating map
+
+.. option:: --upmap-cleanup <file>
+
+ clean up pg_upmap[_items] entries, writing commands to <file> [default: - for stdout]
+
+.. option:: --upmap <file>
+
+  calculate pg upmap entries to balance the pg layout, writing commands to <file> [default: - for stdout]
+
+.. option:: --upmap-max <max-optimizations>
+
+ set max upmap entries to calculate [default: 10]
+
+.. option:: --upmap-deviation <max-deviation>
+
+ max deviation from target [default: 5]
+
+.. option:: --upmap-pool <poolname>
+
+  restrict upmap balancing to a single pool; the option can be repeated for multiple pools
+
+.. option:: --upmap-active
+
+ Act like an active balancer, keep applying changes until balanced
+
+.. option:: --adjust-crush-weight <osdid:weight>[,<osdid:weight>,<...>]
+
+ Change CRUSH weight of <osdid>
+
+.. option:: --save
+
+ write modified osdmap with upmap or crush-adjust changes
+
+Example
+=======
+
+To create a simple map with 16 devices::
+
+ osdmaptool --createsimple 16 osdmap --clobber
+
+To view the result::
+
+ osdmaptool --print osdmap
+
+To view the mappings of placement groups for pool 1::
+
+ osdmaptool osdmap --test-map-pgs-dump --pool 1
+
+ pool 1 pg_num 8
+ 1.0 [0,2,1] 0
+ 1.1 [2,0,1] 2
+ 1.2 [0,1,2] 0
+ 1.3 [2,0,1] 2
+ 1.4 [0,2,1] 0
+ 1.5 [0,2,1] 0
+ 1.6 [0,1,2] 0
+ 1.7 [1,0,2] 1
+ #osd count first primary c wt wt
+ osd.0 8 5 5 1 1
+ osd.1 8 1 1 1 1
+ osd.2 8 2 2 1 1
+ in 3
+ avg 8 stddev 0 (0x) (expected 2.3094 0.288675x))
+ min osd.0 8
+ max osd.0 8
+ size 0 0
+ size 1 0
+ size 2 0
+ size 3 8
+
+In this output:
+
+ #. pool 1 has 8 placement groups, and two tables follow.
+ #. A table of placement groups. Each row represents a placement group, with columns of:
+
+  * placement group id,
+  * acting set, and
+  * primary OSD.
+ #. A table of all OSDs. Each row represents an OSD, with columns of:
+
+  * count of placement groups mapped to this OSD,
+  * count of placement groups where this OSD is the first one in their acting sets,
+  * count of placement groups where this OSD is the primary,
+  * the CRUSH weight of this OSD, and
+  * the weight of this OSD.
+ #. Statistics on the number of placement groups held by the 3 OSDs:
+
+  * average, stddev, stddev/average, expected stddev, expected stddev / average
+  * min and max
+ #. The number of placement groups mapping to n OSDs. In this case, all 8 placement
+    groups are mapped to 3 different OSDs.
+
+In a less-balanced cluster, we could have the following output for the
+statistics of placement group distribution, whose standard deviation is
+1.41421::
+
+ #osd count first primary c wt wt
+ osd.0 33 9 9 0.0145874 1
+ osd.1 34 14 14 0.0145874 1
+ osd.2 31 7 7 0.0145874 1
+ osd.3 31 13 13 0.0145874 1
+ osd.4 30 14 14 0.0145874 1
+ osd.5 33 7 7 0.0145874 1
+ in 6
+ avg 32 stddev 1.41421 (0.0441942x) (expected 5.16398 0.161374x))
+ min osd.4 30
+ max osd.1 34
+  size 0 0
+  size 1 0
+  size 2 0
+  size 3 64
+
+To simulate the active balancer in upmap mode::
+
+ osdmaptool --upmap upmaps.out --upmap-active --upmap-deviation 6 --upmap-max 11 osdmap
+
+ osdmaptool: osdmap file 'osdmap'
+ writing upmap command output to: upmaps.out
+ checking for upmap cleanups
+ upmap, max-count 11, max deviation 6
+ pools movies photos metadata data
+ prepared 11/11 changes
+ Time elapsed 0.00310404 secs
+ pools movies photos metadata data
+ prepared 11/11 changes
+ Time elapsed 0.00283402 secs
+ pools data metadata movies photos
+ prepared 11/11 changes
+ Time elapsed 0.003122 secs
+ pools photos metadata data movies
+ prepared 11/11 changes
+ Time elapsed 0.00324372 secs
+ pools movies metadata data photos
+ prepared 1/11 changes
+ Time elapsed 0.00222609 secs
+ pools data movies photos metadata
+ prepared 0/11 changes
+ Time elapsed 0.00209916 secs
+ Unable to find further optimization, or distribution is already perfect
+ osd.0 pgs 41
+ osd.1 pgs 42
+ osd.2 pgs 42
+ osd.3 pgs 41
+ osd.4 pgs 46
+ osd.5 pgs 39
+ osd.6 pgs 39
+ osd.7 pgs 43
+ osd.8 pgs 41
+ osd.9 pgs 46
+ osd.10 pgs 46
+ osd.11 pgs 46
+ osd.12 pgs 46
+ osd.13 pgs 41
+ osd.14 pgs 40
+ osd.15 pgs 40
+ osd.16 pgs 39
+ osd.17 pgs 46
+ osd.18 pgs 46
+ osd.19 pgs 39
+ osd.20 pgs 42
+ Total time elapsed 0.0167765 secs, 5 rounds
+
+
+Availability
+============
+
+**osdmaptool** is part of Ceph, a massively scalable, open-source, distributed storage system. Please
+refer to the Ceph documentation at http://ceph.com/docs for more
+information.
+
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8),
+:doc:`crushtool <crushtool>`\(8),
diff --git a/doc/man/8/rados.rst b/doc/man/8/rados.rst
new file mode 100644
index 000000000..147313f14
--- /dev/null
+++ b/doc/man/8/rados.rst
@@ -0,0 +1,404 @@
+:orphan:
+
+=======================================
+ rados -- rados object storage utility
+=======================================
+
+.. program:: rados
+
+Synopsis
+========
+
+| **rados** [ *options* ] [ *command* ]
+
+
+Description
+===========
+
+**rados** is a utility for interacting with a Ceph object storage
+cluster (RADOS), part of the Ceph distributed storage system.
+
+
+Global Options
+==============
+
+.. option:: --object-locator object_locator
+
+ Set object_locator for operation.
+
+.. option:: -p pool, --pool pool
+
+ Interact with the given pool. Required by most commands.
+
+.. option:: --target-pool pool
+
+ Select target pool by name.
+
+.. option:: --pgid
+
+  As an alternative to ``--pool``, ``--pgid`` also allows users to specify the
+  PG id to which the command will be directed. With this option, certain
+  commands like ``ls`` allow users to limit the scope of the command to the given PG.
+
+.. option:: -N namespace, --namespace namespace
+
+ Specify the rados namespace to use for the object.
+
+.. option:: --all
+
+ Use with ls to list objects in all namespaces.
+ Put in CEPH_ARGS environment variable to make this the default.
+
+.. option:: --default
+
+ Use with ls to list objects in default namespace.
+ Takes precedence over --all in case --all is in environment.
+
+.. option:: -s snap, --snap snap
+
+ Read from the given pool snapshot. Valid for all pool-specific read operations.
+
+.. option:: --create
+
+ Create the pool or directory that was specified.
+
+.. option:: -i infile
+
+ will specify an input file to be passed along as a payload with the
+ command to the monitor cluster. This is only used for specific
+ monitor commands.
+
+.. option:: -m monaddress[:port]
+
+ Connect to specified monitor (instead of looking through ceph.conf).
+
+.. option:: -b block_size
+
+ Set the block size for put/get/append ops and for write benchmarking.
+
+.. option:: --striper
+
+ Uses the striping API of rados rather than the default one.
+ Available for stat, stat2, get, put, append, truncate, rm, ls
+ and all xattr related operation.
+
+.. option:: -O object_size, --object-size object_size
+
+ Set the object size for put/get ops and for write benchmarking.
+
+.. option:: --max-objects
+
+ Set the max number of objects for write benchmarking.
+
+.. option:: --lock-cookie locker-cookie
+
+ Will set the lock cookie for acquiring advisory lock (lock get command).
+ If the cookie is not empty, this option must be passed to lock break command
+ to find the correct lock when releasing lock.
+
+.. option:: --target-locator
+
+ Use with cp to specify the locator of the new object.
+
+.. option:: --target-nspace
+
+ Use with cp to specify the namespace of the new object.
+
+
+Bench options
+=============
+
+.. option:: -t N, --concurrent-ios=N
+
+ Set number of concurrent I/O operations.
+
+.. option:: --show-time
+
+ Prefix output with date/time.
+
+.. option:: --no-verify
+
+ Do not verify contents of read objects.
+
+.. option:: --write-object
+
+ Write contents to the objects.
+
+.. option:: --write-omap
+
+ Write contents to the omap.
+
+.. option:: --write-xattr
+
+ Write contents to the extended attributes.
+
+
+Load gen options
+================
+
+.. option:: --num-objects
+
+ Total number of objects.
+
+.. option:: --min-object-size
+
+ Min object size.
+
+.. option:: --max-object-size
+
+ Max object size.
+
+.. option:: --min-op-len
+
+ Min io size of operations.
+
+.. option:: --max-op-len
+
+ Max io size of operations.
+
+.. option:: --max-ops
+
+ Max number of operations.
+
+.. option:: --max-backlog
+
+ Max backlog size.
+
+.. option:: --read-percent
+
+ Percent of operations that are read.
+
+.. option:: --target-throughput
+
+ Target throughput (in bytes).
+
+.. option:: --run-length
+
+ Total time (in seconds).
+
+.. option:: --offset-align
+
+ At what boundary to align random op offsets.
+
+
+Cache pools options
+===================
+
+.. option:: --with-clones
+
+ Include clones when doing flush or evict.
+
+
+OMAP options
+============
+
+.. option:: --omap-key-file file
+
+ Read the omap key from a file.
+
+
+Generic options
+===============
+
+.. option:: -c FILE, --conf FILE
+
+ Read configuration from the given configuration file.
+
+.. option:: --id ID
+
+ Set ID portion of my name.
+
+.. option:: -n TYPE.ID, --name TYPE.ID
+
+ Set cephx user name.
+
+.. option:: --cluster NAME
+
+ Set cluster name (default: ceph).
+
+.. option:: --setuser USER
+
+ Set uid to user or uid (and gid to user's gid).
+
+.. option:: --setgroup GROUP
+
+ Set gid to group or gid.
+
+.. option:: --version
+
+ Show version and quit.
+
+
+Global commands
+===============
+
+:command:`lspools`
+ List object pools
+
+:command:`df`
+ Show utilization statistics, including disk usage (bytes) and object
+ counts, over the entire system and broken down by pool.
+
+:command:`list-inconsistent-pg` *pool*
+ List inconsistent PGs in given pool.
+
+:command:`list-inconsistent-obj` *pgid*
+ List inconsistent objects in given PG.
+
+:command:`list-inconsistent-snapset` *pgid*
+ List inconsistent snapsets in given PG.
+
+
+Pool specific commands
+======================
+
+:command:`get` *name* *outfile*
+ Read object name from the cluster and write it to outfile.
+
+:command:`put` *name* *infile* [--offset offset]
+ Write object name with start offset (default:0) to the cluster with contents from infile.
+ **Warning:** The put command creates a single RADOS object, sized just as
+ large as your input file. Unless your objects are of reasonable and consistent sizes, that
+ is probably not what you want -- consider using RGW/S3, CephFS, or RBD instead.
+
+:command:`append` *name* *infile*
+ Append object name to the cluster with contents from infile.
+
+:command:`rm` *name*
+ Remove object name.
+
+:command:`listwatchers` *name*
+ List the watchers of object name.
+
+:command:`ls` *outfile*
+  List objects in the given pool and write to outfile. If ``--pgid`` is specified instead of ``--pool``, ``ls`` will list only the objects in the given PG.
+
+:command:`lssnap`
+ List snapshots for given pool.
+
+:command:`clonedata` *srcname* *dstname* --object-locator *key*
+ Clone object byte data from *srcname* to *dstname*. Both objects must be stored with the locator key *key* (usually either *srcname* or *dstname*). Object attributes and omap keys are not copied or cloned.
+
+:command:`mksnap` *foo*
+ Create pool snapshot named *foo*.
+
+:command:`rmsnap` *foo*
+ Remove pool snapshot named *foo*.
+
+:command:`bench` *seconds* *mode* [ -b *objsize* ] [ -t *threads* ]
+ Benchmark for *seconds*. The mode can be *write*, *seq*, or
+ *rand*. *seq* and *rand* are read benchmarks, either
+ sequential or random. Before running one of the reading benchmarks,
+ run a write benchmark with the *--no-cleanup* option. The default
+ object size is 4 MB, and the default number of simulated threads
+ (parallel writes) is 16. The *--run-name <label>* option is useful
+ for benchmarking a workload test from multiple clients. The *<label>*
+ is an arbitrary object name. It is "benchmark_last_metadata" by
+ default, and is used as the underlying object name for "read" and
+ "write" ops.
+  Note: the -b *objsize* option is valid only in *write* mode.
+  Note: *write* and *seq* must be run on the same host, otherwise the
+  objects created by *write* will have names that will fail *seq*.
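+
+  For example (illustrative), run a 60 second write benchmark and then read
+  the objects back sequentially::
+
+    rados -p foo bench 60 write --no-cleanup
+    rados -p foo bench 60 seq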
+
+:command:`cleanup` [ --run-name *run_name* ] [ --prefix *prefix* ]
+ Clean up a previous benchmark operation.
+ Note: the default run-name is "benchmark_last_metadata"
+
+:command:`listxattr` *name*
+ List all extended attributes of an object.
+
+:command:`getxattr` *name* *attr*
+ Dump the extended attribute value of *attr* of an object.
+
+:command:`setxattr` *name* *attr* *value*
+ Set the value of *attr* in the extended attributes of an object.
+
+:command:`rmxattr` *name* *attr*
+ Remove *attr* from the extended attributes of an object.
+
+:command:`stat` *name*
+  Get the stat (i.e. mtime, size) of the given object
+
+:command:`stat2` *name*
+  Get the stat of the given object (similar to stat, but with high-precision time)
+
+:command:`listomapkeys` *name*
+ List all the keys stored in the object map of object name.
+
+:command:`listomapvals` *name*
+ List all key/value pairs stored in the object map of object name.
+ The values are dumped in hexadecimal.
+
+:command:`getomapval` [ --omap-key-file *file* ] *name* *key* [ *out-file* ]
+ Dump the hexadecimal value of key in the object map of object name.
+ If the optional *out-file* argument is not provided, the value will be
+ written to standard output.
+
+:command:`setomapval` [ --omap-key-file *file* ] *name* *key* [ *value* ]
+ Set the value of key in the object map of object name. If the optional
+ *value* argument is not provided, the value will be read from standard
+ input.
+
+:command:`rmomapkey` [ --omap-key-file *file* ] *name* *key*
+ Remove key from the object map of object name.
+
+:command:`getomapheader` *name*
+ Dump the hexadecimal value of the object map header of object name.
+
+:command:`setomapheader` *name* *value*
+ Set the value of the object map header of object name.
+
+:command:`export` *filename*
+  Serialize pool contents to a file or standard output.
+
+:command:`import` [--dry-run] [--no-overwrite] < filename | - >
+ Load pool contents from a file or standard input
+
+
+Examples
+========
+
+To view cluster utilization::
+
+ rados df
+
+To get a list of objects in pool foo sent to stdout::
+
+ rados -p foo ls -
+
+To get a list of objects in PG 0.6::
+
+ rados --pgid 0.6 ls
+
+To write an object::
+
+ rados -p foo put myobject blah.txt
+
+To create a snapshot::
+
+ rados -p foo mksnap mysnap
+
+To delete the object::
+
+ rados -p foo rm myobject
+
+To read a previously snapshotted version of an object::
+
+ rados -p foo -s mysnap get myobject blah.txt.old
+
+To list inconsistent objects in PG 0.6::
+
+ rados list-inconsistent-obj 0.6 --format=json-pretty
+
+
+Availability
+============
+
+**rados** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
+the Ceph documentation at http://ceph.com/docs for more information.
+
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8)
diff --git a/doc/man/8/radosgw-admin.rst b/doc/man/8/radosgw-admin.rst
new file mode 100644
index 000000000..6fa2aba3e
--- /dev/null
+++ b/doc/man/8/radosgw-admin.rst
@@ -0,0 +1,1023 @@
+:orphan:
+
+=================================================================
+ radosgw-admin -- rados REST gateway user administration utility
+=================================================================
+
+.. program:: radosgw-admin
+
+Synopsis
+========
+
+| **radosgw-admin** *command* [ *options* *...* ]
+
+
+Description
+===========
+
+:program:`radosgw-admin` is a RADOS gateway user administration utility. It
+allows creating and modifying users.
+
+
+Commands
+========
+
+The :program:`radosgw-admin` utility provides many commands for administration
+purposes, which are as follows:
+
+:command:`user create`
+ Create a new user.
+
+:command:`user modify`
+ Modify a user.
+
+:command:`user info`
+ Display information of a user, and any potentially available
+ subusers and keys.
+
+:command:`user rename`
+ Renames a user.
+
+:command:`user rm`
+ Remove a user.
+
+:command:`user suspend`
+ Suspend a user.
+
+:command:`user enable`
+ Re-enable user after suspension.
+
+:command:`user check`
+ Check user info.
+
+:command:`user stats`
+ Show user stats as accounted by quota subsystem.
+
+:command:`user list`
+ List all users.
+
+:command:`caps add`
+ Add user capabilities.
+
+:command:`caps rm`
+ Remove user capabilities.
+
+:command:`subuser create`
+ Create a new subuser (primarily useful for clients using the Swift API).
+
+:command:`subuser modify`
+ Modify a subuser.
+
+:command:`subuser rm`
+ Remove a subuser.
+
+:command:`key create`
+ Create access key.
+
+:command:`key rm`
+ Remove access key.
+
+:command:`bucket list`
+  List buckets, or, if a bucket is specified with --bucket=<bucket>,
+  list its objects. If a bucket is specified, adding --allow-unordered
+  removes the ordering requirement, possibly generating results more
+  quickly for buckets with a large number of objects.
+
+:command:`bucket limit check`
+ Show bucket sharding stats.
+
+:command:`bucket link`
+ Link bucket to specified user.
+
+:command:`bucket unlink`
+ Unlink bucket from specified user.
+
+:command:`bucket chown`
+ Link bucket to specified user and update object ACLs.
+ Use --marker to resume if command gets interrupted.
+
+:command:`bucket stats`
+ Returns bucket statistics.
+
+:command:`bucket rm`
+ Remove a bucket.
+
+:command:`bucket check`
+ Check bucket index.
+
+:command:`bucket rewrite`
+ Rewrite all objects in the specified bucket.
+
+:command:`bucket radoslist`
+  List the rados objects that contain the data for all objects in
+  the designated bucket, if --bucket=<bucket> is specified, or
+  otherwise all buckets.
+
+:command:`bucket reshard`
+ Reshard a bucket.
+
+:command:`bucket sync disable`
+ Disable bucket sync.
+
+:command:`bucket sync enable`
+ Enable bucket sync.
+
+:command:`bi get`
+ Retrieve bucket index object entries.
+
+:command:`bi put`
+ Store bucket index object entries.
+
+:command:`bi list`
+ List raw bucket index entries.
+
+:command:`bi purge`
+ Purge bucket index entries.
+
+:command:`object rm`
+ Remove an object.
+
+:command:`object stat`
+ Stat an object for its metadata.
+
+:command:`object unlink`
+ Unlink object from bucket index.
+
+:command:`object rewrite`
+ Rewrite the specified object.
+
+:command:`objects expire`
+ Run expired objects cleanup.
+
+:command:`period rm`
+ Remove a period.
+
+:command:`period get`
+ Get the period info.
+
+:command:`period get-current`
+ Get the current period info.
+
+:command:`period pull`
+ Pull a period.
+
+:command:`period push`
+ Push a period.
+
+:command:`period list`
+ List all periods.
+
+:command:`period update`
+ Update the staging period.
+
+:command:`period commit`
+ Commit the staging period.
+
+:command:`quota set`
+ Set quota params.
+
+:command:`quota enable`
+ Enable quota.
+
+:command:`quota disable`
+ Disable quota.
+
+:command:`global quota get`
+ View global quota parameters.
+
+:command:`global quota set`
+ Set global quota parameters.
+
+:command:`global quota enable`
+ Enable a global quota.
+
+:command:`global quota disable`
+ Disable a global quota.
+
+:command:`realm create`
+ Create a new realm.
+
+:command:`realm rm`
+ Remove a realm.
+
+:command:`realm get`
+ Show the realm info.
+
+:command:`realm get-default`
+ Get the default realm name.
+
+:command:`realm list`
+ List all realms.
+
+:command:`realm list-periods`
+ List all realm periods.
+
+:command:`realm rename`
+ Rename a realm.
+
+:command:`realm set`
+ Set the realm info (requires infile).
+
+:command:`realm default`
+ Set the realm as default.
+
+:command:`realm pull`
+ Pull a realm and its current period.
+
+:command:`zonegroup add`
+ Add a zone to a zonegroup.
+
+:command:`zonegroup create`
+ Create a new zone group info.
+
+:command:`zonegroup default`
+ Set the default zone group.
+
+:command:`zonegroup rm`
+ Remove a zone group info.
+
+:command:`zonegroup get`
+ Show the zone group info.
+
+:command:`zonegroup modify`
+ Modify an existing zonegroup.
+
+:command:`zonegroup set`
+ Set the zone group info (requires infile).
+
+:command:`zonegroup remove`
+ Remove a zone from a zonegroup.
+
+:command:`zonegroup rename`
+ Rename a zone group.
+
+:command:`zonegroup list`
+ List all zone groups set on this cluster.
+
+:command:`zonegroup placement list`
+ List zonegroup's placement targets.
+
+:command:`zonegroup placement add`
+ Add a placement target id to a zonegroup.
+
+:command:`zonegroup placement modify`
+ Modify a placement target of a specific zonegroup.
+
+:command:`zonegroup placement rm`
+ Remove a placement target from a zonegroup.
+
+:command:`zonegroup placement default`
+ Set a zonegroup's default placement target.
+
+:command:`zone create`
+ Create a new zone.
+
+:command:`zone rm`
+ Remove a zone.
+
+:command:`zone get`
+ Show zone cluster params.
+
+:command:`zone set`
+ Set zone cluster params (requires infile).
+
+:command:`zone modify`
+ Modify an existing zone.
+
+:command:`zone list`
+ List all zones set on this cluster.
+
+:command:`metadata sync status`
+ Get metadata sync status.
+
+:command:`metadata sync init`
+ Init metadata sync.
+
+:command:`metadata sync run`
+ Run metadata sync.
+
+:command:`data sync status`
+ Get data sync status of the specified source zone.
+
+:command:`data sync init`
+ Init data sync for the specified source zone.
+
+:command:`data sync run`
+ Run data sync for the specified source zone.
+
+:command:`sync error list`
+  List sync errors.
+
+:command:`sync error trim`
+  Trim sync errors.
+
+:command:`zone rename`
+ Rename a zone.
+
+:command:`zone placement list`
+ List zone's placement targets.
+
+:command:`zone placement add`
+ Add a zone placement target.
+
+:command:`zone placement modify`
+ Modify a zone placement target.
+
+:command:`zone placement rm`
+ Remove a zone placement target.
+
+:command:`pool add`
+ Add an existing pool for data placement.
+
+:command:`pool rm`
+ Remove an existing pool from data placement set.
+
+:command:`pools list`
+ List placement active set.
+
+:command:`policy`
+ Display bucket/object policy.
+
+:command:`log list`
+ List log objects.
+
+:command:`log show`
+  Dump a log from specific object or (bucket + date + bucket-id).
+  (NOTE: the date must be specified in the format "YYYY-MM-DD-hh".)
+
+:command:`log rm`
+ Remove log object.
+
+:command:`usage show`
+ Show the usage information (with optional user and date range).
+
+:command:`usage trim`
+ Trim usage information (with optional user and date range).
+
+:command:`gc list`
+ Dump expired garbage collection objects (specify --include-all to list all
+ entries, including unexpired).
+
+:command:`gc process`
+ Manually process garbage.
+
+:command:`lc list`
+ List all bucket lifecycle progress.
+
+:command:`lc process`
+ Manually process lifecycle.
+
+:command:`metadata get`
+ Get metadata info.
+
+:command:`metadata put`
+ Put metadata info.
+
+:command:`metadata rm`
+ Remove metadata info.
+
+:command:`metadata list`
+ List metadata info.
+
+:command:`mdlog list`
+ List metadata log.
+
+:command:`mdlog trim`
+ Trim metadata log.
+
+:command:`mdlog status`
+ Read metadata log status.
+
+:command:`bilog list`
+ List bucket index log.
+
+:command:`bilog trim`
+ Trim bucket index log (use start-marker, end-marker).
+
+:command:`datalog list`
+ List data log.
+
+:command:`datalog trim`
+ Trim data log.
+
+:command:`datalog status`
+ Read data log status.
+
+:command:`orphans find`
+ Init and run search for leaked rados objects.
+ DEPRECATED. See the "rgw-orphan-list" tool.
+
+:command:`orphans finish`
+ Clean up search for leaked rados objects.
+ DEPRECATED. See the "rgw-orphan-list" tool.
+
+:command:`orphans list-jobs`
+ List the current job-ids for the orphans search.
+ DEPRECATED. See the "rgw-orphan-list" tool.
+
+:command:`role create`
+ create a new AWS role for use with STS.
+
+:command:`role rm`
+ Remove a role.
+
+:command:`role get`
+ Get a role.
+
+:command:`role list`
+ List the roles with specified path prefix.
+
+:command:`role modify`
+ Modify the assume role policy of an existing role.
+
+:command:`role-policy put`
+ Add/update permission policy to role.
+
+:command:`role-policy list`
+ List the policies attached to a role.
+
+:command:`role-policy get`
+ Get the specified inline policy document embedded with the given role.
+
+:command:`role-policy rm`
+  Remove the policy attached to a role.
+
+:command:`reshard add`
+  Schedule a resharding of a bucket.
+
+:command:`reshard list`
+  List all buckets that are being resharded or are scheduled for resharding.
+
+:command:`reshard process`
+  Process scheduled reshard jobs.
+
+:command:`reshard status`
+  Show the resharding status of a bucket.
+
+:command:`reshard cancel`
+  Cancel resharding of a bucket.
+
+:command:`topic list`
+  List bucket notifications/pubsub topics.
+
+:command:`topic get`
+  Get a bucket notifications/pubsub topic.
+
+:command:`topic rm`
+  Remove a bucket notifications/pubsub topic.
+
+:command:`subscription get`
+  Get a pubsub subscription definition.
+
+:command:`subscription rm`
+  Remove a pubsub subscription.
+
+:command:`subscription pull`
+  Show events in a pubsub subscription.
+
+:command:`subscription ack`
+  Ack (remove) an event in a pubsub subscription.
+
+
+Options
+=======
+
+.. option:: -c ceph.conf, --conf=ceph.conf
+
+ Use ``ceph.conf`` configuration file instead of the default
+ ``/etc/ceph/ceph.conf`` to determine monitor addresses during
+ startup.
+
+.. option:: -m monaddress[:port]
+
+ Connect to specified monitor (instead of looking through ceph.conf).
+
+.. option:: --tenant=<tenant>
+
+ Name of the tenant.
+
+.. option:: --uid=uid
+
+ The radosgw user ID.
+
+.. option:: --new-uid=uid
+
+ ID of the new user. Used with 'user rename' command.
+
+.. option:: --subuser=<name>
+
+ Name of the subuser.
+
+.. option:: --access-key=<key>
+
+ S3 access key.
+
+.. option:: --email=email
+
+ The e-mail address of the user.
+
+.. option:: --secret/--secret-key=<key>
+
+ The secret key.
+
+.. option:: --gen-access-key
+
+ Generate random access key (for S3).
+
+.. option:: --gen-secret
+
+ Generate random secret key.
+
+.. option:: --key-type=<type>
+
+ key type, options are: swift, s3.
+
+.. option:: --temp-url-key[-2]=<key>
+
+ Temporary url key.
+
+.. option:: --max-buckets
+
+ max number of buckets for a user (0 for no limit, negative value to disable bucket creation).
+ Default is 1000.
+
+.. option:: --access=<access>
+
+ Set the access permissions for the sub-user.
+ Available access permissions are read, write, readwrite and full.
+
+.. option:: --display-name=<name>
+
+ The display name of the user.
+
+.. option:: --admin
+
+ Set the admin flag on the user.
+
+.. option:: --system
+
+ Set the system flag on the user.
+
+.. option:: --bucket=[tenant-id/]bucket
+
+ Specify the bucket name. If tenant-id is not specified, the tenant-id
+ of the user (--uid) is used.
+
+.. option:: --pool=<pool>
+
+ Specify the pool name.
+ Also used with `orphans find` as data pool to scan for leaked rados objects.
+
+.. option:: --object=object
+
+ Specify the object name.
+
+.. option:: --date=yyyy-mm-dd
+
+ The date in the format yyyy-mm-dd.
+
+.. option:: --start-date=yyyy-mm-dd
+
+ The start date in the format yyyy-mm-dd.
+
+.. option:: --end-date=yyyy-mm-dd
+
+ The end date in the format yyyy-mm-dd.
+
+.. option:: --bucket-id=<bucket-id>
+
+ Specify the bucket id.
+
+.. option:: --bucket-new-name=[tenant-id/]<bucket>
+
+ Optional for `bucket link`; use to rename a bucket.
+ While tenant-id/ can be specified, this is never
+ necessary for normal operation.
+
+.. option:: --shard-id=<shard-id>
+
+ Optional for mdlog list, bi list, data sync status. Required for ``mdlog trim``.
+
+.. option:: --max-entries=<entries>
+
+  Optional for listing operations to specify the max entries.
+
+.. option:: --purge-data
+
+ When specified, user removal will also purge all the user data.
+
+.. option:: --purge-keys
+
+ When specified, subuser removal will also purge all the subuser keys.
+
+.. option:: --purge-objects
+
+ When specified, the bucket removal will also purge all objects in it.
+
+.. option:: --metadata-key=<key>
+
+ Key to retrieve metadata from with ``metadata get``.
+
+.. option:: --remote=<remote>
+
+ Zone or zonegroup id of remote gateway.
+
+.. option:: --period=<id>
+
+ Period id.
+
+.. option:: --url=<url>
+
+ url for pushing/pulling period or realm.
+
+.. option:: --epoch=<number>
+
+ Period epoch.
+
+.. option:: --commit
+
+ Commit the period during 'period update'.
+
+.. option:: --staging
+
+ Get the staging period info.
+
+.. option:: --master
+
+ Set as master.
+
+.. option:: --master-zone=<id>
+
+ Master zone id.
+
+.. option:: --rgw-realm=<name>
+
+ The realm name.
+
+.. option:: --realm-id=<id>
+
+ The realm id.
+
+.. option:: --realm-new-name=<name>
+
+ New name of realm.
+
+.. option:: --rgw-zonegroup=<name>
+
+ The zonegroup name.
+
+.. option:: --zonegroup-id=<id>
+
+ The zonegroup id.
+
+.. option:: --zonegroup-new-name=<name>
+
+ The new name of the zonegroup.
+
+.. option:: --rgw-zone=<zone>
+
+ Zone in which radosgw is running.
+
+.. option:: --zone-id=<id>
+
+ The zone id.
+
+.. option:: --zone-new-name=<name>
+
+ The new name of the zone.
+
+.. option:: --source-zone
+
+ The source zone for data sync.
+
+.. option:: --default
+
+ Set the entity (realm, zonegroup, zone) as default.
+
+.. option:: --read-only
+
+ Set the zone as read-only when adding to the zonegroup.
+
+.. option:: --placement-id
+
+ Placement id for the zonegroup placement commands.
+
+.. option:: --tags=<list>
+
+ The list of tags for zonegroup placement add and modify commands.
+
+.. option:: --tags-add=<list>
+
+ The list of tags to add for zonegroup placement modify command.
+
+.. option:: --tags-rm=<list>
+
+ The list of tags to remove for zonegroup placement modify command.
+
+.. option:: --endpoints=<list>
+
+ The zone endpoints.
+
+.. option:: --index-pool=<pool>
+
+ The placement target index pool.
+
+.. option:: --data-pool=<pool>
+
+ The placement target data pool.
+
+.. option:: --data-extra-pool=<pool>
+
+ The placement target data extra (non-ec) pool.
+
+.. option:: --placement-index-type=<type>
+
+ The placement target index type (normal, indexless, or #id).
+
+.. option:: --tier-type=<type>
+
+ The zone tier type.
+
+.. option:: --tier-config=<k>=<v>[,...]
+
+ Set zone tier config keys, values.
+
+.. option:: --tier-config-rm=<k>[,...]
+
+ Unset zone tier config keys.
+
+.. option:: --sync-from-all[=false]
+
+ Set/reset whether zone syncs from all zonegroup peers.
+
+.. option:: --sync-from=[zone-name][,...]
+
+ Set the list of zones to sync from.
+
+.. option:: --sync-from-rm=[zone-name][,...]
+
+ Remove the zones from list of zones to sync from.
+
+.. option:: --bucket-index-max-shards
+
+ Override a zone's or zonegroup's default number of bucket index shards. This
+ option is accepted by the 'zone create', 'zone modify', 'zonegroup add',
+ and 'zonegroup modify' commands, and applies to buckets that are created
+ after the zone/zonegroup changes take effect.
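+
+  For example (illustrative; the zone name and shard count are placeholders)::
+
+    radosgw-admin zone modify --rgw-zone=default --bucket-index-max-shards=16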
+
+.. option:: --fix
+
+ Besides checking bucket index, will also fix it.
+
+.. option:: --check-objects
+
+ bucket check: Rebuilds bucket index according to actual objects state.
+
+.. option:: --format=<format>
+
+ Specify output format for certain operations. Supported formats: xml, json.
+
+.. option:: --sync-stats
+
+  Option for the 'user stats' command. When specified, it will update user
+  stats with the current stats reported by the user's bucket indexes.
+
+.. option:: --show-log-entries=<flag>
+
+ Enable/disable dump of log entries on log show.
+
+.. option:: --show-log-sum=<flag>
+
+ Enable/disable dump of log summation on log show.
+
+.. option:: --skip-zero-entries
+
+  Log show only dumps entries that don't have a zero value in one of the
+  numeric fields.
+
+.. option:: --infile
+
+ Specify a file to read in when setting data.
+
+.. option:: --categories=<list>
+
+ Comma separated list of categories, used in usage show.
+
+.. option:: --caps=<caps>
+
+  List of caps (e.g., "usage=read, write; user=read").
+
+.. option:: --compression=<compression-algorithm>
+
+ Placement target compression algorithm (lz4|snappy|zlib|zstd)
+
+.. option:: --yes-i-really-mean-it
+
+ Required for certain operations.
+
+.. option:: --min-rewrite-size
+
+ Specify the min object size for bucket rewrite (default 4M).
+
+.. option:: --max-rewrite-size
+
+ Specify the max object size for bucket rewrite (default ULLONG_MAX).
+
+.. option:: --min-rewrite-stripe-size
+
+ Specify the min stripe size for object rewrite (default 0). If the value
+ is set to 0, then the specified object will always be
+ rewritten for restriping.
+
+.. option:: --warnings-only
+
+ When specified with bucket limit check,
+ list only buckets nearing or over the current max objects per shard value.
+
+.. option:: --bypass-gc
+
+  When specified with bucket deletion, delete objects directly rather than
+  through garbage collection (GC).
+
+.. option:: --inconsistent-index
+
+  When specified with bucket deletion and --bypass-gc set to true, ignore
+  bucket index consistency.
+
+.. option:: --max-concurrent-ios
+
+ Maximum concurrent ios for bucket operations. Affects operations that
+ scan the bucket index, e.g., listing, deletion, and all scan/search
+ operations such as finding orphans or checking the bucket index.
+ Default is 32.
+
+Quota Options
+=============
+
+.. option:: --max-objects
+
+ Specify max objects (negative value to disable).
+
+.. option:: --max-size
+
+ Specify max size (in B/K/M/G/T, negative value to disable).
+
+.. option:: --quota-scope
+
+ The scope of quota (bucket, user).
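+
+For example (the user id is illustrative), a user quota might be configured and
+enabled with the ``quota set`` and ``quota enable`` subcommands::
+
+  $ radosgw-admin quota set --quota-scope=user --uid=johnny --max-objects=1024 --max-size=10G
+  $ radosgw-admin quota enable --quota-scope=user --uid=johnny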
+
+
+Orphans Search Options
+======================
+
+.. option:: --num-shards
+
+  Number of shards to use for keeping the temporary scan info.
+
+.. option:: --orphan-stale-secs
+
+ Number of seconds to wait before declaring an object to be an orphan.
+ Default is 86400 (24 hours).
+
+.. option:: --job-id
+
+  Set the job id (for 'orphans find').
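+
+For example (the pool name and job id are illustrative), an orphan scan might be
+started with::
+
+  $ radosgw-admin orphans find --pool=default.rgw.buckets.data --job-id=abc123 --num-shards=64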
+
+
+Orphans list-jobs options
+=========================
+
+.. option:: --extra-info
+
+ Provide extra info in the job list.
+
+
+Role Options
+============
+
+.. option:: --role-name
+
+ The name of the role to create.
+
+.. option:: --path
+
+ The path to the role.
+
+.. option:: --assume-role-policy-doc
+
+ The trust relationship policy document that grants an entity permission to
+ assume the role.
+
+.. option:: --policy-name
+
+ The name of the policy document.
+
+.. option:: --policy-doc
+
+ The permission policy document.
+
+.. option:: --path-prefix
+
+ The path prefix for filtering the roles.
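+
+As an illustration (the role name, path, and trust policy are placeholders), a
+role might be created with::
+
+  $ radosgw-admin role create --role-name=S3Access --path=/application_abc/ \
+      --assume-role-policy-doc='{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["arn:aws:iam:::user/TESTER"]},"Action":["sts:AssumeRole"]}]}'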
+
+
+Bucket Notifications/PubSub Options
+===================================
+
+.. option:: --topic
+
+ The bucket notifications/pubsub topic name.
+
+.. option:: --subscription
+
+ The pubsub subscription name.
+
+.. option:: --event-id
+
+ The event id in a pubsub subscription.
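+
+For example (the topic name is illustrative), existing topics might be listed
+and one of them removed with::
+
+  $ radosgw-admin topic list
+  $ radosgw-admin topic rm --topic=mytopic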
+
+
+Examples
+========
+
+Generate a new user::
+
+ $ radosgw-admin user create --display-name="johnny rotten" --uid=johnny
+ { "user_id": "johnny",
+ "rados_uid": 0,
+ "display_name": "johnny rotten",
+ "email": "",
+ "suspended": 0,
+ "subusers": [],
+ "keys": [
+ { "user": "johnny",
+ "access_key": "TCICW53D9BQ2VGC46I44",
+ "secret_key": "tfm9aHMI8X76L3UdgE+ZQaJag1vJQmE6HDb5Lbrz"}],
+ "swift_keys": []}
+
+Remove a user::
+
+ $ radosgw-admin user rm --uid=johnny
+
+Rename a user::
+
+  $ radosgw-admin user rename --uid=johnny --new-uid=joe
+
+Remove a user and all associated buckets with their contents::
+
+ $ radosgw-admin user rm --uid=johnny --purge-data
+
+Remove a bucket::
+
+ $ radosgw-admin bucket rm --bucket=foo
+
+Link bucket to specified user::
+
+ $ radosgw-admin bucket link --bucket=foo --bucket_id=<bucket id> --uid=johnny
+
+Unlink bucket from specified user::
+
+ $ radosgw-admin bucket unlink --bucket=foo --uid=johnny
+
+Rename a bucket::
+
+ $ radosgw-admin bucket link --bucket=foo --bucket-new-name=bar --uid=johnny
+
+Move a bucket from the old global tenant space to a specified tenant::
+
+  $ radosgw-admin bucket link --bucket=/foo --uid='12345678$12345678'
+
+Link bucket to specified user and change object ACLs::
+
+  $ radosgw-admin bucket chown --bucket=/foo --uid='12345678$12345678'
+
+Show the logs of a bucket from April 1st, 2012::
+
+ $ radosgw-admin log show --bucket=foo --date=2012-04-01-01 --bucket-id=default.14193.1
+
+Show usage information for user from March 1st to (but not including) April 1st, 2012::
+
+ $ radosgw-admin usage show --uid=johnny \
+ --start-date=2012-03-01 --end-date=2012-04-01
+
+Show only summary of usage information for all users::
+
+ $ radosgw-admin usage show --show-log-entries=false
+
+Trim usage information for user until March 1st, 2012::
+
+ $ radosgw-admin usage trim --uid=johnny --end-date=2012-04-01
+
+
+Availability
+============
+
+:program:`radosgw-admin` is part of Ceph, a massively scalable, open-source,
+distributed storage system. Please refer to the Ceph documentation at
+http://ceph.com/docs for more information.
+
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8)
+:doc:`radosgw <radosgw>`\(8)
diff --git a/doc/man/8/radosgw.rst b/doc/man/8/radosgw.rst
new file mode 100644
index 000000000..84a1aea27
--- /dev/null
+++ b/doc/man/8/radosgw.rst
@@ -0,0 +1,253 @@
+:orphan:
+
+===============================
+ radosgw -- rados REST gateway
+===============================
+
+.. program:: radosgw
+
+Synopsis
+========
+
+| **radosgw**
+
+
+Description
+===========
+
+:program:`radosgw` is an HTTP REST gateway for the RADOS object store, a part
+of the Ceph distributed storage system. It is implemented as a FastCGI
+module using libfcgi, and can be used in conjunction with any FastCGI
+capable web server.
+
+
+Options
+=======
+
+.. option:: -c ceph.conf, --conf=ceph.conf
+
+ Use ``ceph.conf`` configuration file instead of the default
+ ``/etc/ceph/ceph.conf`` to determine monitor addresses during startup.
+
+.. option:: -m monaddress[:port]
+
+ Connect to specified monitor (instead of looking through ``ceph.conf``).
+
+.. option:: -i ID, --id ID
+
+ Set the ID portion of name for radosgw
+
+.. option:: -n TYPE.ID, --name TYPE.ID
+
+  Set the rados user name for the gateway (e.g. client.radosgw.gateway).
+
+.. option:: --cluster NAME
+
+ Set the cluster name (default: ceph)
+
+.. option:: -d
+
+ Run in foreground, log to stderr
+
+.. option:: -f
+
+ Run in foreground, log to usual location
+
+.. option:: --rgw-socket-path=path
+
+ Specify a unix domain socket path.
+
+.. option:: --rgw-region=region
+
+ The region where radosgw runs
+
+.. option:: --rgw-zone=zone
+
+ The zone where radosgw runs
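+
+For example (the instance name is illustrative), the gateway might be started in
+the foreground for debugging with::
+
+    radosgw -c /etc/ceph/ceph.conf -n client.radosgw.gateway -d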
+
+
+Configuration
+=============
+
+Earlier, the RADOS Gateway had to be configured with ``Apache`` and
+``mod_fastcgi``. Now the ``mod_proxy_fcgi`` module is used instead of
+``mod_fastcgi``. ``mod_proxy_fcgi`` works differently from a traditional
+FastCGI module: it requires ``mod_proxy``, which provides support for the
+FastCGI protocol. So, to handle the FastCGI protocol, both ``mod_proxy`` and
+``mod_proxy_fcgi`` must be present on the server. Unlike ``mod_fastcgi``,
+``mod_proxy_fcgi`` cannot start the application process; some platforms provide
+``fcgistarter`` for that purpose, and external launching or process management
+may also be available in the FastCGI application framework in use.
+
+``Apache`` can be configured so that ``mod_proxy_fcgi`` is used either over
+localhost TCP or through a UNIX domain socket. Versions of ``mod_proxy_fcgi``
+that do not support UNIX domain sockets, such as those in Apache 2.2 and early
+versions of Apache 2.4, must be configured to use localhost TCP. Apache 2.4.9
+and later support UNIX domain sockets and therefore allow configuration with a
+UNIX domain socket instead of localhost TCP.
+
+The following steps show the configuration in Ceph's configuration file, i.e.,
+``/etc/ceph/ceph.conf``, and in the gateway configuration file, i.e.,
+``/etc/httpd/conf.d/rgw.conf`` (RPM-based distros) or
+``/etc/apache2/conf-available/rgw.conf`` (Debian-based distros), with localhost
+TCP and through a UNIX domain socket:
+
+#. For distros with Apache 2.2 and early versions of Apache 2.4 that use
+ localhost TCP and do not support Unix Domain Socket, append the following
+ contents to ``/etc/ceph/ceph.conf``::
+
+ [client.radosgw.gateway]
+ host = {hostname}
+ keyring = /etc/ceph/ceph.client.radosgw.keyring
+ rgw socket path = ""
+ log file = /var/log/ceph/client.radosgw.gateway.log
+ rgw frontends = fastcgi socket_port=9000 socket_host=0.0.0.0
+ rgw print continue = false
+
+#. Add the following content in the gateway configuration file:
+
+ For Debian/Ubuntu add in ``/etc/apache2/conf-available/rgw.conf``::
+
+ <VirtualHost *:80>
+ ServerName localhost
+ DocumentRoot /var/www/html
+
+ ErrorLog /var/log/apache2/rgw_error.log
+ CustomLog /var/log/apache2/rgw_access.log combined
+
+ # LogLevel debug
+
+ RewriteEngine On
+
+ RewriteRule .* - [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]
+
+ SetEnv proxy-nokeepalive 1
+
+ ProxyPass / fcgi://localhost:9000/
+
+ </VirtualHost>
+
+ For CentOS/RHEL add in ``/etc/httpd/conf.d/rgw.conf``::
+
+ <VirtualHost *:80>
+ ServerName localhost
+ DocumentRoot /var/www/html
+
+ ErrorLog /var/log/httpd/rgw_error.log
+ CustomLog /var/log/httpd/rgw_access.log combined
+
+ # LogLevel debug
+
+ RewriteEngine On
+
+ RewriteRule .* - [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]
+
+ SetEnv proxy-nokeepalive 1
+
+ ProxyPass / fcgi://localhost:9000/
+
+ </VirtualHost>
+
+#. For distros with Apache 2.4.9 or later that support Unix Domain Socket,
+ append the following configuration to ``/etc/ceph/ceph.conf``::
+
+ [client.radosgw.gateway]
+ host = {hostname}
+ keyring = /etc/ceph/ceph.client.radosgw.keyring
+ rgw socket path = /var/run/ceph/ceph.radosgw.gateway.fastcgi.sock
+ log file = /var/log/ceph/client.radosgw.gateway.log
+ rgw print continue = false
+
+#. Add the following content in the gateway configuration file:
+
+ For CentOS/RHEL add in ``/etc/httpd/conf.d/rgw.conf``::
+
+ <VirtualHost *:80>
+ ServerName localhost
+ DocumentRoot /var/www/html
+
+ ErrorLog /var/log/httpd/rgw_error.log
+ CustomLog /var/log/httpd/rgw_access.log combined
+
+ # LogLevel debug
+
+ RewriteEngine On
+
+ RewriteRule .* - [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]
+
+ SetEnv proxy-nokeepalive 1
+
+ ProxyPass / unix:///var/run/ceph/ceph.radosgw.gateway.fastcgi.sock|fcgi://localhost:9000/
+
+ </VirtualHost>
+
+   Please note that ``Apache 2.4.7`` does not have Unix Domain Socket support
+   and must therefore be configured with localhost TCP; Unix Domain Socket
+   support is available in ``Apache 2.4.9`` and later versions.
+
+#. Generate a key for radosgw to use for authentication with the cluster. ::
+
+ ceph-authtool -C -n client.radosgw.gateway --gen-key /etc/ceph/keyring.radosgw.gateway
+ ceph-authtool -n client.radosgw.gateway --cap mon 'allow rw' --cap osd 'allow rwx' /etc/ceph/keyring.radosgw.gateway
+
+#. Add the key to the auth entries. ::
+
+ ceph auth add client.radosgw.gateway --in-file=keyring.radosgw.gateway
+
+#. Start Apache and radosgw.
+
+ Debian/Ubuntu::
+
+ sudo /etc/init.d/apache2 start
+ sudo /etc/init.d/radosgw start
+
+ CentOS/RHEL::
+
+ sudo apachectl start
+ sudo /etc/init.d/ceph-radosgw start
+
+Usage Logging
+=============
+
+:program:`radosgw` maintains an asynchronous usage log. It accumulates
+statistics about user operations and flushes them periodically. The
+log can be accessed and managed through :program:`radosgw-admin`.
+
+The logged information includes total data transfer, total operations, and
+total successful operations. The data is accounted at an hourly resolution
+under the bucket owner, unless the operation was done on the service (e.g.,
+when listing a bucket), in which case it is accounted under the operating
+user.
+
+Following is an example configuration::
+
+ [client.radosgw.gateway]
+ rgw enable usage log = true
+ rgw usage log tick interval = 30
+ rgw usage log flush threshold = 1024
+ rgw usage max shards = 32
+ rgw usage max user shards = 1
+
+
+The total number of shards determines how many objects hold the usage log
+information in total. The per-user number of shards specifies how many objects
+hold usage information for a single user. The tick interval configures the
+number of seconds between log flushes, and the flush threshold specifies how
+many entries can be kept before resorting to a synchronous flush.
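+
+The accumulated usage information can then be inspected with
+:program:`radosgw-admin`, for example (the user id is illustrative)::
+
+    radosgw-admin usage show --uid=johnny --start-date=2012-03-01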
+
+
+Availability
+============
+
+:program:`radosgw` is part of Ceph, a massively scalable, open-source, distributed
+storage system. Please refer to the Ceph documentation at http://ceph.com/docs for
+more information.
+
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8)
+:doc:`radosgw-admin <radosgw-admin>`\(8)
diff --git a/doc/man/8/rbd-fuse.rst b/doc/man/8/rbd-fuse.rst
new file mode 100644
index 000000000..5cdb1f735
--- /dev/null
+++ b/doc/man/8/rbd-fuse.rst
@@ -0,0 +1,61 @@
+:orphan:
+
+=======================================
+ rbd-fuse -- expose rbd images as files
+=======================================
+
+.. program:: rbd-fuse
+
+Synopsis
+========
+
+| **rbd-fuse** [ -p pool ] [-c conffile] *mountpoint* [ *fuse options* ]
+
+
+Note
+====
+
+**rbd-fuse** is not recommended for any production or high performance workloads.
+
+Description
+===========
+
+**rbd-fuse** is a FUSE ("Filesystem in USErspace") client for RADOS
+block device (rbd) images. Given a pool containing rbd images,
+it will mount a userspace file system allowing access to those images
+as regular files at **mountpoint**.
+
+The file system can be unmounted with::
+
+ fusermount -u mountpoint
+
+or by sending ``SIGINT`` to the ``rbd-fuse`` process.
+
+
+Options
+=======
+
+Any options not recognized by rbd-fuse will be passed on to libfuse.
+
+.. option:: -c ceph.conf
+
+ Use *ceph.conf* configuration file instead of the default
+ ``/etc/ceph/ceph.conf`` to determine monitor addresses during startup.
+
+.. option:: -p pool
+
+ Use *pool* as the pool to search for rbd images. Default is ``rbd``.
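+
+For example (the mount point is illustrative), the images in the default ``rbd``
+pool might be exposed with::
+
+   rbd-fuse -c /etc/ceph/ceph.conf -p rbd /mnt/rbd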
+
+
+Availability
+============
+
+**rbd-fuse** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
+the Ceph documentation at http://ceph.com/docs for more information.
+
+
+See also
+========
+
+fusermount(8),
+:doc:`rbd <rbd>`\(8)
diff --git a/doc/man/8/rbd-ggate.rst b/doc/man/8/rbd-ggate.rst
new file mode 100644
index 000000000..67d0c81e8
--- /dev/null
+++ b/doc/man/8/rbd-ggate.rst
@@ -0,0 +1,79 @@
+:orphan:
+
+==================================================
+ rbd-ggate -- map rbd images via FreeBSD GEOM Gate
+==================================================
+
+.. program:: rbd-ggate
+
+Synopsis
+========
+
+| **rbd-ggate** [--read-only] [--exclusive] [--device *ggate device*] map *image-spec* | *snap-spec*
+| **rbd-ggate** unmap *ggate device*
+| **rbd-ggate** list
+
+Description
+===========
+
+**rbd-ggate** is a client for RADOS block device (rbd) images. It maps an
+rbd image to a ggate (FreeBSD GEOM Gate class) device, allowing it to be
+accessed as a regular local block device.
+
+Commands
+========
+
+map
+---
+
+Spawn a process responsible for creating the ggate device and for forwarding
+I/O requests between the GEOM Gate kernel subsystem and RADOS.
+
+unmap
+-----
+
+Destroy ggate device and terminate the process responsible for it.
+
+list
+----
+
+List mapped ggate devices.
+
+Options
+=======
+
+.. option:: --device *ggate device*
+
+ Specify ggate device path.
+
+.. option:: --read-only
+
+ Map read-only.
+
+.. option:: --exclusive
+
+ Forbid writes by other clients.
+
+Image and snap specs
+====================
+
+| *image-spec* is [*pool-name*]/*image-name*
+| *snap-spec* is [*pool-name*]/*image-name*\ @\ *snap-name*
+
+The default for *pool-name* is "rbd". If an image name contains a slash
+character ('/'), *pool-name* is required.
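+
+For example (the pool, image, and device names are illustrative), an image might
+be mapped, listed, and unmapped with::
+
+  rbd-ggate map mypool/myimage
+  rbd-ggate list
+  rbd-ggate unmap /dev/ggate0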
+
+Availability
+============
+
+**rbd-ggate** is part of Ceph, a massively scalable, open-source,
+distributed storage system. Please refer to the Ceph documentation at
+http://ceph.com/docs for more information.
+
+
+See also
+========
+
+:doc:`rbd <rbd>`\(8)
+:doc:`ceph <ceph>`\(8)
diff --git a/doc/man/8/rbd-mirror.rst b/doc/man/8/rbd-mirror.rst
new file mode 100644
index 000000000..b35787fbf
--- /dev/null
+++ b/doc/man/8/rbd-mirror.rst
@@ -0,0 +1,75 @@
+:orphan:
+
+===================================================
+ rbd-mirror -- Ceph daemon for mirroring RBD images
+===================================================
+
+.. program:: rbd-mirror
+
+Synopsis
+========
+
+| **rbd-mirror**
+
+
+Description
+===========
+
+:program:`rbd-mirror` is a daemon for asynchronous mirroring of RADOS
+block device (rbd) images among Ceph clusters. It replays changes to
+images in remote clusters in a local cluster, for disaster recovery.
+
+It connects to remote clusters via the RADOS protocol, relying on
+default search paths to find ceph.conf files, monitor addresses and
+authentication information for them, i.e. ``/etc/ceph/$cluster.conf``,
+``/etc/ceph/$cluster.keyring``, and
+``/etc/ceph/$cluster.$name.keyring``, where ``$cluster`` is the
+human-friendly name of the cluster, and ``$name`` is the rados user to
+connect as, e.g. ``client.rbd-mirror``.
+
+
+Options
+=======
+
+.. option:: -c ceph.conf, --conf=ceph.conf
+
+ Use ``ceph.conf`` configuration file instead of the default
+ ``/etc/ceph/ceph.conf`` to determine monitor addresses during startup.
+
+.. option:: -m monaddress[:port]
+
+ Connect to specified monitor (instead of looking through ``ceph.conf``).
+
+.. option:: -i ID, --id ID
+
+ Set the ID portion of name for rbd-mirror
+
+.. option:: -n TYPE.ID, --name TYPE.ID
+
+  Set the rados user name for the daemon (e.g. client.rbd-mirror).
+
+.. option:: --cluster NAME
+
+ Set the cluster name (default: ceph)
+
+.. option:: -d
+
+ Run in foreground, log to stderr
+
+.. option:: -f
+
+ Run in foreground, log to usual location
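+
+For example (the cluster and user names are illustrative), a mirror daemon for
+the local cluster ``site-b`` might be run in the foreground with::
+
+  rbd-mirror --cluster site-b --id rbd-mirror -f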
+
+
+Availability
+============
+
+:program:`rbd-mirror` is part of Ceph, a massively scalable, open-source, distributed
+storage system. Please refer to the Ceph documentation at http://ceph.com/docs for
+more information.
+
+
+See also
+========
+
+:doc:`rbd <rbd>`\(8)
diff --git a/doc/man/8/rbd-nbd.rst b/doc/man/8/rbd-nbd.rst
new file mode 100644
index 000000000..8fcc5799d
--- /dev/null
+++ b/doc/man/8/rbd-nbd.rst
@@ -0,0 +1,89 @@
+:orphan:
+
+=========================================
+ rbd-nbd -- map rbd images to nbd device
+=========================================
+
+.. program:: rbd-nbd
+
+Synopsis
+========
+
+| **rbd-nbd** [-c conf] [--read-only] [--device *nbd device*] [--nbds_max *limit*] [--max_part *limit*] [--exclusive] [--encryption-format *format*] [--encryption-passphrase-file *passphrase-file*] [--io-timeout *seconds*] [--reattach-timeout *seconds*] map *image-spec* | *snap-spec*
+| **rbd-nbd** unmap *nbd device* | *image-spec* | *snap-spec*
+| **rbd-nbd** list-mapped
+| **rbd-nbd** attach --device *nbd device* *image-spec* | *snap-spec*
+| **rbd-nbd** detach *nbd device* | *image-spec* | *snap-spec*
+
+Description
+===========
+
+**rbd-nbd** is a client for RADOS block device (rbd) images, similar to the rbd
+kernel module. It maps an rbd image to an nbd (Network Block Device) device,
+allowing it to be accessed as a regular local block device.
+
+Options
+=======
+
+.. option:: -c ceph.conf
+
+ Use *ceph.conf* configuration file instead of the default
+ ``/etc/ceph/ceph.conf`` to determine monitor addresses during startup.
+
+.. option:: --read-only
+
+ Map read-only.
+
+.. option:: --nbds_max *limit*
+
+  Override the nbds_max parameter of the NBD kernel module when it is loaded
+  via modprobe; used to limit the number of nbd devices.
+
+.. option:: --max_part *limit*
+
+  Override the max_part parameter of the NBD kernel module.
+
+.. option:: --exclusive
+
+ Forbid writes by other clients.
+
+.. option:: --encryption-format
+
+ Image encryption format.
+ Possible values: *luks1*, *luks2*
+
+.. option:: --encryption-passphrase-file
+
+ Path of file containing a passphrase for unlocking image encryption.
+
+.. option:: --io-timeout *seconds*
+
+  Override the device timeout. The Linux kernel defaults to a 30 second request
+  timeout; this option allows an alternate timeout to be specified.
+
+.. option:: --reattach-timeout *seconds*
+
+  Specify the timeout for the kernel to wait for a new rbd-nbd process to
+  attach after the old process is detached. The default is 30 seconds.
+
+Image and snap specs
+====================
+
+| *image-spec* is [*pool-name*]/*image-name*
+| *snap-spec* is [*pool-name*]/*image-name*\ @\ *snap-name*
+
+The default for *pool-name* is "rbd". If an image name contains a slash
+character ('/'), *pool-name* is required.
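+
+For example (the pool, image, and device names are illustrative), an image might
+be mapped, listed, and unmapped with::
+
+  rbd-nbd map rbd/myimage
+  rbd-nbd list-mapped
+  rbd-nbd unmap /dev/nbd0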
+
+Availability
+============
+
+**rbd-nbd** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
+the Ceph documentation at http://ceph.com/docs for more information.
+
+
+See also
+========
+
+:doc:`rbd <rbd>`\(8)
diff --git a/doc/man/8/rbd-replay-many.rst b/doc/man/8/rbd-replay-many.rst
new file mode 100644
index 000000000..5fb93498a
--- /dev/null
+++ b/doc/man/8/rbd-replay-many.rst
@@ -0,0 +1,73 @@
+:orphan:
+
+==================================================================================
+ rbd-replay-many -- replay a rados block device (RBD) workload on several clients
+==================================================================================
+
+.. program:: rbd-replay-many
+
+Synopsis
+========
+
+| **rbd-replay-many** [ *options* ] --original-image *name* *host1* [ *host2* [ ... ] ] -- *rbd_replay_args*
+
+
+Description
+===========
+
+**rbd-replay-many** is a utility for replaying a rados block device (RBD) workload on several clients.
+Although all clients use the same workload, they replay against separate images.
+This matches normal use of librbd, where each original client is a VM with its own image.
+
+Configuration and replay files are not automatically copied to clients.
+Replay images must already exist.
+
+
+Options
+=======
+
+.. option:: --original-image name
+
+ Specifies the name (and snap) of the originally traced image.
+ Necessary for correct name mapping.
+
+.. option:: --image-prefix prefix
+
+ Prefix of image names to replay against.
+ Specifying --image-prefix=foo results in clients replaying against foo-0, foo-1, etc.
+ Defaults to the original image name.
+
+.. option:: --exec program
+
+ Path to the rbd-replay executable.
+
+.. option:: --delay seconds
+
+ Delay between starting each client. Defaults to 0.
+
+
+Examples
+========
+
+Typical usage::
+
+ rbd-replay-many host-0 host-1 --original-image=image -- -c ceph.conf replay.bin
+
+This results in the following commands being executed::
+
+ ssh host-0 'rbd-replay' --map-image 'image=image-0' -c ceph.conf replay.bin
+ ssh host-1 'rbd-replay' --map-image 'image=image-1' -c ceph.conf replay.bin
+
+
+Availability
+============
+
+**rbd-replay-many** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
+the Ceph documentation at http://ceph.com/docs for more information.
+
+
+See also
+========
+
+:doc:`rbd-replay <rbd-replay>`\(8),
+:doc:`rbd <rbd>`\(8)
diff --git a/doc/man/8/rbd-replay-prep.rst b/doc/man/8/rbd-replay-prep.rst
new file mode 100644
index 000000000..abb08decf
--- /dev/null
+++ b/doc/man/8/rbd-replay-prep.rst
@@ -0,0 +1,55 @@
+:orphan:
+
+====================================================================================
+ rbd-replay-prep -- prepare captured rados block device (RBD) workloads for replay
+====================================================================================
+
+.. program:: rbd-replay-prep
+
+Synopsis
+========
+
+| **rbd-replay-prep** [ --window *seconds* ] [ --anonymize ] *trace_dir* *replay_file*
+
+
+Description
+===========
+
+**rbd-replay-prep** processes raw rados block device (RBD) traces to prepare them for **rbd-replay**.
+
+
+Options
+=======
+
+.. option:: --window seconds
+
+ Requests further apart than 'seconds' seconds are assumed to be independent.
+
+.. option:: --anonymize
+
+ Anonymizes image and snap names.
+
+.. option:: --verbose
+
+ Print all processed events to console
+
+Examples
+========
+
+To prepare workload1-trace for replay::
+
+ rbd-replay-prep workload1-trace/ust/uid/1000/64-bit workload1
+
+
+Availability
+============
+
+**rbd-replay-prep** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
+the Ceph documentation at http://ceph.com/docs for more information.
+
+
+See also
+========
+
+:doc:`rbd-replay <rbd-replay>`\(8),
+:doc:`rbd <rbd>`\(8)
diff --git a/doc/man/8/rbd-replay.rst b/doc/man/8/rbd-replay.rst
new file mode 100644
index 000000000..74b8018f5
--- /dev/null
+++ b/doc/man/8/rbd-replay.rst
@@ -0,0 +1,78 @@
+:orphan:
+
+=========================================================
+ rbd-replay -- replay rados block device (RBD) workloads
+=========================================================
+
+.. program:: rbd-replay
+
+Synopsis
+========
+
+| **rbd-replay** [ *options* ] *replay_file*
+
+
+Description
+===========
+
+**rbd-replay** is a utility for replaying rados block device (RBD) workloads.
+
+
+Options
+=======
+
+.. option:: -c ceph.conf, --conf ceph.conf
+
+ Use ceph.conf configuration file instead of the default /etc/ceph/ceph.conf to
+ determine monitor addresses during startup.
+
+.. option:: -p pool, --pool pool
+
+ Interact with the given pool. Defaults to 'rbd'.
+
+.. option:: --latency-multiplier
+
+ Multiplies inter-request latencies. Default: 1.
+
+.. option:: --read-only
+
+ Only replay non-destructive requests.
+
+.. option:: --map-image rule
+
+ Add a rule to map image names in the trace to image names in the replay cluster.
+ A rule of image1@snap1=image2@snap2 would map snap1 of image1 to snap2 of image2.
+
+.. option:: --dump-perf-counters
+
+ **Experimental**
+ Dump performance counters to standard out before an image is closed.
+ Performance counters may be dumped multiple times if multiple images are closed,
+ or if the same image is opened and closed multiple times.
+ Performance counters and their meaning may change between versions.
+
+
+Examples
+========
+
+To replay workload1 as fast as possible::
+
+ rbd-replay --latency-multiplier=0 workload1
+
+To replay workload1 but use test_image instead of prod_image::
+
+ rbd-replay --map-image=prod_image=test_image workload1
+
+
+Availability
+============
+
+**rbd-replay** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
+the Ceph documentation at http://ceph.com/docs for more information.
+
+
+See also
+========
+
+:doc:`rbd-replay-prep <rbd-replay-prep>`\(8),
+:doc:`rbd <rbd>`\(8)
diff --git a/doc/man/8/rbd.rst b/doc/man/8/rbd.rst
new file mode 100644
index 000000000..57423348d
--- /dev/null
+++ b/doc/man/8/rbd.rst
@@ -0,0 +1,1036 @@
+:orphan:
+
+===============================================
+ rbd -- manage rados block device (RBD) images
+===============================================
+
+.. program:: rbd
+
+Synopsis
+========
+
+| **rbd** [ -c *ceph.conf* ] [ -m *monaddr* ] [--cluster *cluster-name*]
+ [ -p | --pool *pool* ] [ *command* ... ]
+
+
+Description
+===========
+
+**rbd** is a utility for manipulating rados block device (RBD) images,
+used by the Linux rbd driver and the rbd storage driver for QEMU/KVM.
+RBD images are simple block devices that are striped over objects and
+stored in a RADOS object store. The size of the objects the image is
+striped over must be a power of two.
+
+
+Options
+=======
+
+.. option:: -c ceph.conf, --conf ceph.conf
+
+ Use ceph.conf configuration file instead of the default /etc/ceph/ceph.conf to
+ determine monitor addresses during startup.
+
+.. option:: -m monaddress[:port]
+
+ Connect to specified monitor (instead of looking through ceph.conf).
+
+.. option:: --cluster cluster-name
+
+ Use different cluster name as compared to default cluster name *ceph*.
+
+.. option:: -p pool-name, --pool pool-name
+
+ Interact with the given pool. Required by most commands.
+
+.. option:: --namespace namespace-name
+
+ Use a pre-defined image namespace within a pool
+
+.. option:: --no-progress
+
+ Do not output progress information (goes to standard error by
+ default for some commands).
+
+
+Parameters
+==========
+
+.. option:: --image-format format-id
+
+ Specifies which object layout to use. The default is 2.
+
+ * format 1 - (deprecated) Use the original format for a new rbd image. This
+ format is understood by all versions of librbd and the kernel rbd module,
+ but does not support newer features like cloning.
+
+ * format 2 - Use the second rbd format, which is supported by librbd since
+ the Bobtail release and the kernel rbd module since kernel 3.10 (except
+ for "fancy" striping, which is supported since kernel 4.17). This adds
+ support for cloning and is more easily extensible to allow more
+ features in the future.
+
+.. option:: -s size-in-M/G/T, --size size-in-M/G/T
+
+ Specifies the size of the new rbd image or the new size of the existing rbd
+ image in M/G/T. If no suffix is given, unit M is assumed.
+
+.. option:: --object-size size-in-B/K/M
+
+  Specifies the object size in B/K/M. The object size will be rounded up to the
+  nearest power of two; if no suffix is given, unit B is assumed. The default
+  object size is 4M, the smallest is 4K, and the maximum is 32M.
+
+.. option:: --stripe-unit size-in-B/K/M
+
+ Specifies the stripe unit size in B/K/M. If no suffix is given, unit B is
+ assumed. See striping section (below) for more details.
+
+.. option:: --stripe-count num
+
+ Specifies the number of objects to stripe over before looping back
+ to the first object. See striping section (below) for more details.
+
+.. option:: --snap snap
+
+ Specifies the snapshot name for the specific operation.
+
+.. option:: --id username
+
+ Specifies the username (without the ``client.`` prefix) to use with the map command.
+
+.. option:: --keyring filename
+
+ Specifies a keyring file containing a secret for the specified user
+ to use with the map command. If not specified, the default keyring
+ locations will be searched.
+
+.. option:: --keyfile filename
+
+ Specifies a file containing the secret key of ``--id user`` to use with the map command.
+ This option is overridden by ``--keyring`` if the latter is also specified.
+
+.. option:: --shared lock-tag
+
+ Option for `lock add` that allows multiple clients to lock the
+ same image if they use the same tag. The tag is an arbitrary
+ string. This is useful for situations where an image must
+ be open from more than one client at once, like during
+ live migration of a virtual machine, or for use underneath
+ a clustered file system.
+
+.. option:: --format format
+
+  Specifies the output formatting (plain, json, or xml; default: plain).
+
+.. option:: --pretty-format
+
+ Make json or xml formatted output more human-readable.
+
+.. option:: -o krbd-options, --options krbd-options
+
+ Specifies which options to use when mapping or unmapping an image via the
+ rbd kernel driver. krbd-options is a comma-separated list of options
+ (similar to mount(8) mount options). See kernel rbd (krbd) options section
+ below for more details.
+
+.. option:: --read-only
+
+ Map the image read-only. Equivalent to -o ro.
+
+.. option:: --image-feature feature-name
+
+ Specifies which RBD format 2 feature should be enabled when creating
+ an image. Multiple features can be enabled by repeating this option
+ multiple times. The following features are supported:
+
+ * layering: layering support
+ * striping: striping v2 support
+ * exclusive-lock: exclusive locking support
+ * object-map: object map support (requires exclusive-lock)
+ * fast-diff: fast diff calculations (requires object-map)
+ * deep-flatten: snapshot flatten support
+ * journaling: journaled IO support (requires exclusive-lock)
+ * data-pool: erasure coded pool support
+
+.. option:: --image-shared
+
+ Specifies that the image will be used concurrently by multiple clients.
+ This will disable features that are dependent upon exclusive ownership
+ of the image.
+
+.. option:: --whole-object
+
+ Specifies that the diff should be limited to the extents of a full object
+ instead of showing intra-object deltas. When the object map feature is
+ enabled on an image, limiting the diff to the object extents will
+ dramatically improve performance since the differences can be computed
+ by examining the in-memory object map instead of querying RADOS for each
+ object within the image.
+
+.. option:: --limit
+
+ Specifies the limit for the number of snapshots permitted.
+
+Commands
+========
+
+.. TODO rst "option" directive seems to require --foo style options, parsing breaks on subcommands.. the args show up as bold too
+
+:command:`bench` --io-type <read | write | readwrite | rw> [--io-size *size-in-B/K/M/G/T*] [--io-threads *num-ios-in-flight*] [--io-total *size-in-B/K/M/G/T*] [--io-pattern seq | rand] [--rw-mix-read *read proportion in readwrite*] *image-spec*
+ Generate a series of IOs to the image and measure the IO throughput and
+ latency. If no suffix is given, unit B is assumed for both --io-size and
+ --io-total. Defaults are: --io-size 4096, --io-threads 16, --io-total 1G,
+ --io-pattern seq, --rw-mix-read 50.
+
+:command:`children` *snap-spec*
+ List the clones of the image at the given snapshot. This checks
+ every pool, and outputs the resulting poolname/imagename.
+
+ This requires image format 2.
+
+:command:`clone` [--object-size *size-in-B/K/M*] [--stripe-unit *size-in-B/K/M* --stripe-count *num*] [--image-feature *feature-name*] [--image-shared] *parent-snap-spec* *child-image-spec*
+ Will create a clone (copy-on-write child) of the parent snapshot.
+ Object size will be identical to that of the parent image unless
+ specified. Size will be the same as the parent snapshot. The --stripe-unit
+ and --stripe-count arguments are optional, but must be used together.
+
+ The parent snapshot must be protected (see `rbd snap protect`).
+ This requires image format 2.
+
+:command:`config global get` *config-entity* *key*
+ Get a global-level configuration override.
+
+:command:`config global list` [--format plain | json | xml] [--pretty-format] *config-entity*
+ List global-level configuration overrides.
+
+:command:`config global set` *config-entity* *key* *value*
+ Set a global-level configuration override.
+
+:command:`config global remove` *config-entity* *key*
+ Remove a global-level configuration override.
+
+:command:`config image get` *image-spec* *key*
+ Get an image-level configuration override.
+
+:command:`config image list` [--format plain | json | xml] [--pretty-format] *image-spec*
+ List image-level configuration overrides.
+
+:command:`config image set` *image-spec* *key* *value*
+ Set an image-level configuration override.
+
+:command:`config image remove` *image-spec* *key*
+ Remove an image-level configuration override.
+
+:command:`config pool get` *pool-name* *key*
+ Get a pool-level configuration override.
+
+:command:`config pool list` [--format plain | json | xml] [--pretty-format] *pool-name*
+ List pool-level configuration overrides.
+
+:command:`config pool set` *pool-name* *key* *value*
+ Set a pool-level configuration override.
+
+:command:`config pool remove` *pool-name* *key*
+ Remove a pool-level configuration override.
+
+:command:`cp` (*src-image-spec* | *src-snap-spec*) *dest-image-spec*
+ Copy the content of a src-image into the newly created dest-image.
+ dest-image will have the same size, object size, and image format as src-image.
+
+:command:`create` (-s | --size *size-in-M/G/T*) [--image-format *format-id*] [--object-size *size-in-B/K/M*] [--stripe-unit *size-in-B/K/M* --stripe-count *num*] [--thick-provision] [--no-progress] [--image-feature *feature-name*]... [--image-shared] *image-spec*
+ Will create a new rbd image. You must also specify the size via --size. The
+ --stripe-unit and --stripe-count arguments are optional, but must be used together.
+  If --thick-provision is enabled, storage is fully allocated for the image at
+  creation time. This can take a long time.
+ Note: thick provisioning requires zeroing the contents of the entire image.
+
+:command:`deep cp` (*src-image-spec* | *src-snap-spec*) *dest-image-spec*
+ Deep copy the content of a src-image into the newly created dest-image.
+ Dest-image will have the same size, object size, image format, and snapshots as src-image.
+
+:command:`device list` [-t | --device-type *device-type*] [--format plain | json | xml] --pretty-format
+ Show the rbd images that are mapped via the rbd kernel module
+ (default) or other supported device.
+
+:command:`device map` [-t | --device-type *device-type*] [--cookie *device-cookie*] [--show-cookie] [--read-only] [--exclusive] [-o | --options *device-options*] *image-spec* | *snap-spec*
+ Map the specified image to a block device via the rbd kernel module
+ (default) or other supported device (*nbd* on Linux or *ggate* on
+ FreeBSD).
+
+ The --options argument is a comma separated list of device type
+ specific options (opt1,opt2=val,...).
+
+:command:`device unmap` [-t | --device-type *device-type*] [-o | --options *device-options*] *image-spec* | *snap-spec* | *device-path*
+ Unmap the block device that was mapped via the rbd kernel module
+ (default) or other supported device.
+
+ The --options argument is a comma separated list of device type
+ specific options (opt1,opt2=val,...).
+
+:command:`device attach` [-t | --device-type *device-type*] --device *device-path* [--cookie *device-cookie*] [--show-cookie] [--read-only] [--exclusive] [--force] [-o | --options *device-options*] *image-spec* | *snap-spec*
+ Attach the specified image to the specified block device (currently only
+ `nbd` on Linux). This operation is unsafe and should not be normally used.
+ In particular, specifying the wrong image or the wrong block device may
+ lead to data corruption as no validation is performed by `nbd` kernel driver.
+
+ The --options argument is a comma separated list of device type
+ specific options (opt1,opt2=val,...).
+
+:command:`device detach` [-t | --device-type *device-type*] [-o | --options *device-options*] *image-spec* | *snap-spec* | *device-path*
+ Detach the block device that was mapped or attached (currently only `nbd`
+ on Linux). This operation is unsafe and should not be normally used.
+
+ The --options argument is a comma separated list of device type
+ specific options (opt1,opt2=val,...).
+
+:command:`diff` [--from-snap *snap-name*] [--whole-object] *image-spec* | *snap-spec*
+ Dump a list of byte extents in the image that have changed since the specified start
+ snapshot, or since the image was created. Each output line includes the starting offset
+ (in bytes), the length of the region (in bytes), and either 'zero' or 'data' to indicate
+ whether the region is known to be zeros or may contain other data.
+
+:command:`du` [-p | --pool *pool-name*] [*image-spec* | *snap-spec*] [--merge-snapshots]
+ Will calculate the provisioned and actual disk usage of all images and
+ associated snapshots within the specified pool. It can also be used against
+ individual images and snapshots.
+
+ If the RBD fast-diff feature is not enabled on images, this operation will
+ require querying the OSDs for every potential object within the image.
+
+ The --merge-snapshots will merge snapshots used space into their parent images.
+
+:command:`encryption format` *image-spec* *format* *passphrase-file* [--cipher-alg *alg*]
+ Formats image to an encrypted format.
+ All data previously written to the image will become unreadable.
+ A cloned image cannot be formatted, although encrypted images can be cloned.
+ Supported formats: *luks1*, *luks2*.
+ Supported cipher algorithms: *aes-128*, *aes-256* (default).
+
+:command:`export` [--export-format *format (1 or 2)*] (*image-spec* | *snap-spec*) [*dest-path*]
+ Export image to dest path (use - for stdout).
+  The --export-format accepts '1' or '2' currently. Format 2 allows exporting
+  not only the content of the image, but also its snapshots and other
+  properties, such as image_order and features.
+
+:command:`export-diff` [--from-snap *snap-name*] [--whole-object] (*image-spec* | *snap-spec*) *dest-path*
+ Export an incremental diff for an image to dest path (use - for stdout). If
+ an initial snapshot is specified, only changes since that snapshot are included; otherwise,
+ any regions of the image that contain data are included. The end snapshot is specified
+ using the standard --snap option or @snap syntax (see below). The image diff format includes
+ metadata about image size changes, and the start and end snapshots. It efficiently represents
+ discarded or 'zero' regions of the image.
+
+:command:`feature disable` *image-spec* *feature-name*...
+ Disable the specified feature on the specified image. Multiple features can
+ be specified.
+
+:command:`feature enable` *image-spec* *feature-name*...
+ Enable the specified feature on the specified image. Multiple features can
+ be specified.
+
+:command:`flatten` *image-spec*
+ If image is a clone, copy all shared blocks from the parent snapshot and
+ make the child independent of the parent, severing the link between
+ parent snap and child. The parent snapshot can be unprotected and
+ deleted if it has no further dependent clones.
+
+ This requires image format 2.
+
+:command:`group create` *group-spec*
+ Create a group.
+
+:command:`group image add` *group-spec* *image-spec*
+ Add an image to a group.
+
+:command:`group image list` *group-spec*
+ List images in a group.
+
+:command:`group image remove` *group-spec* *image-spec*
+ Remove an image from a group.
+
+:command:`group ls` [-p | --pool *pool-name*]
+ List rbd groups.
+
+:command:`group rename` *src-group-spec* *dest-group-spec*
+ Rename a group. Note: rename across pools is not supported.
+
+:command:`group rm` *group-spec*
+ Delete a group.
+
+:command:`group snap create` *group-snap-spec*
+ Make a snapshot of a group.
+
+:command:`group snap list` *group-spec*
+ List snapshots of a group.
+
+:command:`group snap rm` *group-snap-spec*
+ Remove a snapshot from a group.
+
+:command:`group snap rename` *group-snap-spec* *snap-name*
+ Rename group's snapshot.
+
+:command:`group snap rollback` *group-snap-spec*
+ Rollback group to snapshot.
+
+:command:`image-meta get` *image-spec* *key*
+ Get metadata value with the key.
+
+:command:`image-meta list` *image-spec*
+ Show metadata held on the image. The first column is the key
+ and the second column is the value.
+
+:command:`image-meta remove` *image-spec* *key*
+  Remove the metadata key and its value.
+
+:command:`image-meta set` *image-spec* *key* *value*
+  Set the metadata key to the given value. It will be displayed in `image-meta list`.
+
+:command:`import` [--export-format *format (1 or 2)*] [--image-format *format-id*] [--object-size *size-in-B/K/M*] [--stripe-unit *size-in-B/K/M* --stripe-count *num*] [--image-feature *feature-name*]... [--image-shared] *src-path* [*image-spec*]
+  Create a new image and import its data from a path (use - for
+ stdin). The import operation will try to create sparse rbd images
+ if possible. For import from stdin, the sparsification unit is
+ the data block size of the destination image (object size).
+
+ The --stripe-unit and --stripe-count arguments are optional, but must be
+ used together.
+
+  The --export-format accepts '1' or '2' currently. Format 2 allows importing
+  not only the content of the image, but also its snapshots and other
+  properties, such as image_order and features.
+
+:command:`import-diff` *src-path* *image-spec*
+  Import an incremental diff of an image and apply it to the current image. If the diff
+ was generated relative to a start snapshot, we verify that snapshot already exists before
+ continuing. If there was an end snapshot we verify it does not already exist before
+ applying the changes, and create the snapshot when we are done.
+
+:command:`info` *image-spec* | *snap-spec*
+ Will dump information (such as size and object size) about a specific rbd image.
+ If image is a clone, information about its parent is also displayed.
+ If a snapshot is specified, whether it is protected is shown as well.
+
+:command:`journal client disconnect` *journal-spec*
+ Flag image journal client as disconnected.
+
+:command:`journal export` [--verbose] [--no-error] *src-journal-spec* *path-name*
+  Export the image journal to a path (use - for stdout). This can be used to
+  make a backup of the image journal, especially before attempting dangerous
+  operations.
+
+ Note that this command may not always work if the journal is badly corrupted.
+
+:command:`journal import` [--verbose] [--no-error] *path-name* *dest-journal-spec*
+ Import image journal from path (use - for stdin).
+
+:command:`journal info` *journal-spec*
+ Show information about image journal.
+
+:command:`journal inspect` [--verbose] *journal-spec*
+ Inspect and report image journal for structural errors.
+
+:command:`journal reset` *journal-spec*
+ Reset image journal.
+
+:command:`journal status` *journal-spec*
+ Show status of image journal.
+
+:command:`lock add` [--shared *lock-tag*] *image-spec* *lock-id*
+ Lock an image. The lock-id is an arbitrary name for the user's
+ convenience. By default, this is an exclusive lock, meaning it
+ will fail if the image is already locked. The --shared option
+ changes this behavior. Note that locking does not affect
+ any operation other than adding a lock. It does not
+ protect an image from being deleted.
+
+:command:`lock ls` *image-spec*
+ Show locks held on the image. The first column is the locker
+ to use with the `lock remove` command.
+
+:command:`lock rm` *image-spec* *lock-id* *locker*
+ Release a lock on an image. The lock id and locker are
+ as output by lock ls.
+
+:command:`ls` [-l | --long] [*pool-name*]
+ Will list all rbd images listed in the rbd_directory object. With
+ -l, also show snapshots, and use longer-format output including
+ size, parent (if clone), format, etc.
+
+:command:`merge-diff` *first-diff-path* *second-diff-path* *merged-diff-path*
+ Merge two continuous incremental diffs of an image into one single diff. The
+ first diff's end snapshot must be equal with the second diff's start snapshot.
+ The first diff could be - for stdin, and merged diff could be - for stdout, which
+ enables multiple diff files to be merged using something like
+  'rbd merge-diff first second - | rbd merge-diff - third result'. Note that this
+  command currently only supports source incremental diffs with stripe_count == 1.
+
+:command:`migration abort` *image-spec*
+ Cancel image migration. This step may be run after successful or
+ failed migration prepare or migration execute steps and returns the
+ image to its initial (before migration) state. All modifications to
+ the destination image are lost.
+
+:command:`migration commit` *image-spec*
+ Commit image migration. This step is run after a successful migration
+ prepare and migration execute steps and removes the source image data.
+
+:command:`migration execute` *image-spec*
+ Execute image migration. This step is run after a successful migration
+ prepare step and copies image data to the destination.
+
+:command:`migration prepare` [--order *order*] [--object-size *object-size*] [--image-feature *image-feature*] [--image-shared] [--stripe-unit *stripe-unit*] [--stripe-count *stripe-count*] [--data-pool *data-pool*] [--import-only] [--source-spec *json*] [--source-spec-path *path*] *src-image-spec* [*dest-image-spec*]
+ Prepare image migration. This is the first step when migrating an
+ image, i.e. changing the image location, format or other
+ parameters that can't be changed dynamically. The destination can
+ match the source, and in this case *dest-image-spec* can be omitted.
+ After this step the source image is set as a parent of the
+ destination image, and the image is accessible in copy-on-write mode
+ by its destination spec.
+
+  An image can also be migrated from a read-only import source by adding the
+  *--import-only* option and providing either a JSON-encoded *--source-spec* or
+  a path to a JSON-encoded source-spec file using the *--source-spec-path*
+  option.
+
+:command:`mirror image demote` *image-spec*
+ Demote a primary image to non-primary for RBD mirroring.
+
+:command:`mirror image disable` [--force] *image-spec*
+  Disable RBD mirroring for an image. If mirroring is
+  configured in ``image`` mode for the image's pool, then
+  mirroring can be explicitly disabled for each image within
+  the pool.
+
+:command:`mirror image enable` *image-spec* *mode*
+  Enable RBD mirroring for an image. If mirroring is
+  configured in ``image`` mode for the image's pool, then
+  mirroring can be explicitly enabled for each image within
+  the pool.
+
+ The mirror image mode can either be ``journal`` (default) or
+ ``snapshot``. The ``journal`` mode requires the RBD journaling
+ feature.
+
+:command:`mirror image promote` [--force] *image-spec*
+ Promote a non-primary image to primary for RBD mirroring.
+
+:command:`mirror image resync` *image-spec*
+ Force resync to primary image for RBD mirroring.
+
+:command:`mirror image status` *image-spec*
+ Show RBD mirroring status for an image.
+
+:command:`mirror pool demote` [*pool-name*]
+ Demote all primary images within a pool to non-primary.
+  Every mirroring-enabled image in the pool will be demoted.
+
+:command:`mirror pool disable` [*pool-name*]
+ Disable RBD mirroring by default within a pool. When mirroring
+ is disabled on a pool in this way, mirroring will also be
+ disabled on any images (within the pool) for which mirroring
+ was enabled explicitly.
+
+:command:`mirror pool enable` [*pool-name*] *mode*
+ Enable RBD mirroring by default within a pool.
+ The mirroring mode can either be ``pool`` or ``image``.
+ If configured in ``pool`` mode, all images in the pool
+ with the journaling feature enabled are mirrored.
+ If configured in ``image`` mode, mirroring needs to be
+ explicitly enabled (by ``mirror image enable`` command)
+ on each image.
+
+:command:`mirror pool info` [*pool-name*]
+ Show information about the pool mirroring configuration.
+ It includes mirroring mode, peer UUID, remote cluster name,
+ and remote client name.
+
+:command:`mirror pool peer add` [*pool-name*] *remote-cluster-spec*
+ Add a mirroring peer to a pool.
+ *remote-cluster-spec* is [*remote client name*\ @\ ]\ *remote cluster name*.
+
+ The default for *remote client name* is "client.admin".
+
+ This requires mirroring mode is enabled.
+
+:command:`mirror pool peer remove` [*pool-name*] *uuid*
+ Remove a mirroring peer from a pool. The peer uuid is available
+ from ``mirror pool info`` command.
+
+:command:`mirror pool peer set` [*pool-name*] *uuid* *key* *value*
+ Update mirroring peer settings.
+ The key can be either ``client`` or ``cluster``, and the value
+  corresponds to the remote client name or the remote cluster name.
+
+:command:`mirror pool promote` [--force] [*pool-name*]
+ Promote all non-primary images within a pool to primary.
+  Every mirroring-enabled image in the pool will be promoted.
+
+:command:`mirror pool status` [--verbose] [*pool-name*]
+ Show status for all mirrored images in the pool.
+  With --verbose, also show status details for every
+  mirrored image in the pool.
+
+:command:`mirror snapshot schedule add` [-p | --pool *pool*] [--namespace *namespace*] [--image *image*] *interval* [*start-time*]
+ Add mirror snapshot schedule.
+
+:command:`mirror snapshot schedule list` [-R | --recursive] [--format *format*] [--pretty-format] [-p | --pool *pool*] [--namespace *namespace*] [--image *image*]
+ List mirror snapshot schedule.
+
+:command:`mirror snapshot schedule remove` [-p | --pool *pool*] [--namespace *namespace*] [--image *image*] *interval* [*start-time*]
+ Remove mirror snapshot schedule.
+
+:command:`mirror snapshot schedule status` [-p | --pool *pool*] [--format *format*] [--pretty-format] [--namespace *namespace*] [--image *image*]
+ Show mirror snapshot schedule status.
+
+:command:`mv` *src-image-spec* *dest-image-spec*
+ Rename an image. Note: rename across pools is not supported.
+
+:command:`namespace create` *pool-name*/*namespace-name*
+ Create a new image namespace within the pool.
+
+:command:`namespace list` *pool-name*
+ List image namespaces defined within the pool.
+
+:command:`namespace remove` *pool-name*/*namespace-name*
+ Remove an empty image namespace from the pool.
+
+:command:`object-map check` *image-spec* | *snap-spec*
+ Verify the object map is correct.
+
+:command:`object-map rebuild` *image-spec* | *snap-spec*
+ Rebuild an invalid object map for the specified image. An image snapshot can be
+ specified to rebuild an invalid object map for a snapshot.
+
+:command:`pool init` [*pool-name*] [--force]
+  Initialize a pool for use by RBD. Newly created pools must be initialized
+  prior to use.
+
+:command:`resize` (-s | --size *size-in-M/G/T*) [--allow-shrink] *image-spec*
+ Resize rbd image. The size parameter also needs to be specified.
+ The --allow-shrink option lets the size be reduced.
+
+:command:`rm` *image-spec*
+ Delete an rbd image (including all data blocks). If the image has
+ snapshots, this fails and nothing is deleted.
+
+:command:`snap create` *snap-spec*
+ Create a new snapshot. Requires the snapshot name parameter specified.
+
+:command:`snap limit clear` *image-spec*
+ Remove any previously set limit on the number of snapshots allowed on
+ an image.
+
+:command:`snap limit set` [--limit] *limit* *image-spec*
+ Set a limit for the number of snapshots allowed on an image.
+
+:command:`snap ls` *image-spec*
+ Dump the list of snapshots inside a specific image.
+
+:command:`snap protect` *snap-spec*
+ Protect a snapshot from deletion, so that clones can be made of it
+ (see `rbd clone`). Snapshots must be protected before clones are made;
+ protection implies that there exist dependent cloned children that
+ refer to this snapshot. `rbd clone` will fail on a nonprotected
+ snapshot.
+
+ This requires image format 2.
+
+:command:`snap purge` *image-spec*
+ Remove all unprotected snapshots from an image.
+
+:command:`snap rename` *src-snap-spec* *dest-snap-spec*
+ Rename a snapshot. Note: rename across pools and images is not supported.
+
+:command:`snap rm` [--force] *snap-spec*
+ Remove the specified snapshot.
+
+:command:`snap rollback` *snap-spec*
+ Rollback image content to snapshot. This will iterate through the entire blocks
+ array and update the data head content to the snapshotted version.
+
+:command:`snap unprotect` *snap-spec*
+ Unprotect a snapshot from deletion (undo `snap protect`). If cloned
+ children remain, `snap unprotect` fails. (Note that clones may exist
+ in different pools than the parent snapshot.)
+
+ This requires image format 2.
+
+:command:`sparsify` [--sparse-size *sparse-size*] *image-spec*
+ Reclaim space for zeroed image extents. The default sparse size is
+ 4096 bytes and can be changed via --sparse-size option with the
+  following restrictions: it should be a power of two, not less than
+  4096, and not larger than the image object size.
+
+:command:`status` *image-spec*
+ Show the status of the image, including which clients have it open.
+
+:command:`trash ls` [*pool-name*]
+ List all entries from trash.
+
+:command:`trash mv` *image-spec*
+ Move an image to the trash. Images, even ones actively in-use by
+ clones, can be moved to the trash and deleted at a later time.
+
+:command:`trash purge` [*pool-name*]
+ Remove all expired images from trash.
+
+:command:`trash restore` *image-id*
+ Restore an image from trash.
+
+:command:`trash rm` *image-id*
+  Delete an image from the trash. If the image's deferment time has not expired
+  it cannot be removed unless --force is used. An image that is actively in use
+  by clones or that has snapshots cannot be removed.
+
+:command:`trash purge schedule add` [-p | --pool *pool*] [--namespace *namespace*] *interval* [*start-time*]
+ Add trash purge schedule.
+
+:command:`trash purge schedule list` [-R | --recursive] [--format *format*] [--pretty-format] [-p | --pool *pool*] [--namespace *namespace*]
+ List trash purge schedule.
+
+:command:`trash purge schedule remove` [-p | --pool *pool*] [--namespace *namespace*] *interval* [*start-time*]
+ Remove trash purge schedule.
+
+:command:`trash purge schedule status` [-p | --pool *pool*] [--format *format*] [--pretty-format] [--namespace *namespace*]
+ Show trash purge schedule status.
+
+:command:`watch` *image-spec*
+ Watch events on image.
+
+Image, snap, group and journal specs
+====================================
+
+| *image-spec* is [*pool-name*/[*namespace-name*/]]\ *image-name*
+| *snap-spec* is [*pool-name*/[*namespace-name*/]]\ *image-name*\ @\ *snap-name*
+| *group-spec* is [*pool-name*/[*namespace-name*/]]\ *group-name*
+| *group-snap-spec* is [*pool-name*/[*namespace-name*/]]\ *group-name*\ @\ *snap-name*
+| *journal-spec* is [*pool-name*/[*namespace-name*/]]\ *journal-name*
+
+The default for *pool-name* is "rbd" and *namespace-name* is "". If an image
+name contains a slash character ('/'), *pool-name* is required.
+
+The *journal-name* is *image-id*.
+
+You may specify each name individually, using --pool, --namespace, --image, and
+--snap options, but this is discouraged in favor of the above spec syntax.
+
+Striping
+========
+
+RBD images are striped over many objects, which are then stored by the
+Ceph distributed object store (RADOS). As a result, read and write
+requests for the image are distributed across many nodes in the
+cluster, generally preventing any single node from becoming a
+bottleneck when individual images get large or busy.
+
+The striping is controlled by three parameters:
+
+.. option:: object-size
+
+  The size of the objects we stripe over must be a power of two; any other
+  value is rounded up to the nearest power of two. The default object size
+  is 4 MB; the smallest is 4K and the largest is 32M.
+
+.. option:: stripe_unit
+
+ Each [*stripe_unit*] contiguous bytes are stored adjacently in the same object, before we move on
+ to the next object.
+
+.. option:: stripe_count
+
+ After we write [*stripe_unit*] bytes to [*stripe_count*] objects, we loop back to the initial object
+ and write another stripe, until the object reaches its maximum size. At that point,
+ we move on to the next [*stripe_count*] objects.
+
+By default, [*stripe_unit*] is the same as the object size and [*stripe_count*] is 1. Specifying a different
+[*stripe_unit*] and/or [*stripe_count*] is often referred to as using "fancy" striping and requires format 2.
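+
+For example (illustrative values), an image created with a 4M object size, a
+64K stripe unit and a stripe count of 16 writes its first 64K of data to the
+first object, the next 64K to the second, and so on; after the 16th object
+the write position wraps back to the first object::
+
+  rbd create mypool/myimage --size 102400 --object-size 4M --stripe-unit 65536B --stripe-count 16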
+
+
+Kernel rbd (krbd) options
+=========================
+
+Most of these options are useful mainly for debugging and benchmarking. The
+default values are set in the kernel and may therefore depend on the version of
+the running kernel.
+
+Per client instance `rbd device map` options:
+
+* fsid=aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee - FSID that should be assumed by
+ the client.
+
+* ip=a.b.c.d[:p] - IP and, optionally, port the client should use.
+
+* share - Enable sharing of client instances with other mappings (default).
+
+* noshare - Disable sharing of client instances with other mappings.
+
+* crc - Enable CRC32C checksumming for msgr1 on-the-wire protocol (default).
+ For msgr2.1 protocol this option is ignored: full checksumming is always on
+ in 'crc' mode and always off in 'secure' mode.
+
+* nocrc - Disable CRC32C checksumming for msgr1 on-the-wire protocol. Note
+ that only payload checksumming is disabled, header checksumming is always on.
+ For msgr2.1 protocol this option is ignored.
+
+* cephx_require_signatures - Require msgr1 message signing feature (since 3.19,
+ default). This option is deprecated and will be removed in the future as the
+ feature has been supported since the Bobtail release.
+
+* nocephx_require_signatures - Don't require msgr1 message signing feature
+ (since 3.19). This option is deprecated and will be removed in the future.
+
+* tcp_nodelay - Disable Nagle's algorithm on client sockets (since 4.0,
+ default).
+
+* notcp_nodelay - Enable Nagle's algorithm on client sockets (since 4.0).
+
+* cephx_sign_messages - Enable message signing for msgr1 on-the-wire protocol
+ (since 4.4, default). For msgr2.1 protocol this option is ignored: message
+ signing is built into 'secure' mode and not offered in 'crc' mode.
+
+* nocephx_sign_messages - Disable message signing for msgr1 on-the-wire protocol
+ (since 4.4). For msgr2.1 protocol this option is ignored.
+
+* mount_timeout=x - A timeout on various steps in `rbd device map` and
+ `rbd device unmap` sequences (default is 60 seconds). In particular,
+ since 4.2 this can be used to ensure that `rbd device unmap` eventually
+ times out when there is no network connection to a cluster.
+
+* osdkeepalive=x - OSD keepalive timeout (default is 5 seconds).
+
+* osd_idle_ttl=x - OSD idle TTL (default is 60 seconds).
+
+Per mapping (block device) `rbd device map` options:
+
+* rw - Map the image read-write (default). Overridden by --read-only.
+
+* ro - Map the image read-only. Equivalent to --read-only.
+
+* queue_depth=x - Queue depth (since 4.2, default is 128 requests).
+
+* lock_on_read - Acquire exclusive lock on reads, in addition to writes and
+ discards (since 4.9).
+
+* exclusive - Disable automatic exclusive lock transitions (since 4.12).
+ Equivalent to --exclusive.
+
+* lock_timeout=x - A timeout on waiting for the acquisition of exclusive lock
+ (since 4.17, default is 0 seconds, meaning no timeout).
+
+* notrim - Turn off discard and write zeroes offload support to avoid
+  deprovisioning a fully provisioned image (since 4.17). When enabled, discard
+  requests will fail with -EOPNOTSUPP, and write zeroes requests will fall
+  back to manual zeroing.
+
+* abort_on_full - Fail write requests with -ENOSPC when the cluster is full or
+ the data pool reaches its quota (since 5.0). The default behaviour is to
+ block until the full condition is cleared.
+
+* alloc_size - Minimum allocation unit of the underlying OSD object store
+ backend (since 5.1, default is 64K bytes). This is used to round off and
+ drop discards that are too small. For bluestore, the recommended setting is
+ bluestore_min_alloc_size (currently set to 4K for all types of drives,
+ previously used to be set to 64K for hard disk drives and 16K for
+ solid-state drives). For filestore with filestore_punch_hole = false, the
+ recommended setting is image object size (typically 4M).
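+
+  For example, a mapping that aligns discards to a 4K BlueStore allocation
+  unit (illustrative image name) might look like this::
+
+    rbd device map -o alloc_size=4096 mypool/myimage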
+
+* crush_location=x - Specify the location of the client in terms of CRUSH
+ hierarchy (since 5.8). This is a set of key-value pairs separated from
+ each other by '|', with keys separated from values by ':'. Note that '|'
+ may need to be quoted or escaped to avoid it being interpreted as a pipe
+ by the shell. The key is the bucket type name (e.g. rack, datacenter or
+ region with default bucket types) and the value is the bucket name. For
+ example, to indicate that the client is local to rack "myrack", data center
+ "mydc" and region "myregion"::
+
+ crush_location=rack:myrack|datacenter:mydc|region:myregion
+
+ Each key-value pair stands on its own: "myrack" doesn't need to reside in
+ "mydc", which in turn doesn't need to reside in "myregion". The location
+ is not a path to the root of the hierarchy but rather a set of nodes that
+  are matched independently, owing to the fact that bucket names are unique
+ within a CRUSH map. "Multipath" locations are supported, so it is possible
+ to indicate locality for multiple parallel hierarchies::
+
+ crush_location=rack:myrack1|rack:myrack2|datacenter:mydc
+
+* read_from_replica=no - Disable replica reads, always pick the primary OSD
+ (since 5.8, default).
+
+* read_from_replica=balance - When issued a read on a replicated pool, pick
+ a random OSD for serving it (since 5.8).
+
+ This mode is safe for general use only since Octopus (i.e. after "ceph osd
+ require-osd-release octopus"). Otherwise it should be limited to read-only
+ workloads such as images mapped read-only everywhere or snapshots.
+
+* read_from_replica=localize - When issued a read on a replicated pool, pick
+ the most local OSD for serving it (since 5.8). The locality metric is
+ calculated against the location of the client given with crush_location;
+ a match with the lowest-valued bucket type wins. For example, with default
+ bucket types, an OSD in a matching rack is closer than an OSD in a matching
+ data center, which in turn is closer than an OSD in a matching region.
+
+ This mode is safe for general use only since Octopus (i.e. after "ceph osd
+ require-osd-release octopus"). Otherwise it should be limited to read-only
+ workloads such as images mapped read-only everywhere or snapshots.
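+
+  For example (illustrative names), a mapping that combines a CRUSH location
+  with localized replica reads might look like this::
+
+    rbd device map -o 'crush_location=rack:myrack|datacenter:mydc,read_from_replica=localize' mypool/myimage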
+
+* compression_hint=none - Don't set compression hints (since 5.8, default).
+
+* compression_hint=compressible - Hint to the underlying OSD object store
+ backend that the data is compressible, enabling compression in passive mode
+ (since 5.8).
+
+* compression_hint=incompressible - Hint to the underlying OSD object store
+ backend that the data is incompressible, disabling compression in aggressive
+ mode (since 5.8).
+
+* ms_mode=legacy - Use msgr1 on-the-wire protocol (since 5.11, default).
+
+* ms_mode=crc - Use msgr2.1 on-the-wire protocol, select 'crc' mode, also
+ referred to as plain mode (since 5.11). If the daemon denies 'crc' mode,
+ fail the connection.
+
+* ms_mode=secure - Use msgr2.1 on-the-wire protocol, select 'secure' mode
+ (since 5.11). 'secure' mode provides full in-transit encryption ensuring
+ both confidentiality and authenticity. If the daemon denies 'secure' mode,
+ fail the connection.
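+
+  For example, a mapping that requires in-transit encryption (illustrative
+  image name) might look like this::
+
+    rbd device map -o ms_mode=secure mypool/myimage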
+
+* ms_mode=prefer-crc - Use msgr2.1 on-the-wire protocol, select 'crc'
+ mode (since 5.11). If the daemon denies 'crc' mode in favor of 'secure'
+ mode, agree to 'secure' mode.
+
+* ms_mode=prefer-secure - Use msgr2.1 on-the-wire protocol, select 'secure'
+ mode (since 5.11). If the daemon denies 'secure' mode in favor of 'crc'
+ mode, agree to 'crc' mode.
+
+* rxbounce - Use a bounce buffer when receiving data (since 5.17). The default
+ behaviour is to read directly into the destination buffer. A bounce buffer
+ is needed if the destination buffer isn't guaranteed to be stable (i.e. remain
+ unchanged while it is being read to). In particular this is the case for
+ Windows where a system-wide "dummy" (throwaway) page may be mapped into the
+ destination buffer in order to generate a single large I/O. Otherwise,
+ "libceph: ... bad crc/signature" or "libceph: ... integrity error, bad crc"
+ errors and associated performance degradation are expected.
+
+* udev - Wait for udev device manager to finish executing all matching
+ "add" rules and release the device before exiting (default). This option
+ is not passed to the kernel.
+
+* noudev - Don't wait for udev device manager. When enabled, the device may
+ not be fully usable immediately on exit.
+
+`rbd device unmap` options:
+
+* force - Force the unmapping of a block device that is open (since 4.9). The
+ driver will wait for running requests to complete and then unmap; requests
+ sent to the driver after initiating the unmap will be failed.
+
+* udev - Wait for udev device manager to finish executing all matching
+ "remove" rules and clean up after the device before exiting (default).
+ This option is not passed to the kernel.
+
+* noudev - Don't wait for udev device manager.
+
+
+Examples
+========
+
+To create a new rbd image that is 100 GB::
+
+ rbd create mypool/myimage --size 102400
+
+To use a non-default object size (8 MB)::
+
+ rbd create mypool/myimage --size 102400 --object-size 8M
+
+To delete an rbd image (be careful!)::
+
+ rbd rm mypool/myimage
+
+To create a new snapshot::
+
+ rbd snap create mypool/myimage@mysnap
+
+To create a copy-on-write clone of a protected snapshot::
+
+ rbd clone mypool/myimage@mysnap otherpool/cloneimage
+
+To see which clones of a snapshot exist::
+
+ rbd children mypool/myimage@mysnap
+
+To delete a snapshot::
+
+ rbd snap rm mypool/myimage@mysnap
+
+To map an image via the kernel with cephx enabled::
+
+ rbd device map mypool/myimage --id admin --keyfile secretfile
+
+To map an image via the kernel using a cluster name other than the default *ceph*::
+
+ rbd device map mypool/myimage --cluster cluster-name
+
+To unmap an image::
+
+ rbd device unmap /dev/rbd0
+
+To create an image and a clone from it::
+
+ rbd import --image-format 2 image mypool/parent
+ rbd snap create mypool/parent@snap
+ rbd snap protect mypool/parent@snap
+ rbd clone mypool/parent@snap otherpool/child
+
+To create an image with a smaller stripe_unit (to better distribute small writes in some workloads)::
+
+ rbd create mypool/myimage --size 102400 --stripe-unit 65536B --stripe-count 16
+
+To change an image from one image format to another, export it and then
+import it as the desired image format::
+
+ rbd export mypool/myimage@snap /tmp/img
+ rbd import --image-format 2 /tmp/img mypool/myimage2
+
+To lock an image for exclusive use::
+
+ rbd lock add mypool/myimage mylockid
+
+To release a lock::
+
+ rbd lock remove mypool/myimage mylockid client.2485
+
+To list images from trash::
+
+ rbd trash ls mypool
+
+To defer deletion of an image by moving it to trash (use *--expires-at* to set the expiration time; the default is now)::
+
+ rbd trash mv mypool/myimage --expires-at "tomorrow"
+
+To delete an image from trash (be careful!)::
+
+ rbd trash rm mypool/myimage-id
+
+To force delete an image from trash (be careful!)::
+
+ rbd trash rm mypool/myimage-id --force
+
+To restore an image from trash::
+
+ rbd trash restore mypool/myimage-id
+
+To restore an image from trash and rename it::
+
+ rbd trash restore mypool/myimage-id --image mynewimage
+
+
+Availability
+============
+
+**rbd** is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
+the Ceph documentation at http://ceph.com/docs for more information.
+
+
+See also
+========
+
+:doc:`ceph <ceph>`\(8),
+:doc:`rados <rados>`\(8)
diff --git a/doc/man/8/rbdmap.rst b/doc/man/8/rbdmap.rst
new file mode 100644
index 000000000..137920f4c
--- /dev/null
+++ b/doc/man/8/rbdmap.rst
@@ -0,0 +1,128 @@
+:orphan:
+
+=========================================
+ rbdmap -- map RBD devices at boot time
+=========================================
+
+.. program:: rbdmap
+
+Synopsis
+========
+
+| **rbdmap map**
+| **rbdmap unmap**
+
+
+Description
+===========
+
+**rbdmap** is a shell script that automates ``rbd map`` and ``rbd unmap``
+operations on one or more RBD (RADOS Block Device) images. While the script can be
+run manually by the system administrator at any time, the principal use case is
+automatic mapping/mounting of RBD images at boot time (and unmounting/unmapping
+at shutdown), as triggered by the init system (a systemd unit file,
+``rbdmap.service``, is included with the ceph-common package for this purpose).
+
+The script takes a single argument, which can be either "map" or "unmap".
+In either case, the script parses a configuration file (defaults to ``/etc/ceph/rbdmap``,
+but can be overridden via the ``RBDMAPFILE`` environment variable). Each line
+of the configuration file corresponds to an RBD image which is to be mapped or
+unmapped.
+
+The configuration file format is::
+
+ IMAGESPEC RBDOPTS
+
+where ``IMAGESPEC`` should be specified as ``POOLNAME/IMAGENAME`` (the pool
+name, a forward slash, and the image name), or merely ``IMAGENAME``, in which
+case the ``POOLNAME`` defaults to "rbd". ``RBDOPTS`` is an optional list of
+parameters to be passed to the underlying ``rbd map`` command. These parameters
+and their values should be specified as a comma-separated string::
+
+ PARAM1=VAL1,PARAM2=VAL2,...,PARAMN=VALN
+
+This will cause the script to issue an ``rbd map`` command like the following::
+
+ rbd map POOLNAME/IMAGENAME --PARAM1 VAL1 --PARAM2 VAL2
+
+(See the ``rbd`` manpage for a full list of possible options.)
+Parameters and values that contain commas or equals signs can be enclosed in
+single quotes (apostrophes) to prevent them from being split.
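+
+For example, an ``RBDOPTS`` string that passes a nested, comma-separated value
+through to ``rbd map`` unsplit (illustrative values, mirroring the example
+below) could look like::
+
+  id=admin,options='lock_on_read,queue_depth=1024'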
+
+When run as ``rbdmap map``, the script parses the configuration file, and for
+each RBD image specified attempts to first map the image (using the ``rbd map``
+command) and, second, to mount the image.
+
+When run as ``rbdmap unmap``, images listed in the configuration file will
+be unmounted and unmapped.
+
+``rbdmap unmap-all`` attempts to unmount and subsequently unmap all currently
+mapped RBD images, regardless of whether or not they are listed in the
+configuration file.
+
+If successful, the ``rbd map`` operation maps the image to a ``/dev/rbdX``
+device, at which point a udev rule is triggered to create a friendly device
+name symlink, ``/dev/rbd/POOLNAME/IMAGENAME``, pointing to the real mapped
+device.
+
+In order for mounting/unmounting to succeed, the friendly device name must
+have a corresponding entry in ``/etc/fstab``.
+
+When writing ``/etc/fstab`` entries for RBD images, it's a good idea to specify
+the "noauto" (or "nofail") mount option. This prevents the init system from
+trying to mount the device too early - before the device in question even
+exists. (Since ``rbdmap.service``
+executes a shell script, it is typically triggered quite late in the boot
+sequence.)
+
+
+Examples
+========
+
+Example ``/etc/ceph/rbdmap`` for three RBD images called "bar1", "bar2" and "bar3",
+which are in pool "foopool"::
+
+ foopool/bar1 id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
+ foopool/bar2 id=admin,keyring=/etc/ceph/ceph.client.admin.keyring
+ foopool/bar3 id=admin,keyring=/etc/ceph/ceph.client.admin.keyring,options='lock_on_read,queue_depth=1024'
+
+Each line in the file contains two strings: the image spec and the options to
+be passed to ``rbd map``. These three lines are transformed into the following
+commands::
+
+  rbd map foopool/bar1 --id admin --keyring /etc/ceph/ceph.client.admin.keyring
+  rbd map foopool/bar2 --id admin --keyring /etc/ceph/ceph.client.admin.keyring
+  rbd map foopool/bar3 --id admin --keyring /etc/ceph/ceph.client.admin.keyring --options lock_on_read,queue_depth=1024
+
+If the images had XFS file systems on them, the corresponding ``/etc/fstab``
+entries might look like this::
+
+ /dev/rbd/foopool/bar1 /mnt/bar1 xfs noauto 0 0
+ /dev/rbd/foopool/bar2 /mnt/bar2 xfs noauto 0 0
+ /dev/rbd/foopool/bar3 /mnt/bar3 xfs noauto 0 0
+
+After creating the images and populating the ``/etc/ceph/rbdmap`` file, having
+the images automatically mapped and mounted at boot is just a matter of
+enabling the unit::
+
+ systemctl enable rbdmap.service
+
+
+Options
+=======
+
+None
+
+
+Availability
+============
+
+**rbdmap** is part of Ceph, a massively scalable, open-source, distributed
+storage system. Please refer to the Ceph documentation at
+http://ceph.com/docs for more information.
+
+
+See also
+========
+
+:doc:`rbd <rbd>`\(8),
diff --git a/doc/man/8/rgw-orphan-list.rst b/doc/man/8/rgw-orphan-list.rst
new file mode 100644
index 000000000..408242da2
--- /dev/null
+++ b/doc/man/8/rgw-orphan-list.rst
@@ -0,0 +1,69 @@
+:orphan:
+
+==================================================================
+ rgw-orphan-list -- list rados objects that are not indexed by rgw
+==================================================================
+
+.. program:: rgw-orphan-list
+
+Synopsis
+========
+
+| **rgw-orphan-list**
+
+Description
+===========
+
+:program:`rgw-orphan-list` is an *EXPERIMENTAL* RADOS gateway user
+administration utility. It produces a listing of rados objects that
+are not directly or indirectly referenced through the bucket indexes
+on a pool. It places the results and intermediate files on the local
+filesystem rather than on the ceph cluster itself, and therefore will
+not itself consume additional cluster storage.
+
+In theory, orphans should not exist. However, because Ceph evolves
+rapidly, bugs do crop up, and they may result in orphans being left
+behind.
+
+In its current form this utility does not take any command-line
+arguments or options. It will list the available pools and prompt the
+user to enter the pool they would like to list orphans for.
+
+Behind the scenes it runs `rados ls` and `radosgw-admin bucket
+radoslist ...` and produces a list of those entries that appear in the
+former but not the latter. Those entries are presumed to be the
+orphans.
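+
+A rough sketch of the comparison it performs (illustrative and simplified;
+the actual script manages pool selection, temporary files and sorting on its
+own) is::
+
+  rados ls -p <pool> | sort > rados.objects
+  radosgw-admin bucket radoslist ... | sort > rgw.objects
+  ceph-diff-sorted rados.objects rgw.objects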
+
+Warnings
+========
+
+This utility is currently considered *EXPERIMENTAL*.
+
+This utility will produce false orphan entries for unindexed buckets
+since such buckets have no bucket indices that can provide the
+starting point for tracing.
+
+Options
+=======
+
+At present there are no options.
+
+Examples
+========
+
+Launch the tool::
+
+ $ rgw-orphan-list
+
+Availability
+============
+
+:program:`rgw-orphan-list` is part of Ceph, a massively scalable, open-source,
+distributed storage system. Please refer to the Ceph documentation at
+http://ceph.com/docs for more information.
+
+See also
+========
+
+:doc:`radosgw-admin <radosgw-admin>`\(8),
+:doc:`ceph-diff-sorted <ceph-diff-sorted>`\(8)
diff --git a/doc/man/CMakeLists.txt b/doc/man/CMakeLists.txt
new file mode 100644
index 000000000..e81631bab
--- /dev/null
+++ b/doc/man/CMakeLists.txt
@@ -0,0 +1,15 @@
+set(sphinx_input)
+set(sphinx_output)
+set(sphinx_output_dir ${CMAKE_BINARY_DIR}/doc/man)
+
+add_subdirectory(8)
+
+add_custom_command(
+ OUTPUT ${sphinx_output}
+ COMMAND ${SPHINX_BUILD} -b man -t man -d ${CMAKE_BINARY_DIR}/doc/doctrees -c ${CMAKE_SOURCE_DIR}/man ${CMAKE_CURRENT_SOURCE_DIR} ${sphinx_output_dir}
+ DEPENDS ${sphinx_input})
+
+add_custom_target(
+ manpages ALL
+ DEPENDS ${sphinx_output}
+ COMMENT "manpages building")