From 19fcec84d8d7d21e796c7624e521b60d28ee21ed Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 7 Apr 2024 20:45:59 +0200 Subject: Adding upstream version 16.2.11+ds. Signed-off-by: Daniel Baumann --- doc/man/8/CMakeLists.txt | 89 ++ doc/man/8/ceph-authtool.rst | 206 ++++ doc/man/8/ceph-bluestore-tool.rst | 212 ++++ doc/man/8/ceph-clsinfo.rst | 49 + doc/man/8/ceph-conf.rst | 149 +++ doc/man/8/ceph-create-keys.rst | 67 ++ doc/man/8/ceph-debugpack.rst | 50 + doc/man/8/ceph-dencoder.rst | 151 +++ doc/man/8/ceph-deploy.rst | 529 +++++++++ doc/man/8/ceph-diff-sorted.rst | 71 ++ doc/man/8/ceph-fuse.rst | 85 ++ doc/man/8/ceph-immutable-object-cache.rst | 76 ++ doc/man/8/ceph-kvstore-tool.rst | 98 ++ doc/man/8/ceph-mds.rst | 82 ++ doc/man/8/ceph-mon.rst | 99 ++ doc/man/8/ceph-objectstore-tool.rst | 488 +++++++++ doc/man/8/ceph-osd.rst | 140 +++ doc/man/8/ceph-post-file.rst | 71 ++ doc/man/8/ceph-rbdnamer.rst | 36 + doc/man/8/ceph-run.rst | 45 + doc/man/8/ceph-syn.rst | 99 ++ doc/man/8/ceph-volume-systemd.rst | 55 + doc/man/8/ceph-volume.rst | 425 ++++++++ doc/man/8/ceph.rst | 1649 +++++++++++++++++++++++++++++ doc/man/8/cephadm.rst | 526 +++++++++ doc/man/8/cephfs-mirror.rst | 66 ++ doc/man/8/cephfs-top.rst | 121 +++ doc/man/8/crushtool.rst | 302 ++++++ doc/man/8/librados-config.rst | 46 + doc/man/8/monmaptool.rst | 140 +++ doc/man/8/mount.ceph.rst | 256 +++++ doc/man/8/mount.fuse.ceph.rst | 71 ++ doc/man/8/osdmaptool.rst | 331 ++++++ doc/man/8/rados.rst | 404 +++++++ doc/man/8/radosgw-admin.rst | 1023 ++++++++++++++++++ doc/man/8/radosgw.rst | 253 +++++ doc/man/8/rbd-fuse.rst | 61 ++ doc/man/8/rbd-ggate.rst | 79 ++ doc/man/8/rbd-mirror.rst | 75 ++ doc/man/8/rbd-nbd.rst | 89 ++ doc/man/8/rbd-replay-many.rst | 73 ++ doc/man/8/rbd-replay-prep.rst | 55 + doc/man/8/rbd-replay.rst | 78 ++ doc/man/8/rbd.rst | 1036 ++++++++++++++++++ doc/man/8/rbdmap.rst | 128 +++ doc/man/8/rgw-orphan-list.rst | 69 ++ doc/man/CMakeLists.txt | 15 + 47 files changed, 10318 insertions(+) 
create mode 100644 doc/man/8/CMakeLists.txt create mode 100644 doc/man/8/ceph-authtool.rst create mode 100644 doc/man/8/ceph-bluestore-tool.rst create mode 100644 doc/man/8/ceph-clsinfo.rst create mode 100644 doc/man/8/ceph-conf.rst create mode 100644 doc/man/8/ceph-create-keys.rst create mode 100644 doc/man/8/ceph-debugpack.rst create mode 100644 doc/man/8/ceph-dencoder.rst create mode 100644 doc/man/8/ceph-deploy.rst create mode 100644 doc/man/8/ceph-diff-sorted.rst create mode 100644 doc/man/8/ceph-fuse.rst create mode 100644 doc/man/8/ceph-immutable-object-cache.rst create mode 100644 doc/man/8/ceph-kvstore-tool.rst create mode 100644 doc/man/8/ceph-mds.rst create mode 100644 doc/man/8/ceph-mon.rst create mode 100644 doc/man/8/ceph-objectstore-tool.rst create mode 100644 doc/man/8/ceph-osd.rst create mode 100644 doc/man/8/ceph-post-file.rst create mode 100644 doc/man/8/ceph-rbdnamer.rst create mode 100644 doc/man/8/ceph-run.rst create mode 100644 doc/man/8/ceph-syn.rst create mode 100644 doc/man/8/ceph-volume-systemd.rst create mode 100644 doc/man/8/ceph-volume.rst create mode 100644 doc/man/8/ceph.rst create mode 100644 doc/man/8/cephadm.rst create mode 100644 doc/man/8/cephfs-mirror.rst create mode 100644 doc/man/8/cephfs-top.rst create mode 100644 doc/man/8/crushtool.rst create mode 100644 doc/man/8/librados-config.rst create mode 100644 doc/man/8/monmaptool.rst create mode 100644 doc/man/8/mount.ceph.rst create mode 100644 doc/man/8/mount.fuse.ceph.rst create mode 100644 doc/man/8/osdmaptool.rst create mode 100644 doc/man/8/rados.rst create mode 100644 doc/man/8/radosgw-admin.rst create mode 100644 doc/man/8/radosgw.rst create mode 100644 doc/man/8/rbd-fuse.rst create mode 100644 doc/man/8/rbd-ggate.rst create mode 100644 doc/man/8/rbd-mirror.rst create mode 100644 doc/man/8/rbd-nbd.rst create mode 100644 doc/man/8/rbd-replay-many.rst create mode 100644 doc/man/8/rbd-replay-prep.rst create mode 100644 doc/man/8/rbd-replay.rst create mode 100644 
doc/man/8/rbd.rst create mode 100644 doc/man/8/rbdmap.rst create mode 100644 doc/man/8/rgw-orphan-list.rst create mode 100644 doc/man/CMakeLists.txt (limited to 'doc/man') diff --git a/doc/man/8/CMakeLists.txt b/doc/man/8/CMakeLists.txt new file mode 100644 index 000000000..8e1b9373f --- /dev/null +++ b/doc/man/8/CMakeLists.txt @@ -0,0 +1,89 @@ +set(client_srcs + ceph-syn.rst + ceph-conf.rst + ceph.rst + ceph-authtool.rst + ceph-kvstore-tool.rst + rados.rst + ceph-post-file.rst + ceph-dencoder.rst) + +set(server_srcs + ceph-deploy.rst + crushtool.rst + ceph-run.rst + mount.ceph.rst + mount.fuse.ceph.rst + ceph-create-keys.rst) +if(WITH_TESTS) +list(APPEND server_srcs + ceph-debugpack.rst) +endif(WITH_TESTS) + +set(osd_srcs + ceph-clsinfo.rst + ceph-volume.rst + ceph-volume-systemd.rst + ceph-osd.rst + osdmaptool.rst + ceph-bluestore-tool.rst) + +set(mon_srcs + ceph-mon.rst + monmaptool.rst) + +list(APPEND man_srcs + ${client_srcs} + ${server_srcs} + ${osd_srcs} + ${mon_srcs} + ceph-mds.rst + cephfs-top.rst + librados-config.rst + cephadm.rst + cephfs-mirror.rst) + +if(HAVE_LIBFUSE) + list(APPEND man_srcs + ceph-fuse.rst + rbd-fuse.rst) +endif() + +if(WITH_RADOSGW) + list(APPEND man_srcs + radosgw.rst + radosgw-admin.rst + rgw-orphan-list.rst + ceph-diff-sorted.rst) +endif() + +if(WITH_RBD) + list(APPEND man_srcs + ceph-rbdnamer.rst + rbd-mirror.rst + rbd-replay-many.rst + rbd-replay-prep.rst + rbd-replay.rst + rbdmap.rst + rbd.rst) + if(LINUX) + list(APPEND man_srcs rbd-nbd.rst) + endif() + if(FREEBSD) + list(APPEND man_srcs rbd-ggate.rst) + endif() +endif() + +list(APPEND man_srcs ceph-immutable-object-cache.rst) +foreach(man ${man_srcs}) + list(APPEND sphinx_input ${CMAKE_CURRENT_SOURCE_DIR}/${man}) + # mount.ceph.rst => mount if we use + # get_filename_component(cmd ${man} NAME_WE) + string(REGEX REPLACE ".rst$" "" cmd ${man}) + list(APPEND sphinx_output ${sphinx_output_dir}/${cmd}.8) + install(FILES ${sphinx_output_dir}/${cmd}.8 + DESTINATION 
${CEPH_MAN_DIR}/man8) +endforeach() + +set(sphinx_input ${sphinx_input} PARENT_SCOPE) +set(sphinx_output ${sphinx_output} PARENT_SCOPE) diff --git a/doc/man/8/ceph-authtool.rst b/doc/man/8/ceph-authtool.rst new file mode 100644 index 000000000..af9ee7123 --- /dev/null +++ b/doc/man/8/ceph-authtool.rst @@ -0,0 +1,206 @@ +:orphan: + +================================================= + ceph-authtool -- ceph keyring manipulation tool +================================================= + +.. program:: ceph-authtool + +Synopsis +======== + +| **ceph-authtool** *keyringfile* + [ -l | --list ] + [ -p | --print-key ] + [ -C | --create-keyring ] + [ -g | --gen-key ] + [ --gen-print-key ] + [ --import-keyring *otherkeyringfile* ] + [ -n | --name *entityname* ] + [ -a | --add-key *base64_key* ] + [ --cap *subsystem* *capability* ] + [ --caps *capfile* ] + [ --mode *mode* ] + + +Description +=========== + +**ceph-authtool** is a utility to create, view, and modify a Ceph keyring +file. A keyring file stores one or more Ceph authentication keys and +possibly an associated capability specification. Each key is +associated with an entity name, of the form +``{client,mon,mds,osd}.name``. + +**WARNING** Ceph provides authentication and protection against +man-in-the-middle attacks once secret keys are in place. However, +data over the wire is not encrypted, which may include the messages +used to configure said keys. The system is primarily intended to be +used in trusted environments. + +Options +======= + +.. option:: -l, --list + + will list all keys and capabilities present in the keyring + +.. option:: -p, --print-key + + will print an encoded key for the specified entityname. This is + suitable for the ``mount -o secret=`` argument + +.. option:: -C, --create-keyring + + will create a new keyring, overwriting any existing keyringfile + +.. option:: -g, --gen-key + + will generate a new secret key for the specified entityname + +.. 
option:: --gen-print-key + + will generate a new secret key for the specified entityname, + without altering the keyringfile, printing the secret to stdout + +.. option:: --import-keyring *secondkeyringfile* + + will import the content of a given keyring to the keyringfile + +.. option:: -n, --name *name* + + specify entityname to operate on + +.. option:: -a, --add-key *base64_key* + + will add an encoded key to the keyring + +.. option:: --cap *subsystem* *capability* + + will set the capability for given subsystem + +.. option:: --caps *capsfile* + + will set all of capabilities associated with a given key, for all subsystems + + .. option:: --mode *mode* + + will set the desired file mode to the keyring e.g: 0644, defaults to 0600 + + +Capabilities +============ + +The subsystem is the name of a Ceph subsystem: ``mon``, ``mds``, or +``osd``. + +The capability is a string describing what the given user is allowed +to do. This takes the form of a comma separated list of allow +clauses with a permission specifier containing one or more of rwx for +read, write, and execute permission. The ``allow *`` grants full +superuser permissions for the given subsystem. + +For example:: + + # can read, write, and execute objects + osd = "allow rwx" + + # can access mds server + mds = "allow" + + # can modify cluster state (i.e., is a server daemon) + mon = "allow rwx" + +A librados user restricted to a single pool might look like:: + + mon = "allow r" + + osd = "allow rw pool foo" + +A client using rbd with read access to one pool and read/write access to another:: + + mon = "allow r" + + osd = "allow class-read object_prefix rbd_children, allow pool templates r class-read, allow pool vms rwx" + +A client mounting the file system with minimal permissions would need caps like:: + + mds = "allow" + + osd = "allow rw pool data" + + mon = "allow r" + + +OSD Capabilities +================ + +In general, an osd capability follows the grammar:: + + osdcap := grant[,grant...] 
+ grant := allow (match capspec | capspec match) + match := [ pool[=] | object_prefix + | namespace[=] + | tag = ] + capspec := * | [r][w][x] [class-read] [class-write] + +The capspec determines what kind of operations the entity can perform:: + + r = read access to objects + w = write access to objects + x = can call any class method (same as class-read class-write) + class-read = can call class methods that are reads + class-write = can call class methods that are writes + * or "all" = equivalent to rwx, plus the ability to run osd admin commands, + i.e. ceph osd tell ... + +The match criteria restrict a grant based on the pool being accessed. +Grants are additive if the client fulfills the match condition. For +example, if a client has the osd capabilities: "allow r object_prefix +prefix, allow w pool foo, allow x pool bar", then it has rw access to +pool foo, rx access to pool bar, and r access to objects whose +names begin with 'prefix' in any pool. + +Caps file format +================ + +The caps file format consists of zero or more key/value pairs, one per +line. The key and value are separated by an ``=``, and the value must +be quoted (with ``'`` or ``"``) if it contains any whitespace. The key +is the name of the Ceph subsystem (``osd``, ``mds``, ``mon``), and the +value is the capability string (see above). 
+ + +Example +======= + +To create a new keyring containing a key for client.foo with a 0644 file mode:: + + ceph-authtool -C -n client.foo --gen-key keyring --mode 0644 + +To associate some capabilities with the key (namely, the ability to +mount a Ceph file system):: + + ceph-authtool -n client.foo --cap mds 'allow' --cap osd 'allow rw pool=data' --cap mon 'allow r' keyring + +To display the contents of the keyring:: + + ceph-authtool -l keyring + +When mounting a Ceph file system, you can grab the appropriately encoded secret key with:: + + mount -t ceph serverhost:/ mountpoint -o name=foo,secret=`ceph-authtool -p -n client.foo keyring` + + +Availability +============ + +**ceph-authtool** is part of Ceph, a massively scalable, open-source, distributed storage system. Please +refer to the Ceph documentation at http://ceph.com/docs for more +information. + + +See also +======== + +:doc:`ceph `\(8) diff --git a/doc/man/8/ceph-bluestore-tool.rst b/doc/man/8/ceph-bluestore-tool.rst new file mode 100644 index 000000000..bb67ccc71 --- /dev/null +++ b/doc/man/8/ceph-bluestore-tool.rst @@ -0,0 +1,212 @@ +:orphan: + +====================================================== + ceph-bluestore-tool -- bluestore administrative tool +====================================================== + +.. program:: ceph-bluestore-tool + +Synopsis +======== + +| **ceph-bluestore-tool** *command* + [ --dev *device* ... ] + [ --path *osd path* ] + [ --out-dir *dir* ] + [ --log-file | -l *filename* ] + [ --deep ] +| **ceph-bluestore-tool** fsck|repair --path *osd path* [ --deep ] +| **ceph-bluestore-tool** show-label --dev *device* ... 
+| **ceph-bluestore-tool** prime-osd-dir --dev *device* --path *osd path* +| **ceph-bluestore-tool** bluefs-export --path *osd path* --out-dir *dir* +| **ceph-bluestore-tool** bluefs-bdev-new-wal --path *osd path* --dev-target *new-device* +| **ceph-bluestore-tool** bluefs-bdev-new-db --path *osd path* --dev-target *new-device* +| **ceph-bluestore-tool** bluefs-bdev-migrate --path *osd path* --dev-target *new-device* --devs-source *device1* [--devs-source *device2*] +| **ceph-bluestore-tool** free-dump|free-score --path *osd path* [ --allocator block/bluefs-wal/bluefs-db/bluefs-slow ] +| **ceph-bluestore-tool** reshard --path *osd path* --sharding *new sharding* [ --sharding-ctrl *control string* ] +| **ceph-bluestore-tool** show-sharding --path *osd path* + + +Description +=========== + +**ceph-bluestore-tool** is a utility to perform low-level administrative +operations on a BlueStore instance. + +Commands +======== + +:command:`help` + + show help + +:command:`fsck` [ --deep ] + + run consistency check on BlueStore metadata. If *--deep* is specified, also read all object data and verify checksums. + +:command:`repair` + + Run a consistency check *and* repair any errors we can. + +:command:`bluefs-export` + + Export the contents of BlueFS (i.e., RocksDB files) to an output directory. + +:command:`bluefs-bdev-sizes` --path *osd path* + + Print the device sizes, as understood by BlueFS, to stdout. + +:command:`bluefs-bdev-expand` --path *osd path* + + Instruct BlueFS to check the size of its block devices and, if they have + expanded, make use of the additional space. Please note that only the new + files created by BlueFS will be allocated on the preferred block device if + it has enough free space, and the existing files that have spilled over to + the slow device will be gradually removed when RocksDB performs compaction. + In other words, if there is any data spilled over to the slow device, it + will be moved to the fast device over time. 
+ +:command:`bluefs-bdev-new-wal` --path *osd path* --dev-target *new-device* + + Adds WAL device to BlueFS, fails if WAL device already exists. + +:command:`bluefs-bdev-new-db` --path *osd path* --dev-target *new-device* + + Adds DB device to BlueFS, fails if DB device already exists. + +:command:`bluefs-bdev-migrate` --dev-target *new-device* --devs-source *device1* [--devs-source *device2*] + + Moves BlueFS data from source device(s) to the target one, source devices + (except the main one) are removed on success. Target device can be either an + already attached or a new device. In the latter case it's added to OSD + replacing one of the source devices. Following replacement rules apply + (in the order of precedence, stop on the first match): + + - if source list has DB volume - target device replaces it. + - if source list has WAL volume - target device replaces it. + - if source list has slow volume only - operation isn't permitted, requires explicit allocation via new-db/new-wal command. + +:command:`show-label` --dev *device* [...] + + Show device label(s). + +:command:`free-dump` --path *osd path* [ --allocator block/bluefs-wal/bluefs-db/bluefs-slow ] + + Dump all free regions in allocator. + +:command:`free-score` --path *osd path* [ --allocator block/bluefs-wal/bluefs-db/bluefs-slow ] + + Give a [0-1] number that represents quality of fragmentation in allocator. + 0 represents case when all free space is in one chunk. 1 represents worst possible fragmentation. + +:command:`reshard` --path *osd path* --sharding *new sharding* [ --resharding-ctrl *control string* ] + + Changes sharding of BlueStore's RocksDB. Sharding is built on top of RocksDB column families. + This option allows testing the performance of *new sharding* without the need to redeploy the OSD. + Resharding is usually a long process, which involves walking through the entire RocksDB key space + and moving some of them to different column families.
+ Option --resharding-ctrl provides performance control over the resharding process. + Interrupted resharding will prevent OSD from running. + Interrupted resharding does not corrupt data. It is always possible to continue previous resharding, + or select any other sharding scheme, including reverting to the original one. + +:command:`show-sharding` --path *osd path* + + Show sharding that is currently applied to BlueStore's RocksDB. + +Options +======= + +.. option:: --dev *device* + + Add *device* to the list of devices to consider + +.. option:: --devs-source *device* + + Add *device* to the list of devices to consider as sources for migrate operation + +.. option:: --dev-target *device* + + Specify target *device* for migrate operation or device to add for adding new DB/WAL. + +.. option:: --path *osd path* + + Specify an osd path. In most cases, the device list is inferred from the symlinks present in *osd path*. This is usually simpler than explicitly specifying the device(s) with --dev. + +.. option:: --out-dir *dir* + + Output directory for bluefs-export + +.. option:: -l, --log-file *log file* + + file to log to + +.. option:: --log-level *num* + + debug log level. Default is 30 (extremely verbose), 20 is very + verbose, 10 is verbose, and 1 is not very verbose. + +.. option:: --deep + + deep scrub/repair (read and validate object data, not just metadata) + +.. option:: --allocator *name* + + Useful for *free-dump* and *free-score* actions. Selects allocator(s). + +.. option:: --resharding-ctrl *control string* + + Provides control over the resharding process. Specifies how often to refresh the RocksDB iterator, + and how large the commit batch should be before committing to RocksDB. Option format is: + /// + Default: 10000000/10000/1000000/1000 + +Device labels +============= + +Every BlueStore block device has a single block label at the beginning of the +device.
You can dump the contents of the label with:: + + ceph-bluestore-tool show-label --dev *device* + +The main device will have a lot of metadata, including information +that used to be stored in small files in the OSD data directory. The +auxiliary devices (db and wal) will only have the minimum required +fields (OSD UUID, size, device type, birth time). + +OSD directory priming +===================== + +You can generate the content for an OSD data directory that can start up a +BlueStore OSD with the *prime-osd-dir* command:: + + ceph-bluestore-tool prime-osd-dir --dev *main device* --path /var/lib/ceph/osd/ceph-*id* + +BlueFS log rescue +===================== + +Some versions of BlueStore were susceptible to BlueFS log growing extremely large - +beyond the point of making booting OSD impossible. This state is indicated by +booting that takes very long and fails in _replay function. + +This can be fixed by:: + ceph-bluestore-tool fsck --path *osd path* --bluefs_replay_recovery=true + +It is advised to first check if the rescue process would be successful:: + ceph-bluestore-tool fsck --path *osd path* \ + --bluefs_replay_recovery=true --bluefs_replay_recovery_disable_compact=true + +If the above fsck is successful, the fix procedure can be applied. + +Availability +============ + +**ceph-bluestore-tool** is part of Ceph, a massively scalable, +open-source, distributed storage system. Please refer to the Ceph +documentation at http://ceph.com/docs for more information. + + +See also +======== + +:doc:`ceph-osd `\(8) diff --git a/doc/man/8/ceph-clsinfo.rst b/doc/man/8/ceph-clsinfo.rst new file mode 100644 index 000000000..0188ce131 --- /dev/null +++ b/doc/man/8/ceph-clsinfo.rst @@ -0,0 +1,49 @@ +:orphan: + +=============================================== + ceph-clsinfo -- show class object information +=============================================== + +.. program:: ceph-clsinfo + +Synopsis +======== + +| **ceph-clsinfo** [ *options* ] ...
*filename* + + +Description +=========== + +**ceph-clsinfo** can show name, version, and architecture information +about a specific class object. + + +Options +======= + +.. option:: -n, --name + + Shows the class name + +.. option:: -v, --version + + Shows the class version + +.. option:: -a, --arch + + Shows the class architecture + + +Availability +============ + +**ceph-clsinfo** is part of Ceph, a massively scalable, open-source, distributed storage system. Please +refer to the Ceph documentation at http://ceph.com/docs for more +information. + + +See also +======== + +:doc:`ceph `\(8) diff --git a/doc/man/8/ceph-conf.rst b/doc/man/8/ceph-conf.rst new file mode 100644 index 000000000..b8b99c2aa --- /dev/null +++ b/doc/man/8/ceph-conf.rst @@ -0,0 +1,149 @@ +:orphan: + +================================== + ceph-conf -- ceph conf file tool +================================== + +.. program:: ceph-conf + +Synopsis +======== + +| **ceph-conf** -c *conffile* --list-all-sections +| **ceph-conf** -c *conffile* -L +| **ceph-conf** -c *conffile* -l *prefix* +| **ceph-conf** *key* -s *section1* ... +| **ceph-conf** [-s *section* ] [-r] --lookup *key* +| **ceph-conf** [-s *section* ] *key* + + +Description +=========== + +**ceph-conf** is a utility for getting information from a ceph +configuration file. As with most Ceph programs, you can specify which +Ceph configuration file to use with the ``-c`` flag. + +Note that unlike other ceph tools, **ceph-conf** will *only* read from +config files (or return compiled-in default values)--it will *not* +fetch config values from the monitor cluster. For this reason it is +recommended that **ceph-conf** only be used in legacy environments +that are strictly config-file based. New deployments and tools should +instead rely on either querying the monitor explicitly for +configuration (e.g., ``ceph config get