path: root/src/test/cli/osdmaptool
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-21 11:54:28 +0000
commit     e6918187568dbd01842d8d1d2c808ce16a894239 (patch)
tree       64f88b554b444a49f656b6c656111a145cbbaa28 /src/test/cli/osdmaptool
parent     Initial commit. (diff)
Adding upstream version 18.2.2. (upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/test/cli/osdmaptool')
-rw-r--r--  src/test/cli/osdmaptool/ceph.conf.withracks   1480
-rw-r--r--  src/test/cli/osdmaptool/clobber.t               65
-rw-r--r--  src/test/cli/osdmaptool/create-print.t          97
-rw-r--r--  src/test/cli/osdmaptool/create-racks.t         810
-rw-r--r--  src/test/cli/osdmaptool/crush.t                 17
-rw-r--r--  src/test/cli/osdmaptool/help.t                  42
-rw-r--r--  src/test/cli/osdmaptool/missing-argument.t       3
-rw-r--r--  src/test/cli/osdmaptool/pool.t                  54
-rw-r--r--  src/test/cli/osdmaptool/print-empty.t            5
-rw-r--r--  src/test/cli/osdmaptool/print-nonexistent.t      4
-rw-r--r--  src/test/cli/osdmaptool/test-map-pgs.t          43
-rw-r--r--  src/test/cli/osdmaptool/tree.t                  95
-rw-r--r--  src/test/cli/osdmaptool/upmap-out.t             24
-rw-r--r--  src/test/cli/osdmaptool/upmap.t                 37
14 files changed, 2776 insertions, 0 deletions
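
The .t files listed above are cram-style CLI tests: indented lines that begin with "$ " are shell commands, the indented lines that follow are the expected output, and suffixes such as (re) and (esc) mark regex-matched and escape-decoded expectations. As a minimal sketch (not the upstream harness, which drives these through the Ceph build), one way to run a single test, assuming the standalone cram runner is installed and osdmaptool/crushtool are on PATH:

    pip install cram                          # assumption: using the standalone cram package
    cram src/test/cli/osdmaptool/clobber.t    # cram sets $TESTDIR to the directory holding the .t file

On failure cram prints a diff between the expected and the actual output.
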
diff --git a/src/test/cli/osdmaptool/ceph.conf.withracks b/src/test/cli/osdmaptool/ceph.conf.withracks
new file mode 100644
index 000000000..09399e955
--- /dev/null
+++ b/src/test/cli/osdmaptool/ceph.conf.withracks
@@ -0,0 +1,1480 @@
+
+[global]
+ auth supported = cephx
+ ms bind ipv6 = true
+
+[mon]
+ mon data = /var/ceph/mon
+ mon clock drift allowed = 0.1
+
+ osd pool default size = 3
+ osd pool default crush rule = 0
+
+ ; don't mark down osds out automatically; wait for an admin!
+ mon osd down out interval = 0
+
+[mon.alpha]
+ host = peon5752
+ mon addr = [2607:f298:4:2243::5752]:6789
+
+[mon.beta]
+ host = peon5753
+ mon addr = [2607:f298:4:2243::5753]:6789
+
+[mon.charlie]
+ host = peon5754
+ mon addr = [2607:f298:4:2243::5754]:6789
+
+[client]
+ rgw socket path = /var/run/ceph/radosgw.$name
+ rgw cache enabled = true
+ rgw dns name = objects.dreamhost.com
+ rgw swift url = https://objects.dreamhost.com
+
+[client.radosgw.peon5751]
+ host = peon5751
+ log file = /var/log/ceph/$name.log
+ debug rgw = 40
+ debug ms = 1
+
+
+[osd]
+ keyring = /mnt/osd.$id/keyring
+ osd data = /mnt/osd.$id
+ osd journal = /dev/disk/by-label/osd.$id.journal
+ osd mkfs type = btrfs
+ osd mount options btrfs = rw,noatime
+ devs = /dev/disk/by-label/osd.$id.data
+; temp sage
+ debug osd = 20
+ debug ms = 1
+ debug filestore = 20
+
+[osd.1]
+ host = cephstore5522
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5522]
+ public addr = [2607:f298:4:2243::5522]
+
+[osd.2]
+ host = cephstore5522
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5522]
+ public addr = [2607:f298:4:2243::5522]
+
+[osd.3]
+ host = cephstore5522
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5522]
+ public addr = [2607:f298:4:2243::5522]
+
+[osd.4]
+ host = cephstore5522
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5522]
+ public addr = [2607:f298:4:2243::5522]
+
+[osd.5]
+ host = cephstore5522
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5522]
+ public addr = [2607:f298:4:2243::5522]
+
+[osd.6]
+ host = cephstore5522
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5522]
+ public addr = [2607:f298:4:2243::5522]
+
+[osd.7]
+ host = cephstore5522
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5522]
+ public addr = [2607:f298:4:2243::5522]
+
+[osd.8]
+ host = cephstore5523
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5523]
+ public addr = [2607:f298:4:2243::5523]
+
+[osd.9]
+ host = cephstore5523
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5523]
+ public addr = [2607:f298:4:2243::5523]
+
+[osd.10]
+ host = cephstore5523
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5523]
+ public addr = [2607:f298:4:2243::5523]
+
+[osd.11]
+ host = cephstore5523
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5523]
+ public addr = [2607:f298:4:2243::5523]
+
+[osd.12]
+ host = cephstore5523
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5523]
+ public addr = [2607:f298:4:2243::5523]
+
+[osd.13]
+ host = cephstore5523
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5523]
+ public addr = [2607:f298:4:2243::5523]
+
+[osd.14]
+ host = cephstore5523
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5523]
+ public addr = [2607:f298:4:2243::5523]
+
+[osd.15]
+ host = cephstore5524
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5524]
+ public addr = [2607:f298:4:2243::5524]
+
+[osd.16]
+ host = cephstore5524
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5524]
+ public addr = [2607:f298:4:2243::5524]
+
+[osd.17]
+ host = cephstore5524
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5524]
+ public addr = [2607:f298:4:2243::5524]
+
+[osd.18]
+ host = cephstore5524
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5524]
+ public addr = [2607:f298:4:2243::5524]
+
+[osd.19]
+ host = cephstore5524
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5524]
+ public addr = [2607:f298:4:2243::5524]
+
+[osd.20]
+ host = cephstore5524
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5524]
+ public addr = [2607:f298:4:2243::5524]
+
+[osd.21]
+ host = cephstore5524
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5524]
+ public addr = [2607:f298:4:2243::5524]
+
+[osd.22]
+ host = cephstore5525
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5525]
+ public addr = [2607:f298:4:2243::5525]
+
+[osd.23]
+ host = cephstore5525
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5525]
+ public addr = [2607:f298:4:2243::5525]
+
+[osd.24]
+ host = cephstore5525
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5525]
+ public addr = [2607:f298:4:2243::5525]
+
+[osd.25]
+ host = cephstore5525
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5525]
+ public addr = [2607:f298:4:2243::5525]
+
+[osd.26]
+ host = cephstore5525
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5525]
+ public addr = [2607:f298:4:2243::5525]
+
+[osd.27]
+ host = cephstore5525
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5525]
+ public addr = [2607:f298:4:2243::5525]
+
+[osd.28]
+ host = cephstore5525
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5525]
+ public addr = [2607:f298:4:2243::5525]
+
+[osd.29]
+ host = cephstore5526
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5526]
+ public addr = [2607:f298:4:2243::5526]
+
+[osd.30]
+ host = cephstore5526
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5526]
+ public addr = [2607:f298:4:2243::5526]
+
+[osd.31]
+ host = cephstore5526
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5526]
+ public addr = [2607:f298:4:2243::5526]
+
+[osd.32]
+ host = cephstore5526
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5526]
+ public addr = [2607:f298:4:2243::5526]
+
+[osd.33]
+ host = cephstore5526
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5526]
+ public addr = [2607:f298:4:2243::5526]
+
+[osd.34]
+ host = cephstore5526
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5526]
+ public addr = [2607:f298:4:2243::5526]
+
+[osd.35]
+ host = cephstore5526
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5526]
+ public addr = [2607:f298:4:2243::5526]
+
+[osd.36]
+ host = cephstore5527
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5527]
+ public addr = [2607:f298:4:2243::5527]
+
+[osd.37]
+ host = cephstore5527
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5527]
+ public addr = [2607:f298:4:2243::5527]
+
+[osd.38]
+ host = cephstore5527
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5527]
+ public addr = [2607:f298:4:2243::5527]
+
+[osd.39]
+ host = cephstore5527
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5527]
+ public addr = [2607:f298:4:2243::5527]
+
+[osd.40]
+ host = cephstore5527
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5527]
+ public addr = [2607:f298:4:2243::5527]
+
+[osd.41]
+ host = cephstore5527
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5527]
+ public addr = [2607:f298:4:2243::5527]
+
+[osd.42]
+ host = cephstore5527
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5527]
+ public addr = [2607:f298:4:2243::5527]
+
+[osd.43]
+ host = cephstore5529
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5529]
+ public addr = [2607:f298:4:2243::5529]
+
+[osd.44]
+ host = cephstore5529
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5529]
+ public addr = [2607:f298:4:2243::5529]
+
+[osd.45]
+ host = cephstore5529
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5529]
+ public addr = [2607:f298:4:2243::5529]
+
+[osd.46]
+ host = cephstore5529
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5529]
+ public addr = [2607:f298:4:2243::5529]
+
+[osd.47]
+ host = cephstore5529
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5529]
+ public addr = [2607:f298:4:2243::5529]
+
+[osd.48]
+ host = cephstore5529
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5529]
+ public addr = [2607:f298:4:2243::5529]
+
+[osd.49]
+ host = cephstore5529
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5529]
+ public addr = [2607:f298:4:2243::5529]
+
+[osd.50]
+ host = cephstore5530
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5530]
+ public addr = [2607:f298:4:2243::5530]
+
+[osd.51]
+ host = cephstore5530
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5530]
+ public addr = [2607:f298:4:2243::5530]
+
+[osd.52]
+ host = cephstore5530
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5530]
+ public addr = [2607:f298:4:2243::5530]
+
+[osd.53]
+ host = cephstore5530
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5530]
+ public addr = [2607:f298:4:2243::5530]
+
+[osd.54]
+ host = cephstore5530
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5530]
+ public addr = [2607:f298:4:2243::5530]
+
+[osd.55]
+ host = cephstore5530
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5530]
+ public addr = [2607:f298:4:2243::5530]
+
+[osd.56]
+ host = cephstore5530
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::5530]
+ public addr = [2607:f298:4:2243::5530]
+
+[osd.57]
+ host = cephstore6230
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6230]
+ public addr = [2607:f298:4:2243::6230]
+
+[osd.58]
+ host = cephstore6230
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6230]
+ public addr = [2607:f298:4:2243::6230]
+
+[osd.59]
+ host = cephstore6230
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6230]
+ public addr = [2607:f298:4:2243::6230]
+
+[osd.60]
+ host = cephstore6230
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6230]
+ public addr = [2607:f298:4:2243::6230]
+
+[osd.61]
+ host = cephstore6230
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6230]
+ public addr = [2607:f298:4:2243::6230]
+
+[osd.62]
+ host = cephstore6230
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6230]
+ public addr = [2607:f298:4:2243::6230]
+
+[osd.63]
+ host = cephstore6230
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6230]
+ public addr = [2607:f298:4:2243::6230]
+
+[osd.64]
+ host = cephstore6231
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6231]
+ public addr = [2607:f298:4:2243::6231]
+
+[osd.65]
+ host = cephstore6231
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6231]
+ public addr = [2607:f298:4:2243::6231]
+
+[osd.66]
+ host = cephstore6231
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6231]
+ public addr = [2607:f298:4:2243::6231]
+
+[osd.67]
+ host = cephstore6231
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6231]
+ public addr = [2607:f298:4:2243::6231]
+
+[osd.68]
+ host = cephstore6231
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6231]
+ public addr = [2607:f298:4:2243::6231]
+
+[osd.69]
+ host = cephstore6231
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6231]
+ public addr = [2607:f298:4:2243::6231]
+
+[osd.70]
+ host = cephstore6231
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6231]
+ public addr = [2607:f298:4:2243::6231]
+
+[osd.71]
+ host = cephstore6232
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6232]
+ public addr = [2607:f298:4:2243::6232]
+
+[osd.72]
+ host = cephstore6232
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6232]
+ public addr = [2607:f298:4:2243::6232]
+
+[osd.73]
+ host = cephstore6232
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6232]
+ public addr = [2607:f298:4:2243::6232]
+
+[osd.74]
+ host = cephstore6232
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6232]
+ public addr = [2607:f298:4:2243::6232]
+
+[osd.75]
+ host = cephstore6232
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6232]
+ public addr = [2607:f298:4:2243::6232]
+
+[osd.76]
+ host = cephstore6232
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6232]
+ public addr = [2607:f298:4:2243::6232]
+
+[osd.77]
+ host = cephstore6232
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6232]
+ public addr = [2607:f298:4:2243::6232]
+
+[osd.78]
+ host = cephstore6233
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6233]
+ public addr = [2607:f298:4:2243::6233]
+
+[osd.79]
+ host = cephstore6233
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6233]
+ public addr = [2607:f298:4:2243::6233]
+
+[osd.80]
+ host = cephstore6233
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6233]
+ public addr = [2607:f298:4:2243::6233]
+
+[osd.81]
+ host = cephstore6233
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6233]
+ public addr = [2607:f298:4:2243::6233]
+
+[osd.82]
+ host = cephstore6233
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6233]
+ public addr = [2607:f298:4:2243::6233]
+
+[osd.83]
+ host = cephstore6233
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6233]
+ public addr = [2607:f298:4:2243::6233]
+
+[osd.84]
+ host = cephstore6233
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6233]
+ public addr = [2607:f298:4:2243::6233]
+
+[osd.85]
+ host = cephstore6234
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6234]
+ public addr = [2607:f298:4:2243::6234]
+
+[osd.86]
+ host = cephstore6234
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6234]
+ public addr = [2607:f298:4:2243::6234]
+
+[osd.87]
+ host = cephstore6234
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6234]
+ public addr = [2607:f298:4:2243::6234]
+
+[osd.88]
+ host = cephstore6234
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6234]
+ public addr = [2607:f298:4:2243::6234]
+
+[osd.89]
+ host = cephstore6234
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6234]
+ public addr = [2607:f298:4:2243::6234]
+
+[osd.90]
+ host = cephstore6234
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6234]
+ public addr = [2607:f298:4:2243::6234]
+
+[osd.91]
+ host = cephstore6234
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6234]
+ public addr = [2607:f298:4:2243::6234]
+
+[osd.92]
+ host = cephstore6235
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6235]
+ public addr = [2607:f298:4:2243::6235]
+
+[osd.93]
+ host = cephstore6235
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6235]
+ public addr = [2607:f298:4:2243::6235]
+
+[osd.94]
+ host = cephstore6235
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6235]
+ public addr = [2607:f298:4:2243::6235]
+
+[osd.95]
+ host = cephstore6235
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6235]
+ public addr = [2607:f298:4:2243::6235]
+
+[osd.96]
+ host = cephstore6235
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6235]
+ public addr = [2607:f298:4:2243::6235]
+
+[osd.97]
+ host = cephstore6235
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6235]
+ public addr = [2607:f298:4:2243::6235]
+
+[osd.98]
+ host = cephstore6235
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6235]
+ public addr = [2607:f298:4:2243::6235]
+
+[osd.99]
+ host = cephstore6236
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6236]
+ public addr = [2607:f298:4:2243::6236]
+
+[osd.100]
+ host = cephstore6236
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6236]
+ public addr = [2607:f298:4:2243::6236]
+
+[osd.101]
+ host = cephstore6236
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6236]
+ public addr = [2607:f298:4:2243::6236]
+
+[osd.102]
+ host = cephstore6236
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6236]
+ public addr = [2607:f298:4:2243::6236]
+
+[osd.103]
+ host = cephstore6236
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6236]
+ public addr = [2607:f298:4:2243::6236]
+
+[osd.104]
+ host = cephstore6236
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6236]
+ public addr = [2607:f298:4:2243::6236]
+
+[osd.105]
+ host = cephstore6236
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6236]
+ public addr = [2607:f298:4:2243::6236]
+
+[osd.106]
+ host = cephstore6237
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6237]
+ public addr = [2607:f298:4:2243::6237]
+
+[osd.107]
+ host = cephstore6237
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6237]
+ public addr = [2607:f298:4:2243::6237]
+
+[osd.108]
+ host = cephstore6237
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6237]
+ public addr = [2607:f298:4:2243::6237]
+
+[osd.109]
+ host = cephstore6237
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6237]
+ public addr = [2607:f298:4:2243::6237]
+
+[osd.110]
+ host = cephstore6237
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6237]
+ public addr = [2607:f298:4:2243::6237]
+
+[osd.111]
+ host = cephstore6237
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6237]
+ public addr = [2607:f298:4:2243::6237]
+
+[osd.112]
+ host = cephstore6237
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6237]
+ public addr = [2607:f298:4:2243::6237]
+
+[osd.113]
+ host = cephstore6238
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6238]
+ public addr = [2607:f298:4:2243::6238]
+
+[osd.114]
+ host = cephstore6238
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6238]
+ public addr = [2607:f298:4:2243::6238]
+
+[osd.115]
+ host = cephstore6238
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6238]
+ public addr = [2607:f298:4:2243::6238]
+
+[osd.116]
+ host = cephstore6238
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6238]
+ public addr = [2607:f298:4:2243::6238]
+
+[osd.117]
+ host = cephstore6238
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6238]
+ public addr = [2607:f298:4:2243::6238]
+
+[osd.118]
+ host = cephstore6238
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6238]
+ public addr = [2607:f298:4:2243::6238]
+
+[osd.119]
+ host = cephstore6238
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6238]
+ public addr = [2607:f298:4:2243::6238]
+
+[osd.120]
+ host = cephstore6239
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6239]
+ public addr = [2607:f298:4:2243::6239]
+
+[osd.121]
+ host = cephstore6239
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6239]
+ public addr = [2607:f298:4:2243::6239]
+
+[osd.122]
+ host = cephstore6239
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6239]
+ public addr = [2607:f298:4:2243::6239]
+
+[osd.123]
+ host = cephstore6239
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6239]
+ public addr = [2607:f298:4:2243::6239]
+
+[osd.124]
+ host = cephstore6239
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6239]
+ public addr = [2607:f298:4:2243::6239]
+
+[osd.125]
+ host = cephstore6239
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6239]
+ public addr = [2607:f298:4:2243::6239]
+
+[osd.126]
+ host = cephstore6239
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6239]
+ public addr = [2607:f298:4:2243::6239]
+
+[osd.127]
+ host = cephstore6240
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6240]
+ public addr = [2607:f298:4:2243::6240]
+
+[osd.128]
+ host = cephstore6240
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6240]
+ public addr = [2607:f298:4:2243::6240]
+
+[osd.129]
+ host = cephstore6240
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6240]
+ public addr = [2607:f298:4:2243::6240]
+
+[osd.130]
+ host = cephstore6240
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6240]
+ public addr = [2607:f298:4:2243::6240]
+
+[osd.131]
+ host = cephstore6240
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6240]
+ public addr = [2607:f298:4:2243::6240]
+
+[osd.132]
+ host = cephstore6240
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6240]
+ public addr = [2607:f298:4:2243::6240]
+
+[osd.133]
+ host = cephstore6240
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6240]
+ public addr = [2607:f298:4:2243::6240]
+
+[osd.134]
+ host = cephstore6241
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6241]
+ public addr = [2607:f298:4:2243::6241]
+
+[osd.135]
+ host = cephstore6241
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6241]
+ public addr = [2607:f298:4:2243::6241]
+
+[osd.136]
+ host = cephstore6241
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6241]
+ public addr = [2607:f298:4:2243::6241]
+
+[osd.137]
+ host = cephstore6241
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6241]
+ public addr = [2607:f298:4:2243::6241]
+
+[osd.138]
+ host = cephstore6241
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6241]
+ public addr = [2607:f298:4:2243::6241]
+
+[osd.139]
+ host = cephstore6241
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6241]
+ public addr = [2607:f298:4:2243::6241]
+
+[osd.140]
+ host = cephstore6241
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6241]
+ public addr = [2607:f298:4:2243::6241]
+
+[osd.141]
+ host = cephstore6242
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6242]
+ public addr = [2607:f298:4:2243::6242]
+
+[osd.142]
+ host = cephstore6242
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6242]
+ public addr = [2607:f298:4:2243::6242]
+
+[osd.143]
+ host = cephstore6242
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6242]
+ public addr = [2607:f298:4:2243::6242]
+
+[osd.144]
+ host = cephstore6242
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6242]
+ public addr = [2607:f298:4:2243::6242]
+
+[osd.145]
+ host = cephstore6242
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6242]
+ public addr = [2607:f298:4:2243::6242]
+
+[osd.146]
+ host = cephstore6242
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6242]
+ public addr = [2607:f298:4:2243::6242]
+
+[osd.147]
+ host = cephstore6242
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6242]
+ public addr = [2607:f298:4:2243::6242]
+
+[osd.148]
+ host = cephstore6243
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6243]
+ public addr = [2607:f298:4:2243::6243]
+
+[osd.149]
+ host = cephstore6243
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6243]
+ public addr = [2607:f298:4:2243::6243]
+
+[osd.150]
+ host = cephstore6243
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6243]
+ public addr = [2607:f298:4:2243::6243]
+
+[osd.151]
+ host = cephstore6243
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6243]
+ public addr = [2607:f298:4:2243::6243]
+
+[osd.152]
+ host = cephstore6243
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6243]
+ public addr = [2607:f298:4:2243::6243]
+
+[osd.153]
+ host = cephstore6243
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6243]
+ public addr = [2607:f298:4:2243::6243]
+
+[osd.154]
+ host = cephstore6243
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6243]
+ public addr = [2607:f298:4:2243::6243]
+
+[osd.155]
+ host = cephstore6244
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6244]
+ public addr = [2607:f298:4:2243::6244]
+
+[osd.156]
+ host = cephstore6244
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6244]
+ public addr = [2607:f298:4:2243::6244]
+
+[osd.157]
+ host = cephstore6244
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6244]
+ public addr = [2607:f298:4:2243::6244]
+
+[osd.158]
+ host = cephstore6244
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6244]
+ public addr = [2607:f298:4:2243::6244]
+
+[osd.159]
+ host = cephstore6244
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6244]
+ public addr = [2607:f298:4:2243::6244]
+
+[osd.160]
+ host = cephstore6244
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6244]
+ public addr = [2607:f298:4:2243::6244]
+
+[osd.161]
+ host = cephstore6244
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6244]
+ public addr = [2607:f298:4:2243::6244]
+
+[osd.162]
+ host = cephstore6245
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6245]
+ public addr = [2607:f298:4:2243::6245]
+
+[osd.163]
+ host = cephstore6245
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6245]
+ public addr = [2607:f298:4:2243::6245]
+
+[osd.164]
+ host = cephstore6245
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6245]
+ public addr = [2607:f298:4:2243::6245]
+
+[osd.165]
+ host = cephstore6245
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6245]
+ public addr = [2607:f298:4:2243::6245]
+
+[osd.166]
+ host = cephstore6245
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6245]
+ public addr = [2607:f298:4:2243::6245]
+
+[osd.167]
+ host = cephstore6245
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6245]
+ public addr = [2607:f298:4:2243::6245]
+
+[osd.168]
+ host = cephstore6245
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6245]
+ public addr = [2607:f298:4:2243::6245]
+
+[osd.169]
+ host = cephstore6246
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6246]
+ public addr = [2607:f298:4:2243::6246]
+
+[osd.170]
+ host = cephstore6246
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6246]
+ public addr = [2607:f298:4:2243::6246]
+
+[osd.171]
+ host = cephstore6246
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6246]
+ public addr = [2607:f298:4:2243::6246]
+
+[osd.172]
+ host = cephstore6246
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6246]
+ public addr = [2607:f298:4:2243::6246]
+
+[osd.173]
+ host = cephstore6246
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6246]
+ public addr = [2607:f298:4:2243::6246]
+
+[osd.174]
+ host = cephstore6246
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6246]
+ public addr = [2607:f298:4:2243::6246]
+
+[osd.175]
+ host = cephstore6246
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6246]
+ public addr = [2607:f298:4:2243::6246]
+
+[osd.176]
+ host = cephstore6336
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6336]
+ public addr = [2607:f298:4:2243::6336]
+
+[osd.177]
+ host = cephstore6336
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6336]
+ public addr = [2607:f298:4:2243::6336]
+
+[osd.178]
+ host = cephstore6336
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6336]
+ public addr = [2607:f298:4:2243::6336]
+
+[osd.179]
+ host = cephstore6336
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6336]
+ public addr = [2607:f298:4:2243::6336]
+
+[osd.180]
+ host = cephstore6336
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6336]
+ public addr = [2607:f298:4:2243::6336]
+
+[osd.181]
+ host = cephstore6336
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6336]
+ public addr = [2607:f298:4:2243::6336]
+
+[osd.182]
+ host = cephstore6336
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6336]
+ public addr = [2607:f298:4:2243::6336]
+
+[osd.183]
+ host = cephstore6337
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6337]
+ public addr = [2607:f298:4:2243::6337]
+
+[osd.184]
+ host = cephstore6337
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6337]
+ public addr = [2607:f298:4:2243::6337]
+
+[osd.185]
+ host = cephstore6337
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6337]
+ public addr = [2607:f298:4:2243::6337]
+
+[osd.186]
+ host = cephstore6337
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6337]
+ public addr = [2607:f298:4:2243::6337]
+
+[osd.187]
+ host = cephstore6337
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6337]
+ public addr = [2607:f298:4:2243::6337]
+
+[osd.188]
+ host = cephstore6337
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6337]
+ public addr = [2607:f298:4:2243::6337]
+
+[osd.189]
+ host = cephstore6337
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6337]
+ public addr = [2607:f298:4:2243::6337]
+
+[osd.190]
+ host = cephstore6338
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6338]
+ public addr = [2607:f298:4:2243::6338]
+
+[osd.191]
+ host = cephstore6338
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6338]
+ public addr = [2607:f298:4:2243::6338]
+
+[osd.192]
+ host = cephstore6338
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6338]
+ public addr = [2607:f298:4:2243::6338]
+
+[osd.193]
+ host = cephstore6338
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6338]
+ public addr = [2607:f298:4:2243::6338]
+
+[osd.194]
+ host = cephstore6338
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6338]
+ public addr = [2607:f298:4:2243::6338]
+
+[osd.195]
+ host = cephstore6338
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6338]
+ public addr = [2607:f298:4:2243::6338]
+
+[osd.196]
+ host = cephstore6338
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6338]
+ public addr = [2607:f298:4:2243::6338]
+
+[osd.197]
+ host = cephstore6339
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6339]
+ public addr = [2607:f298:4:2243::6339]
+
+[osd.198]
+ host = cephstore6339
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6339]
+ public addr = [2607:f298:4:2243::6339]
+
+[osd.199]
+ host = cephstore6339
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6339]
+ public addr = [2607:f298:4:2243::6339]
+
+[osd.200]
+ host = cephstore6339
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6339]
+ public addr = [2607:f298:4:2243::6339]
+
+[osd.201]
+ host = cephstore6339
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6339]
+ public addr = [2607:f298:4:2243::6339]
+
+[osd.202]
+ host = cephstore6339
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6339]
+ public addr = [2607:f298:4:2243::6339]
+
+[osd.203]
+ host = cephstore6339
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6339]
+ public addr = [2607:f298:4:2243::6339]
+
+[osd.204]
+ host = cephstore6340
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6340]
+ public addr = [2607:f298:4:2243::6340]
+
+[osd.205]
+ host = cephstore6340
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6340]
+ public addr = [2607:f298:4:2243::6340]
+
+[osd.206]
+ host = cephstore6340
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6340]
+ public addr = [2607:f298:4:2243::6340]
+
+[osd.207]
+ host = cephstore6340
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6340]
+ public addr = [2607:f298:4:2243::6340]
+
+[osd.208]
+ host = cephstore6340
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6340]
+ public addr = [2607:f298:4:2243::6340]
+
+[osd.209]
+ host = cephstore6340
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6340]
+ public addr = [2607:f298:4:2243::6340]
+
+[osd.210]
+ host = cephstore6340
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6340]
+ public addr = [2607:f298:4:2243::6340]
+
+[osd.211]
+ host = cephstore6341
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6341]
+ public addr = [2607:f298:4:2243::6341]
+
+[osd.212]
+ host = cephstore6341
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6341]
+ public addr = [2607:f298:4:2243::6341]
+
+[osd.213]
+ host = cephstore6341
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6341]
+ public addr = [2607:f298:4:2243::6341]
+
+[osd.214]
+ host = cephstore6341
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6341]
+ public addr = [2607:f298:4:2243::6341]
+
+[osd.215]
+ host = cephstore6341
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6341]
+ public addr = [2607:f298:4:2243::6341]
+
+[osd.216]
+ host = cephstore6341
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6341]
+ public addr = [2607:f298:4:2243::6341]
+
+[osd.217]
+ host = cephstore6341
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6341]
+ public addr = [2607:f298:4:2243::6341]
+
+[osd.218]
+ host = cephstore6342
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6342]
+ public addr = [2607:f298:4:2243::6342]
+
+[osd.219]
+ host = cephstore6342
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6342]
+ public addr = [2607:f298:4:2243::6342]
+
+[osd.220]
+ host = cephstore6342
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6342]
+ public addr = [2607:f298:4:2243::6342]
+
+[osd.221]
+ host = cephstore6342
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6342]
+ public addr = [2607:f298:4:2243::6342]
+
+[osd.222]
+ host = cephstore6342
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6342]
+ public addr = [2607:f298:4:2243::6342]
+
+[osd.223]
+ host = cephstore6342
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6342]
+ public addr = [2607:f298:4:2243::6342]
+
+[osd.224]
+ host = cephstore6342
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6342]
+ public addr = [2607:f298:4:2243::6342]
+
+[osd.225]
+ host = cephstore6343
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6343]
+ public addr = [2607:f298:4:2243::6343]
+
+[osd.226]
+ host = cephstore6343
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6343]
+ public addr = [2607:f298:4:2243::6343]
+
+[osd.227]
+ host = cephstore6343
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6343]
+ public addr = [2607:f298:4:2243::6343]
+
+[osd.228]
+ host = cephstore6343
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6343]
+ public addr = [2607:f298:4:2243::6343]
+
+[osd.229]
+ host = cephstore6343
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6343]
+ public addr = [2607:f298:4:2243::6343]
+
+[osd.230]
+ host = cephstore6343
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6343]
+ public addr = [2607:f298:4:2243::6343]
+
+[osd.231]
+ host = cephstore6343
+ rack = irv-n1
+ cluster addr = [2607:f298:4:3243::6343]
+ public addr = [2607:f298:4:2243::6343]
+
+[osd.232]
+ host = cephstore6345
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6345]
+ public addr = [2607:f298:4:2243::6345]
+
+[osd.233]
+ host = cephstore6345
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6345]
+ public addr = [2607:f298:4:2243::6345]
+
+[osd.234]
+ host = cephstore6345
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6345]
+ public addr = [2607:f298:4:2243::6345]
+
+[osd.235]
+ host = cephstore6345
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6345]
+ public addr = [2607:f298:4:2243::6345]
+
+[osd.236]
+ host = cephstore6345
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6345]
+ public addr = [2607:f298:4:2243::6345]
+
+[osd.237]
+ host = cephstore6345
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6345]
+ public addr = [2607:f298:4:2243::6345]
+
+[osd.238]
+ host = cephstore6345
+ rack = irv-n2
+ cluster addr = [2607:f298:4:3243::6345]
+ public addr = [2607:f298:4:2243::6345]
+
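
This fixture describes 238 OSDs on cephstore hosts split across the two racks irv-n1 and irv-n2; create-racks.t (further below) feeds it to osdmaptool so the resulting CRUSH map mirrors that rack layout. A minimal sketch of the same invocation outside the test harness, assuming the conf file is in the current directory:

    # build an osdmap from the rack-annotated conf, as create-racks.t does
    osdmaptool --create-from-conf om -c ceph.conf.withracks --with-default-pool
    osdmaptool --export-crush oc om           # inspect the result with: crushtool --decompile oc
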
diff --git a/src/test/cli/osdmaptool/clobber.t b/src/test/cli/osdmaptool/clobber.t
new file mode 100644
index 000000000..146960693
--- /dev/null
+++ b/src/test/cli/osdmaptool/clobber.t
@@ -0,0 +1,65 @@
+ $ osdmaptool --createsimple 3 myosdmap --with-default-pool
+ osdmaptool: osdmap file 'myosdmap'
+ osdmaptool: writing epoch 1 to myosdmap
+
+ $ ORIG_FSID="$(osdmaptool --print myosdmap|grep ^fsid)"
+ osdmaptool: osdmap file 'myosdmap'
+
+ $ osdmaptool --createsimple 3 myosdmap --with-default-pool
+ osdmaptool: osdmap file 'myosdmap'
+ osdmaptool: myosdmap exists, --clobber to overwrite
+ [255]
+
+# hasn't changed yet
+#TODO typo
+ $ osdmaptool --print myosdmap
+ osdmaptool: osdmap file 'myosdmap'
+ epoch 1
+ fsid [0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12} (re)
+ created \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+.\d\d\d\d (re)
+ modified \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+.\d\d\d\d (re)
+ flags
+ crush_version 1
+ full_ratio 0
+ backfillfull_ratio 0
+ nearfull_ratio 0
+ min_compat_client jewel
+ stretch_mode_enabled false
+
+ pool 1 'rbd' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 192 pgp_num 192 autoscale_mode on last_change 0 flags hashpspool stripe_width 0 application rbd
+
+ max_osd 3
+
+
+ $ NEW_FSID="$(osdmaptool --print myosdmap|grep ^fsid)"
+ osdmaptool: osdmap file 'myosdmap'
+ $ [ "$ORIG_FSID" = "$NEW_FSID" ]
+
+ $ osdmaptool --createsimple 1 --clobber myosdmap --with-default-pool
+ osdmaptool: osdmap file 'myosdmap'
+ osdmaptool: writing epoch 1 to myosdmap
+
+ $ osdmaptool --print myosdmap
+ osdmaptool: osdmap file 'myosdmap'
+ epoch 1
+ fsid [0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12} (re)
+ created \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+.\d\d\d\d (re)
+ modified \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+.\d\d\d\d (re)
+ flags
+ crush_version 1
+ full_ratio 0
+ backfillfull_ratio 0
+ nearfull_ratio 0
+ min_compat_client jewel
+ stretch_mode_enabled false
+
+ pool 1 'rbd' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 64 pgp_num 64 autoscale_mode on last_change 0 flags hashpspool stripe_width 0 application rbd
+
+ max_osd 1
+
+
+ $ NEW_FSID="$(osdmaptool --print myosdmap|grep ^fsid)"
+ osdmaptool: osdmap file 'myosdmap'
+#TODO --clobber should probably set new fsid, remove the [1]
+ $ [ "$ORIG_FSID" != "$NEW_FSID" ]
+ [1]
diff --git a/src/test/cli/osdmaptool/create-print.t b/src/test/cli/osdmaptool/create-print.t
new file mode 100644
index 000000000..9d745b82f
--- /dev/null
+++ b/src/test/cli/osdmaptool/create-print.t
@@ -0,0 +1,97 @@
+ $ osdmaptool --createsimple 3 myosdmap --with-default-pool
+ osdmaptool: osdmap file 'myosdmap'
+ osdmaptool: writing epoch 1 to myosdmap
+
+ $ osdmaptool --export-crush oc myosdmap
+ osdmaptool: osdmap file 'myosdmap'
+ osdmaptool: exported crush map to oc
+ $ crushtool --decompile oc
+ # begin crush map
+ tunable choose_local_tries 0
+ tunable choose_local_fallback_tries 0
+ tunable choose_total_tries 50
+ tunable chooseleaf_descend_once 1
+ tunable chooseleaf_vary_r 1
+ tunable chooseleaf_stable 1
+ tunable straw_calc_version 1
+ tunable allowed_bucket_algs 54
+
+ # devices
+ device 0 osd.0
+ device 1 osd.1
+ device 2 osd.2
+
+ # types
+ type 0 osd
+ type 1 host
+ type 2 chassis
+ type 3 rack
+ type 4 row
+ type 5 pdu
+ type 6 pod
+ type 7 room
+ type 8 datacenter
+ type 9 zone
+ type 10 region
+ type 11 root
+
+ # buckets
+ host localhost {
+ \tid -2\t\t# do not change unnecessarily (esc)
+ \t# weight 3.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.0 weight 1.00000 (esc)
+ \titem osd.1 weight 1.00000 (esc)
+ \titem osd.2 weight 1.00000 (esc)
+ }
+ rack localrack {
+ \tid -3\t\t# do not change unnecessarily (esc)
+ \t# weight 3.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem localhost weight 3.00000 (esc)
+ }
+ root default {
+ \tid -1\t\t# do not change unnecessarily (esc)
+ \t# weight 3.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem localrack weight 3.00000 (esc)
+ }
+
+ # rules
+ rule replicated_rule {
+ \tid 0 (esc)
+ \ttype replicated (esc)
+ \tstep take default (esc)
+ \tstep chooseleaf firstn 0 type host (esc)
+ \tstep emit (esc)
+ }
+
+ # end crush map
+ $ osdmaptool --print myosdmap
+ osdmaptool: osdmap file 'myosdmap'
+ epoch 1
+ fsid [0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12} (re)
+ created \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+.\d\d\d\d (re)
+ modified \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+.\d\d\d\d (re)
+ flags
+ crush_version 1
+ full_ratio 0
+ backfillfull_ratio 0
+ nearfull_ratio 0
+ min_compat_client jewel
+ stretch_mode_enabled false
+
+ pool 1 'rbd' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 192 pgp_num 192 autoscale_mode on last_change 0 flags hashpspool stripe_width 0 application rbd
+
+ max_osd 3
+
+ $ osdmaptool --clobber --createsimple 3 --with-default-pool myosdmap
+ osdmaptool: osdmap file 'myosdmap'
+ osdmaptool: writing epoch 1 to myosdmap
+ $ osdmaptool --print myosdmap | grep 'pool 1'
+ osdmaptool: osdmap file 'myosdmap'
+ pool 1 'rbd' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 192 pgp_num 192 autoscale_mode on last_change 0 flags hashpspool stripe_width 0 application rbd
+ $ rm -f myosdmap
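
create-print.t above exercises only the export half of the CRUSH round trip. A hedged sketch of the reverse direction, assuming the standard crushtool compile (-c) and osdmaptool --import-crush options (neither is used in these tests) and an osdmap file that still exists:

    crushtool --decompile oc -o oc.txt          # binary crush map -> text
    crushtool -c oc.txt -o oc.new               # recompile the (possibly edited) text
    osdmaptool --import-crush oc.new myosdmap   # write the crush map back into the osdmap
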
diff --git a/src/test/cli/osdmaptool/create-racks.t b/src/test/cli/osdmaptool/create-racks.t
new file mode 100644
index 000000000..d1e65d7b5
--- /dev/null
+++ b/src/test/cli/osdmaptool/create-racks.t
@@ -0,0 +1,810 @@
+ $ osdmaptool --create-from-conf om -c $TESTDIR/ceph.conf.withracks --with-default-pool
+ osdmaptool: osdmap file 'om'
+ osdmaptool: writing epoch 1 to om
+ $ osdmaptool --export-crush oc om
+ osdmaptool: osdmap file 'om'
+ osdmaptool: exported crush map to oc
+ $ crushtool --decompile oc
+ # begin crush map
+ tunable choose_local_tries 0
+ tunable choose_local_fallback_tries 0
+ tunable choose_total_tries 50
+ tunable chooseleaf_descend_once 1
+ tunable chooseleaf_vary_r 1
+ tunable chooseleaf_stable 1
+ tunable straw_calc_version 1
+ tunable allowed_bucket_algs 54
+
+ # devices
+ device 1 osd.1
+ device 2 osd.2
+ device 3 osd.3
+ device 4 osd.4
+ device 5 osd.5
+ device 6 osd.6
+ device 7 osd.7
+ device 8 osd.8
+ device 9 osd.9
+ device 10 osd.10
+ device 11 osd.11
+ device 12 osd.12
+ device 13 osd.13
+ device 14 osd.14
+ device 15 osd.15
+ device 16 osd.16
+ device 17 osd.17
+ device 18 osd.18
+ device 19 osd.19
+ device 20 osd.20
+ device 21 osd.21
+ device 22 osd.22
+ device 23 osd.23
+ device 24 osd.24
+ device 25 osd.25
+ device 26 osd.26
+ device 27 osd.27
+ device 28 osd.28
+ device 29 osd.29
+ device 30 osd.30
+ device 31 osd.31
+ device 32 osd.32
+ device 33 osd.33
+ device 34 osd.34
+ device 35 osd.35
+ device 36 osd.36
+ device 37 osd.37
+ device 38 osd.38
+ device 39 osd.39
+ device 40 osd.40
+ device 41 osd.41
+ device 42 osd.42
+ device 43 osd.43
+ device 44 osd.44
+ device 45 osd.45
+ device 46 osd.46
+ device 47 osd.47
+ device 48 osd.48
+ device 49 osd.49
+ device 50 osd.50
+ device 51 osd.51
+ device 52 osd.52
+ device 53 osd.53
+ device 54 osd.54
+ device 55 osd.55
+ device 56 osd.56
+ device 57 osd.57
+ device 58 osd.58
+ device 59 osd.59
+ device 60 osd.60
+ device 61 osd.61
+ device 62 osd.62
+ device 63 osd.63
+ device 64 osd.64
+ device 65 osd.65
+ device 66 osd.66
+ device 67 osd.67
+ device 68 osd.68
+ device 69 osd.69
+ device 70 osd.70
+ device 71 osd.71
+ device 72 osd.72
+ device 73 osd.73
+ device 74 osd.74
+ device 75 osd.75
+ device 76 osd.76
+ device 77 osd.77
+ device 78 osd.78
+ device 79 osd.79
+ device 80 osd.80
+ device 81 osd.81
+ device 82 osd.82
+ device 83 osd.83
+ device 84 osd.84
+ device 85 osd.85
+ device 86 osd.86
+ device 87 osd.87
+ device 88 osd.88
+ device 89 osd.89
+ device 90 osd.90
+ device 91 osd.91
+ device 92 osd.92
+ device 93 osd.93
+ device 94 osd.94
+ device 95 osd.95
+ device 96 osd.96
+ device 97 osd.97
+ device 98 osd.98
+ device 99 osd.99
+ device 100 osd.100
+ device 101 osd.101
+ device 102 osd.102
+ device 103 osd.103
+ device 104 osd.104
+ device 105 osd.105
+ device 106 osd.106
+ device 107 osd.107
+ device 108 osd.108
+ device 109 osd.109
+ device 110 osd.110
+ device 111 osd.111
+ device 112 osd.112
+ device 113 osd.113
+ device 114 osd.114
+ device 115 osd.115
+ device 116 osd.116
+ device 117 osd.117
+ device 118 osd.118
+ device 119 osd.119
+ device 120 osd.120
+ device 121 osd.121
+ device 122 osd.122
+ device 123 osd.123
+ device 124 osd.124
+ device 125 osd.125
+ device 126 osd.126
+ device 127 osd.127
+ device 128 osd.128
+ device 129 osd.129
+ device 130 osd.130
+ device 131 osd.131
+ device 132 osd.132
+ device 133 osd.133
+ device 134 osd.134
+ device 135 osd.135
+ device 136 osd.136
+ device 137 osd.137
+ device 138 osd.138
+ device 139 osd.139
+ device 140 osd.140
+ device 141 osd.141
+ device 142 osd.142
+ device 143 osd.143
+ device 144 osd.144
+ device 145 osd.145
+ device 146 osd.146
+ device 147 osd.147
+ device 148 osd.148
+ device 149 osd.149
+ device 150 osd.150
+ device 151 osd.151
+ device 152 osd.152
+ device 153 osd.153
+ device 154 osd.154
+ device 155 osd.155
+ device 156 osd.156
+ device 157 osd.157
+ device 158 osd.158
+ device 159 osd.159
+ device 160 osd.160
+ device 161 osd.161
+ device 162 osd.162
+ device 163 osd.163
+ device 164 osd.164
+ device 165 osd.165
+ device 166 osd.166
+ device 167 osd.167
+ device 168 osd.168
+ device 169 osd.169
+ device 170 osd.170
+ device 171 osd.171
+ device 172 osd.172
+ device 173 osd.173
+ device 174 osd.174
+ device 175 osd.175
+ device 176 osd.176
+ device 177 osd.177
+ device 178 osd.178
+ device 179 osd.179
+ device 180 osd.180
+ device 181 osd.181
+ device 182 osd.182
+ device 183 osd.183
+ device 184 osd.184
+ device 185 osd.185
+ device 186 osd.186
+ device 187 osd.187
+ device 188 osd.188
+ device 189 osd.189
+ device 190 osd.190
+ device 191 osd.191
+ device 192 osd.192
+ device 193 osd.193
+ device 194 osd.194
+ device 195 osd.195
+ device 196 osd.196
+ device 197 osd.197
+ device 198 osd.198
+ device 199 osd.199
+ device 200 osd.200
+ device 201 osd.201
+ device 202 osd.202
+ device 203 osd.203
+ device 204 osd.204
+ device 205 osd.205
+ device 206 osd.206
+ device 207 osd.207
+ device 208 osd.208
+ device 209 osd.209
+ device 210 osd.210
+ device 211 osd.211
+ device 212 osd.212
+ device 213 osd.213
+ device 214 osd.214
+ device 215 osd.215
+ device 216 osd.216
+ device 217 osd.217
+ device 218 osd.218
+ device 219 osd.219
+ device 220 osd.220
+ device 221 osd.221
+ device 222 osd.222
+ device 223 osd.223
+ device 224 osd.224
+ device 225 osd.225
+ device 226 osd.226
+ device 227 osd.227
+ device 228 osd.228
+ device 229 osd.229
+ device 230 osd.230
+ device 231 osd.231
+ device 232 osd.232
+ device 233 osd.233
+ device 234 osd.234
+ device 235 osd.235
+ device 236 osd.236
+ device 237 osd.237
+ device 238 osd.238
+
+ # types
+ type 0 osd
+ type 1 host
+ type 2 chassis
+ type 3 rack
+ type 4 row
+ type 5 pdu
+ type 6 pod
+ type 7 room
+ type 8 datacenter
+ type 9 zone
+ type 10 region
+ type 11 root
+
+ # buckets
+ host cephstore5522 {
+ \tid -2\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.1 weight 1.00000 (esc)
+ \titem osd.2 weight 1.00000 (esc)
+ \titem osd.3 weight 1.00000 (esc)
+ \titem osd.4 weight 1.00000 (esc)
+ \titem osd.5 weight 1.00000 (esc)
+ \titem osd.6 weight 1.00000 (esc)
+ \titem osd.7 weight 1.00000 (esc)
+ }
+ host cephstore5523 {
+ \tid -4\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.10 weight 1.00000 (esc)
+ \titem osd.11 weight 1.00000 (esc)
+ \titem osd.12 weight 1.00000 (esc)
+ \titem osd.13 weight 1.00000 (esc)
+ \titem osd.14 weight 1.00000 (esc)
+ \titem osd.8 weight 1.00000 (esc)
+ \titem osd.9 weight 1.00000 (esc)
+ }
+ host cephstore6238 {
+ \tid -8\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.113 weight 1.00000 (esc)
+ \titem osd.114 weight 1.00000 (esc)
+ \titem osd.115 weight 1.00000 (esc)
+ \titem osd.116 weight 1.00000 (esc)
+ \titem osd.117 weight 1.00000 (esc)
+ \titem osd.118 weight 1.00000 (esc)
+ \titem osd.119 weight 1.00000 (esc)
+ }
+ host cephstore6240 {
+ \tid -10\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.127 weight 1.00000 (esc)
+ \titem osd.128 weight 1.00000 (esc)
+ \titem osd.129 weight 1.00000 (esc)
+ \titem osd.130 weight 1.00000 (esc)
+ \titem osd.131 weight 1.00000 (esc)
+ \titem osd.132 weight 1.00000 (esc)
+ \titem osd.133 weight 1.00000 (esc)
+ }
+ host cephstore6242 {
+ \tid -12\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.141 weight 1.00000 (esc)
+ \titem osd.142 weight 1.00000 (esc)
+ \titem osd.143 weight 1.00000 (esc)
+ \titem osd.144 weight 1.00000 (esc)
+ \titem osd.145 weight 1.00000 (esc)
+ \titem osd.146 weight 1.00000 (esc)
+ \titem osd.147 weight 1.00000 (esc)
+ }
+ host cephstore5524 {
+ \tid -14\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.15 weight 1.00000 (esc)
+ \titem osd.16 weight 1.00000 (esc)
+ \titem osd.17 weight 1.00000 (esc)
+ \titem osd.18 weight 1.00000 (esc)
+ \titem osd.19 weight 1.00000 (esc)
+ \titem osd.20 weight 1.00000 (esc)
+ \titem osd.21 weight 1.00000 (esc)
+ }
+ host cephstore6244 {
+ \tid -15\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.155 weight 1.00000 (esc)
+ \titem osd.156 weight 1.00000 (esc)
+ \titem osd.157 weight 1.00000 (esc)
+ \titem osd.158 weight 1.00000 (esc)
+ \titem osd.159 weight 1.00000 (esc)
+ \titem osd.160 weight 1.00000 (esc)
+ \titem osd.161 weight 1.00000 (esc)
+ }
+ host cephstore6246 {
+ \tid -17\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.169 weight 1.00000 (esc)
+ \titem osd.170 weight 1.00000 (esc)
+ \titem osd.171 weight 1.00000 (esc)
+ \titem osd.172 weight 1.00000 (esc)
+ \titem osd.173 weight 1.00000 (esc)
+ \titem osd.174 weight 1.00000 (esc)
+ \titem osd.175 weight 1.00000 (esc)
+ }
+ host cephstore6337 {
+ \tid -19\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.183 weight 1.00000 (esc)
+ \titem osd.184 weight 1.00000 (esc)
+ \titem osd.185 weight 1.00000 (esc)
+ \titem osd.186 weight 1.00000 (esc)
+ \titem osd.187 weight 1.00000 (esc)
+ \titem osd.188 weight 1.00000 (esc)
+ \titem osd.189 weight 1.00000 (esc)
+ }
+ host cephstore6341 {
+ \tid -23\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.211 weight 1.00000 (esc)
+ \titem osd.212 weight 1.00000 (esc)
+ \titem osd.213 weight 1.00000 (esc)
+ \titem osd.214 weight 1.00000 (esc)
+ \titem osd.215 weight 1.00000 (esc)
+ \titem osd.216 weight 1.00000 (esc)
+ \titem osd.217 weight 1.00000 (esc)
+ }
+ host cephstore6342 {
+ \tid -24\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.218 weight 1.00000 (esc)
+ \titem osd.219 weight 1.00000 (esc)
+ \titem osd.220 weight 1.00000 (esc)
+ \titem osd.221 weight 1.00000 (esc)
+ \titem osd.222 weight 1.00000 (esc)
+ \titem osd.223 weight 1.00000 (esc)
+ \titem osd.224 weight 1.00000 (esc)
+ }
+ host cephstore5525 {
+ \tid -25\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.22 weight 1.00000 (esc)
+ \titem osd.23 weight 1.00000 (esc)
+ \titem osd.24 weight 1.00000 (esc)
+ \titem osd.25 weight 1.00000 (esc)
+ \titem osd.26 weight 1.00000 (esc)
+ \titem osd.27 weight 1.00000 (esc)
+ \titem osd.28 weight 1.00000 (esc)
+ }
+ host cephstore6345 {
+ \tid -27\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.232 weight 1.00000 (esc)
+ \titem osd.233 weight 1.00000 (esc)
+ \titem osd.234 weight 1.00000 (esc)
+ \titem osd.235 weight 1.00000 (esc)
+ \titem osd.236 weight 1.00000 (esc)
+ \titem osd.237 weight 1.00000 (esc)
+ \titem osd.238 weight 1.00000 (esc)
+ }
+ host cephstore5526 {
+ \tid -28\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.29 weight 1.00000 (esc)
+ \titem osd.30 weight 1.00000 (esc)
+ \titem osd.31 weight 1.00000 (esc)
+ \titem osd.32 weight 1.00000 (esc)
+ \titem osd.33 weight 1.00000 (esc)
+ \titem osd.34 weight 1.00000 (esc)
+ \titem osd.35 weight 1.00000 (esc)
+ }
+ host cephstore5527 {
+ \tid -29\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.36 weight 1.00000 (esc)
+ \titem osd.37 weight 1.00000 (esc)
+ \titem osd.38 weight 1.00000 (esc)
+ \titem osd.39 weight 1.00000 (esc)
+ \titem osd.40 weight 1.00000 (esc)
+ \titem osd.41 weight 1.00000 (esc)
+ \titem osd.42 weight 1.00000 (esc)
+ }
+ host cephstore5529 {
+ \tid -30\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.43 weight 1.00000 (esc)
+ \titem osd.44 weight 1.00000 (esc)
+ \titem osd.45 weight 1.00000 (esc)
+ \titem osd.46 weight 1.00000 (esc)
+ \titem osd.47 weight 1.00000 (esc)
+ \titem osd.48 weight 1.00000 (esc)
+ \titem osd.49 weight 1.00000 (esc)
+ }
+ host cephstore5530 {
+ \tid -31\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.50 weight 1.00000 (esc)
+ \titem osd.51 weight 1.00000 (esc)
+ \titem osd.52 weight 1.00000 (esc)
+ \titem osd.53 weight 1.00000 (esc)
+ \titem osd.54 weight 1.00000 (esc)
+ \titem osd.55 weight 1.00000 (esc)
+ \titem osd.56 weight 1.00000 (esc)
+ }
+ rack irv-n2 {
+ \tid -3\t\t# do not change unnecessarily (esc)
+ \t# weight 119.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem cephstore5522 weight 7.00000 (esc)
+ \titem cephstore5523 weight 7.00000 (esc)
+ \titem cephstore6238 weight 7.00000 (esc)
+ \titem cephstore6240 weight 7.00000 (esc)
+ \titem cephstore6242 weight 7.00000 (esc)
+ \titem cephstore5524 weight 7.00000 (esc)
+ \titem cephstore6244 weight 7.00000 (esc)
+ \titem cephstore6246 weight 7.00000 (esc)
+ \titem cephstore6337 weight 7.00000 (esc)
+ \titem cephstore6341 weight 7.00000 (esc)
+ \titem cephstore6342 weight 7.00000 (esc)
+ \titem cephstore5525 weight 7.00000 (esc)
+ \titem cephstore6345 weight 7.00000 (esc)
+ \titem cephstore5526 weight 7.00000 (esc)
+ \titem cephstore5527 weight 7.00000 (esc)
+ \titem cephstore5529 weight 7.00000 (esc)
+ \titem cephstore5530 weight 7.00000 (esc)
+ }
+ host cephstore6236 {
+ \tid -5\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.100 weight 1.00000 (esc)
+ \titem osd.101 weight 1.00000 (esc)
+ \titem osd.102 weight 1.00000 (esc)
+ \titem osd.103 weight 1.00000 (esc)
+ \titem osd.104 weight 1.00000 (esc)
+ \titem osd.105 weight 1.00000 (esc)
+ \titem osd.99 weight 1.00000 (esc)
+ }
+ host cephstore6237 {
+ \tid -7\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.106 weight 1.00000 (esc)
+ \titem osd.107 weight 1.00000 (esc)
+ \titem osd.108 weight 1.00000 (esc)
+ \titem osd.109 weight 1.00000 (esc)
+ \titem osd.110 weight 1.00000 (esc)
+ \titem osd.111 weight 1.00000 (esc)
+ \titem osd.112 weight 1.00000 (esc)
+ }
+ host cephstore6239 {
+ \tid -9\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.120 weight 1.00000 (esc)
+ \titem osd.121 weight 1.00000 (esc)
+ \titem osd.122 weight 1.00000 (esc)
+ \titem osd.123 weight 1.00000 (esc)
+ \titem osd.124 weight 1.00000 (esc)
+ \titem osd.125 weight 1.00000 (esc)
+ \titem osd.126 weight 1.00000 (esc)
+ }
+ host cephstore6241 {
+ \tid -11\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.134 weight 1.00000 (esc)
+ \titem osd.135 weight 1.00000 (esc)
+ \titem osd.136 weight 1.00000 (esc)
+ \titem osd.137 weight 1.00000 (esc)
+ \titem osd.138 weight 1.00000 (esc)
+ \titem osd.139 weight 1.00000 (esc)
+ \titem osd.140 weight 1.00000 (esc)
+ }
+ host cephstore6243 {
+ \tid -13\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.148 weight 1.00000 (esc)
+ \titem osd.149 weight 1.00000 (esc)
+ \titem osd.150 weight 1.00000 (esc)
+ \titem osd.151 weight 1.00000 (esc)
+ \titem osd.152 weight 1.00000 (esc)
+ \titem osd.153 weight 1.00000 (esc)
+ \titem osd.154 weight 1.00000 (esc)
+ }
+ host cephstore6245 {
+ \tid -16\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.162 weight 1.00000 (esc)
+ \titem osd.163 weight 1.00000 (esc)
+ \titem osd.164 weight 1.00000 (esc)
+ \titem osd.165 weight 1.00000 (esc)
+ \titem osd.166 weight 1.00000 (esc)
+ \titem osd.167 weight 1.00000 (esc)
+ \titem osd.168 weight 1.00000 (esc)
+ }
+ host cephstore6336 {
+ \tid -18\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.176 weight 1.00000 (esc)
+ \titem osd.177 weight 1.00000 (esc)
+ \titem osd.178 weight 1.00000 (esc)
+ \titem osd.179 weight 1.00000 (esc)
+ \titem osd.180 weight 1.00000 (esc)
+ \titem osd.181 weight 1.00000 (esc)
+ \titem osd.182 weight 1.00000 (esc)
+ }
+ host cephstore6338 {
+ \tid -20\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.190 weight 1.00000 (esc)
+ \titem osd.191 weight 1.00000 (esc)
+ \titem osd.192 weight 1.00000 (esc)
+ \titem osd.193 weight 1.00000 (esc)
+ \titem osd.194 weight 1.00000 (esc)
+ \titem osd.195 weight 1.00000 (esc)
+ \titem osd.196 weight 1.00000 (esc)
+ }
+ host cephstore6339 {
+ \tid -21\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.197 weight 1.00000 (esc)
+ \titem osd.198 weight 1.00000 (esc)
+ \titem osd.199 weight 1.00000 (esc)
+ \titem osd.200 weight 1.00000 (esc)
+ \titem osd.201 weight 1.00000 (esc)
+ \titem osd.202 weight 1.00000 (esc)
+ \titem osd.203 weight 1.00000 (esc)
+ }
+ host cephstore6340 {
+ \tid -22\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.204 weight 1.00000 (esc)
+ \titem osd.205 weight 1.00000 (esc)
+ \titem osd.206 weight 1.00000 (esc)
+ \titem osd.207 weight 1.00000 (esc)
+ \titem osd.208 weight 1.00000 (esc)
+ \titem osd.209 weight 1.00000 (esc)
+ \titem osd.210 weight 1.00000 (esc)
+ }
+ host cephstore6343 {
+ \tid -26\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.225 weight 1.00000 (esc)
+ \titem osd.226 weight 1.00000 (esc)
+ \titem osd.227 weight 1.00000 (esc)
+ \titem osd.228 weight 1.00000 (esc)
+ \titem osd.229 weight 1.00000 (esc)
+ \titem osd.230 weight 1.00000 (esc)
+ \titem osd.231 weight 1.00000 (esc)
+ }
+ host cephstore6230 {
+ \tid -32\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.57 weight 1.00000 (esc)
+ \titem osd.58 weight 1.00000 (esc)
+ \titem osd.59 weight 1.00000 (esc)
+ \titem osd.60 weight 1.00000 (esc)
+ \titem osd.61 weight 1.00000 (esc)
+ \titem osd.62 weight 1.00000 (esc)
+ \titem osd.63 weight 1.00000 (esc)
+ }
+ host cephstore6231 {
+ \tid -33\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.64 weight 1.00000 (esc)
+ \titem osd.65 weight 1.00000 (esc)
+ \titem osd.66 weight 1.00000 (esc)
+ \titem osd.67 weight 1.00000 (esc)
+ \titem osd.68 weight 1.00000 (esc)
+ \titem osd.69 weight 1.00000 (esc)
+ \titem osd.70 weight 1.00000 (esc)
+ }
+ host cephstore6232 {
+ \tid -34\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.71 weight 1.00000 (esc)
+ \titem osd.72 weight 1.00000 (esc)
+ \titem osd.73 weight 1.00000 (esc)
+ \titem osd.74 weight 1.00000 (esc)
+ \titem osd.75 weight 1.00000 (esc)
+ \titem osd.76 weight 1.00000 (esc)
+ \titem osd.77 weight 1.00000 (esc)
+ }
+ host cephstore6233 {
+ \tid -35\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.78 weight 1.00000 (esc)
+ \titem osd.79 weight 1.00000 (esc)
+ \titem osd.80 weight 1.00000 (esc)
+ \titem osd.81 weight 1.00000 (esc)
+ \titem osd.82 weight 1.00000 (esc)
+ \titem osd.83 weight 1.00000 (esc)
+ \titem osd.84 weight 1.00000 (esc)
+ }
+ host cephstore6234 {
+ \tid -36\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.85 weight 1.00000 (esc)
+ \titem osd.86 weight 1.00000 (esc)
+ \titem osd.87 weight 1.00000 (esc)
+ \titem osd.88 weight 1.00000 (esc)
+ \titem osd.89 weight 1.00000 (esc)
+ \titem osd.90 weight 1.00000 (esc)
+ \titem osd.91 weight 1.00000 (esc)
+ }
+ host cephstore6235 {
+ \tid -37\t\t# do not change unnecessarily (esc)
+ \t# weight 7.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem osd.92 weight 1.00000 (esc)
+ \titem osd.93 weight 1.00000 (esc)
+ \titem osd.94 weight 1.00000 (esc)
+ \titem osd.95 weight 1.00000 (esc)
+ \titem osd.96 weight 1.00000 (esc)
+ \titem osd.97 weight 1.00000 (esc)
+ \titem osd.98 weight 1.00000 (esc)
+ }
+ rack irv-n1 {
+ \tid -6\t\t# do not change unnecessarily (esc)
+ \t# weight 119.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem cephstore6236 weight 7.00000 (esc)
+ \titem cephstore6237 weight 7.00000 (esc)
+ \titem cephstore6239 weight 7.00000 (esc)
+ \titem cephstore6241 weight 7.00000 (esc)
+ \titem cephstore6243 weight 7.00000 (esc)
+ \titem cephstore6245 weight 7.00000 (esc)
+ \titem cephstore6336 weight 7.00000 (esc)
+ \titem cephstore6338 weight 7.00000 (esc)
+ \titem cephstore6339 weight 7.00000 (esc)
+ \titem cephstore6340 weight 7.00000 (esc)
+ \titem cephstore6343 weight 7.00000 (esc)
+ \titem cephstore6230 weight 7.00000 (esc)
+ \titem cephstore6231 weight 7.00000 (esc)
+ \titem cephstore6232 weight 7.00000 (esc)
+ \titem cephstore6233 weight 7.00000 (esc)
+ \titem cephstore6234 weight 7.00000 (esc)
+ \titem cephstore6235 weight 7.00000 (esc)
+ }
+ root default {
+ \tid -1\t\t# do not change unnecessarily (esc)
+ \t# weight 238.00000 (esc)
+ \talg straw2 (esc)
+ \thash 0\t# rjenkins1 (esc)
+ \titem irv-n2 weight 119.00000 (esc)
+ \titem irv-n1 weight 119.00000 (esc)
+ }
+
+ # rules
+ rule replicated_rule {
+ \tid 0 (esc)
+ \ttype replicated (esc)
+ \tstep take default (esc)
+ \tstep chooseleaf firstn 0 type host (esc)
+ \tstep emit (esc)
+ }
+
+ # end crush map
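+# (sanity check on the generated hierarchy: hosts hold 7 osds of weight 1, 17 hosts per rack gives 119, and 2 racks give 238 at the root)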
+ $ rm oc
+ $ osdmaptool --test-map-pg 0.0 om
+ osdmaptool: osdmap file 'om'
+ parsed '0.0' -> 0.0
+ 0.0 raw ([], p-1) up ([], p-1) acting ([], p-1)
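+# every osd in the freshly created map is still down/out, so the raw/up/acting sets are empty and there is no primary (p-1)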
+ $ osdmaptool --print om
+ osdmaptool: osdmap file 'om'
+ epoch 1
+ fsid [0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12} (re)
+ created \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+.\d\d\d\d (re)
+ modified \d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d+.\d\d\d\d (re)
+ flags
+ crush_version 1
+ full_ratio 0
+ backfillfull_ratio 0
+ nearfull_ratio 0
+ min_compat_client jewel
+ stretch_mode_enabled false
+
+ pool 1 'rbd' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 15296 pgp_num 15296 autoscale_mode on last_change 0 flags hashpspool stripe_width 0 application rbd
+
+ max_osd 239
+
+
+ $ osdmaptool --clobber --create-from-conf --with-default-pool om -c $TESTDIR/ceph.conf.withracks
+ osdmaptool: osdmap file 'om'
+ osdmaptool: writing epoch 1 to om
+ $ osdmaptool --print om | grep 'pool 1'
+ osdmaptool: osdmap file 'om'
+ pool 1 'rbd' replicated size 3 min_size 2 crush_rule 0 object_hash rjenkins pg_num 15296 pgp_num 15296 autoscale_mode on last_change 0 flags hashpspool stripe_width 0 application rbd
+ $ rm -f om
diff --git a/src/test/cli/osdmaptool/crush.t b/src/test/cli/osdmaptool/crush.t
new file mode 100644
index 000000000..520f11e50
--- /dev/null
+++ b/src/test/cli/osdmaptool/crush.t
@@ -0,0 +1,17 @@
+ $ osdmaptool --createsimple 3 myosdmap --with-default-pool
+ osdmaptool: osdmap file 'myosdmap'
+ osdmaptool: writing epoch 1 to myosdmap
+ $ osdmaptool --export-crush oc myosdmap
+ osdmaptool: osdmap file 'myosdmap'
+ osdmaptool: exported crush map to oc
+ $ osdmaptool --import-crush oc myosdmap
+ osdmaptool: osdmap file 'myosdmap'
+ osdmaptool: imported 497 byte crush map from oc
+ osdmaptool: writing epoch 3 to myosdmap
+ $ osdmaptool --adjust-crush-weight 0:5 myosdmap
+ osdmaptool: osdmap file 'myosdmap'
+ Adjusted osd.0 CRUSH weight to 5
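+# without --save the adjusted weight is not persisted (no new epoch is written); the same command with --save below writes epoch 5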
+ $ osdmaptool --adjust-crush-weight 0:5 myosdmap --save
+ osdmaptool: osdmap file 'myosdmap'
+ Adjusted osd.0 CRUSH weight to 5
+ osdmaptool: writing epoch 5 to myosdmap
\ No newline at end of file
diff --git a/src/test/cli/osdmaptool/help.t b/src/test/cli/osdmaptool/help.t
new file mode 100644
index 000000000..624fe9102
--- /dev/null
+++ b/src/test/cli/osdmaptool/help.t
@@ -0,0 +1,42 @@
+# TODO be user-friendly
+ $ osdmaptool --help
+ usage: [--print] <mapfilename>
+ --create-from-conf creates an osd map with default configurations
+ --createsimple <numosd> [--clobber] [--pg-bits <bitsperosd>] [--pgp-bits <bits>] creates a relatively generic OSD map with <numosd> devices
+ --pgp-bits <bits> pgp_num map attribute will be shifted by <bits>
+ --pg-bits <bits> pg_num map attribute will be shifted by <bits>
+ --clobber allows osdmaptool to overwrite <mapfilename> if it already exists
+ --export-crush <file> write osdmap's crush map to <file>
+ --import-crush <file> replace osdmap's crush map with <file>
+ --health dump health checks
+ --test-map-pgs [--pool <poolid>] [--pg_num <pg_num>] [--range-first <first> --range-last <last>] map all pgs
+ --test-map-pgs-dump [--pool <poolid>] [--range-first <first> --range-last <last>] map all pgs
+ --test-map-pgs-dump-all [--pool <poolid>] [--range-first <first> --range-last <last>] map all pgs to osds
+ --mark-up-in mark osds up and in (but do not persist)
+ --mark-out <osdid> mark an osd as out (but do not persist)
+ --mark-up <osdid> mark an osd as up (but do not persist)
+ --mark-in <osdid> mark an osd as in (but do not persist)
+ --with-default-pool include default pool when creating map
+ --clear-temp clear pg_temp and primary_temp
+ --clean-temps clean pg_temps
+ --test-random do random placements
+ --test-map-pg <pgid> map a pgid to osds
+ --test-map-object <objectname> [--pool <poolid>] map an object to osds
+ --upmap-cleanup <file> clean up pg_upmap[_items] entries, writing
+ commands to <file> [default: - for stdout]
+ --upmap <file> calculate pg upmap entries to balance pg layout
+ writing commands to <file> [default: - for stdout]
+ --upmap-max <max-count> set max upmap entries to calculate [default: 10]
+ --upmap-deviation <max-deviation>
+ max deviation from target [default: 5]
+ --upmap-pool <poolname> restrict upmap balancing to 1 or more pools
+ --upmap-active Act like an active balancer, keep applying changes until balanced
+ --dump <format> displays the map in plain text when <format> is 'plain', 'json' if specified format is not supported
+ --tree displays a tree of the map
+ --test-crush [--range-first <first> --range-last <last>] map pgs to acting osds
+ --adjust-crush-weight <osdid:weight>[,<osdid:weight>,<...>] change <osdid> CRUSH <weight> (but do not persist)
+ --save write modified osdmap with upmap or crush-adjust changes
+ --read <file> calculate pg upmap entries to balance pg primaries
+ --read-pool <poolname> specify which pool the read balancer should adjust
+ --vstart prefix upmap and read output with './bin/'
+ [1]
diff --git a/src/test/cli/osdmaptool/missing-argument.t b/src/test/cli/osdmaptool/missing-argument.t
new file mode 100644
index 000000000..de9b80073
--- /dev/null
+++ b/src/test/cli/osdmaptool/missing-argument.t
@@ -0,0 +1,3 @@
+ $ osdmaptool
+ osdmaptool: -h or --help for usage
+ [1]
diff --git a/src/test/cli/osdmaptool/pool.t b/src/test/cli/osdmaptool/pool.t
new file mode 100644
index 000000000..4a967843f
--- /dev/null
+++ b/src/test/cli/osdmaptool/pool.t
@@ -0,0 +1,54 @@
+ $ osdmaptool --createsimple 3 myosdmap --with-default-pool
+ osdmaptool: osdmap file 'myosdmap'
+ osdmaptool: writing epoch 1 to myosdmap
+
+#
+# --test-map-object / --pool
+#
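+# (pool 1 is the default 'rbd' pool created via --with-default-pool, so the mappings below resolve to pgids of the form 1.x)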
+ $ osdmaptool myosdmap --test-map-object foo --pool
+ Option --pool requires an argument.
+
+ [1]
+
+ $ osdmaptool myosdmap --test-map-object foo --pool bar
+ The option value 'bar' is invalid
+ [1]
+
+ $ osdmaptool myosdmap --test-map-object foo --pool 123
+ osdmaptool: osdmap file 'myosdmap'
+ There is no pool 123
+ [1]
+
+ $ osdmaptool myosdmap --test-map-object foo --pool 1
+ osdmaptool: osdmap file 'myosdmap'
+ object 'foo' \-\> 1\..* (re)
+
+ $ osdmaptool myosdmap --test-map-object foo
+ osdmaptool: osdmap file 'myosdmap'
+ osdmaptool: assuming pool 1 (use --pool to override)
+ object 'foo' \-\> 1\..* (re)
+
+#
+# --test-map-pgs / --pool
+#
+ $ osdmaptool myosdmap --test-map-pgs --pool
+ Option --pool requires an argument.
+
+ [1]
+
+ $ osdmaptool myosdmap --test-map-pgs --pool baz
+ The option value 'baz' is invalid
+ [1]
+
+ $ osdmaptool myosdmap --test-map-pgs --pool 123
+ osdmaptool: osdmap file 'myosdmap'
+ There is no pool 123
+ [1]
+
+ $ osdmaptool myosdmap --mark-up-in --test-map-pgs --pool 1 | grep pool
+ osdmaptool: osdmap file 'myosdmap'
+ pool 1 pg_num .* (re)
+
+ $ osdmaptool myosdmap --mark-up-in --test-map-pgs | grep pool
+ osdmaptool: osdmap file 'myosdmap'
+ pool 1 pg_num .* (re)
diff --git a/src/test/cli/osdmaptool/print-empty.t b/src/test/cli/osdmaptool/print-empty.t
new file mode 100644
index 000000000..a629f7717
--- /dev/null
+++ b/src/test/cli/osdmaptool/print-empty.t
@@ -0,0 +1,5 @@
+ $ touch empty
+ $ osdmaptool --print empty
+ osdmaptool: osdmap file 'empty'
+ osdmaptool: error decoding osdmap 'empty'
+ [255]
diff --git a/src/test/cli/osdmaptool/print-nonexistent.t b/src/test/cli/osdmaptool/print-nonexistent.t
new file mode 100644
index 000000000..88f7e6182
--- /dev/null
+++ b/src/test/cli/osdmaptool/print-nonexistent.t
@@ -0,0 +1,4 @@
+ $ osdmaptool --print nonexistent
+ osdmaptool: osdmap file 'nonexistent'
+ osdmaptool: couldn't open nonexistent: can't open nonexistent: (2) No such file or directory
+ [255]
diff --git a/src/test/cli/osdmaptool/test-map-pgs.t b/src/test/cli/osdmaptool/test-map-pgs.t
new file mode 100644
index 000000000..f9f7897b2
--- /dev/null
+++ b/src/test/cli/osdmaptool/test-map-pgs.t
@@ -0,0 +1,43 @@
+ $ NUM_OSDS=500
+ $ POOL_COUNT=1 # only the default rbd pool
+ $ SIZE=3
+ $ PG_BITS=4
+#
+# create an osdmap with a few hundred devices and a realistic crushmap
+#
+ $ OSD_MAP="osdmap"
+ $ osdmaptool --osd_pool_default_size $SIZE --pg_bits $PG_BITS --createsimple $NUM_OSDS "$OSD_MAP" > /dev/null --with-default-pool
+ osdmaptool: osdmap file 'osdmap'
+ $ CRUSH_MAP="crushmap"
+ $ CEPH_ARGS="--debug-crush 0" crushtool --outfn "$CRUSH_MAP" --build --num_osds $NUM_OSDS node straw 10 rack straw 10 root straw 0
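+# crushtool --build assembles a synthetic hierarchy here: straw buckets of 10 osds per node, 10 nodes per rack, and everything under a single root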
+ $ osdmaptool --import-crush "$CRUSH_MAP" "$OSD_MAP" > /dev/null
+ osdmaptool: osdmap file 'osdmap'
+ $ OUT="$TESTDIR/out"
+#
+# --test-map-pgs
+#
+ $ osdmaptool --mark-up-in --test-map-pgs "$OSD_MAP" > "$OUT"
+ osdmaptool: osdmap file 'osdmap'
+ $ PG_NUM=$(($NUM_OSDS << $PG_BITS))
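+# with pg_bits 4 the default pool gets NUM_OSDS << PG_BITS = 500 << 4 = 8000 pgs, which the two greps below verify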
+ $ grep "pg_num $PG_NUM" "$OUT" || cat $OUT
+ pool 1 pg_num 8000
+ $ TOTAL=$((POOL_COUNT * $PG_NUM))
+ $ grep -E "size $SIZE[[:space:]]$TOTAL" $OUT || cat $OUT
+ size 3\t8000 (esc)
+ $ STATS_CRUSH=$(grep '^ avg ' "$OUT")
+#
+# --test-map-pgs --test-random is expected to change nothing regarding the totals
+#
+ $ osdmaptool --mark-up-in --test-random --test-map-pgs "$OSD_MAP" > "$OUT"
+ osdmaptool: osdmap file 'osdmap'
+ $ PG_NUM=$(($NUM_OSDS << $PG_BITS))
+ $ grep "pg_num $PG_NUM" "$OUT" || cat $OUT
+ pool 1 pg_num 8000
+ $ TOTAL=$((POOL_COUNT * $PG_NUM))
+ $ grep -E "size $SIZE[[:space:]]$TOTAL" $OUT || cat $OUT
+ size 3\t8000 (esc)
+ $ STATS_RANDOM=$(grep '^ avg ' "$OUT")
+#
+# cleanup
+#
+ $ rm -f "$CRUSH_MAP" "$OSD_MAP" "$OUT"
diff --git a/src/test/cli/osdmaptool/tree.t b/src/test/cli/osdmaptool/tree.t
new file mode 100644
index 000000000..387f564b0
--- /dev/null
+++ b/src/test/cli/osdmaptool/tree.t
@@ -0,0 +1,95 @@
+ $ osdmaptool --createsimple 3 om --with-default-pool
+ osdmaptool: osdmap file 'om'
+ osdmaptool: writing epoch 1 to om
+
+ $ osdmaptool --tree=plain om
+ osdmaptool: osdmap file 'om'
+ ID CLASS WEIGHT TYPE NAME STATUS REWEIGHT PRI-AFF
+ -1 3.00000 root default
+ -3 3.00000 rack localrack
+ -2 3.00000 host localhost
+ 0 1.00000 osd.0 DNE 0
+ 1 1.00000 osd.1 DNE 0
+ 2 1.00000 osd.2 DNE 0
+
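+# DNE / reweight 0 above and "exists": 0, "status": "down" below reflect osds that are defined in the map but not (yet) up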
+ $ osdmaptool --tree=json-pretty om
+ osdmaptool: osdmap file 'om'
+ {
+ "nodes": [
+ {
+ "id": -1,
+ "name": "default",
+ "type": "root",
+ "type_id": 11,
+ "children": [
+ -3
+ ]
+ },
+ {
+ "id": -3,
+ "name": "localrack",
+ "type": "rack",
+ "type_id": 3,
+ "pool_weights": {},
+ "children": [
+ -2
+ ]
+ },
+ {
+ "id": -2,
+ "name": "localhost",
+ "type": "host",
+ "type_id": 1,
+ "pool_weights": {},
+ "children": [
+ 2,
+ 1,
+ 0
+ ]
+ },
+ {
+ "id": 0,
+ "name": "osd.0",
+ "type": "osd",
+ "type_id": 0,
+ "crush_weight": 1,
+ "depth": 3,
+ "pool_weights": {},
+ "exists": 0,
+ "status": "down",
+ "reweight": 0,
+ "primary_affinity": 1
+ },
+ {
+ "id": 1,
+ "name": "osd.1",
+ "type": "osd",
+ "type_id": 0,
+ "crush_weight": 1,
+ "depth": 3,
+ "pool_weights": {},
+ "exists": 0,
+ "status": "down",
+ "reweight": 0,
+ "primary_affinity": 1
+ },
+ {
+ "id": 2,
+ "name": "osd.2",
+ "type": "osd",
+ "type_id": 0,
+ "crush_weight": 1,
+ "depth": 3,
+ "pool_weights": {},
+ "exists": 0,
+ "status": "down",
+ "reweight": 0,
+ "primary_affinity": 1
+ }
+ ],
+ "stray": []
+ }
+
+
+ $ rm -f om
+
diff --git a/src/test/cli/osdmaptool/upmap-out.t b/src/test/cli/osdmaptool/upmap-out.t
new file mode 100644
index 000000000..02b13ec56
--- /dev/null
+++ b/src/test/cli/osdmaptool/upmap-out.t
@@ -0,0 +1,24 @@
+ $ osdmaptool --create-from-conf om -c $TESTDIR/ceph.conf.withracks --with-default-pool
+ osdmaptool: osdmap file 'om'
+ osdmaptool: writing epoch 1 to om
+ $ osdmaptool --osd_calc_pg_upmaps_aggressively=false om --mark-up-in --mark-out 147 --upmap-max 11 --upmap c
+ osdmaptool: osdmap file 'om'
+ marking all OSDs up and in
+ marking OSD@147 as out
+ writing upmap command output to: c
+ checking for upmap cleanups
+ upmap, max-count 11, max deviation 5
+ pools rbd
+ prepared 11/11 changes
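+# osd.147 (marked out above) never appears as an upmap target; compare upmap.t, where pg 1.7 is remapped to osd.147 instead of osd.145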
+ $ cat c
+ ceph osd pg-upmap-items 1.7 142 145
+ ceph osd pg-upmap-items 1.8 219 223
+ ceph osd pg-upmap-items 1.17 201 202 171 173
+ ceph osd pg-upmap-items 1.1a 201 202
+ ceph osd pg-upmap-items 1.1c 201 202
+ ceph osd pg-upmap-items 1.20 201 202
+ ceph osd pg-upmap-items 1.51 201 202
+ ceph osd pg-upmap-items 1.62 219 223
+ ceph osd pg-upmap-items 1.6f 219 223
+ ceph osd pg-upmap-items 1.82 219 223
+ $ rm -f om c
diff --git a/src/test/cli/osdmaptool/upmap.t b/src/test/cli/osdmaptool/upmap.t
new file mode 100644
index 000000000..b84fea28c
--- /dev/null
+++ b/src/test/cli/osdmaptool/upmap.t
@@ -0,0 +1,37 @@
+ $ osdmaptool --create-from-conf om -c $TESTDIR/ceph.conf.withracks --with-default-pool
+ osdmaptool: osdmap file 'om'
+ osdmaptool: writing epoch 1 to om
+ $ osdmaptool --osd_calc_pg_upmaps_aggressively=false om --mark-up-in --upmap-max 11 --upmap c --save
+ osdmaptool: osdmap file 'om'
+ marking all OSDs up and in
+ writing upmap command output to: c
+ checking for upmap cleanups
+ upmap, max-count 11, max deviation 5
+ pools rbd
+ prepared 11/11 changes
+ osdmaptool: writing epoch 3 to om
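+# --save persists the upmap entries (hence epoch 3 above), so they also show up in the --print check at the end of this test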
+ $ cat c
+ ceph osd pg-upmap-items 1.7 142 147
+ ceph osd pg-upmap-items 1.8 219 223
+ ceph osd pg-upmap-items 1.17 201 202 171 173
+ ceph osd pg-upmap-items 1.1a 201 202
+ ceph osd pg-upmap-items 1.1c 201 202
+ ceph osd pg-upmap-items 1.20 201 202
+ ceph osd pg-upmap-items 1.24 232 233
+ ceph osd pg-upmap-items 1.51 201 202
+ ceph osd pg-upmap-items 1.62 219 223
+ ceph osd pg-upmap-items 1.6f 219 223
+ $ osdmaptool --print om | grep pg_upmap_items
+ osdmaptool: osdmap file 'om'
+ pg_upmap_items 1.7 [142,147]
+ pg_upmap_items 1.8 [219,223]
+ pg_upmap_items 1.17 [201,202,171,173]
+ pg_upmap_items 1.1a [201,202]
+ pg_upmap_items 1.1c [201,202]
+ pg_upmap_items 1.20 [201,202]
+ pg_upmap_items 1.24 [232,233]
+ pg_upmap_items 1.51 [201,202]
+ pg_upmap_items 1.62 [219,223]
+ pg_upmap_items 1.6f [219,223]
+ $ rm -f om c
+