summaryrefslogtreecommitdiffstats
path: root/qa/workunits/rbd/krbd_huge_osdmap.sh
blob: 0a550d67406e4fcc9effe47ffd2947286543a83e (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
#!/usr/bin/env bash

# This is a test for https://tracker.ceph.com/issues/40481.
#
# An osdmap with 60000 slots encodes to ~16M, of which the ignored portion
# is ~13M.  However in-memory osdmap is larger than ~3M: in-memory osd_addr
# array for 60000 OSDs is ~8M because of sockaddr_storage.
#
# Set mon_max_osd = 60000 in ceph.conf.

# -e: abort on any failing command; -x: trace commands for teuthology logs.
set -ex

# Succeed iff the given command fails.  Used to assert that a command is
# expected to error out; if it unexpectedly succeeds, the non-zero return
# aborts the script via `set -e`.
function expect_false() {
    ! "$@"
}

# Map an image, repeatedly resize the osdmap (which forces the kernel client
# to process full/incremental osdmaps of wildly varying sizes), and verify
# I/O still works after each step.
#
# Bug fix: the original referenced $DEV (never assigned) in the xfs_io
# calls even though the mapped device is stored in the local variable
# "dev"; with `set -e` but no `set -u`, $DEV expanded to empty and xfs_io
# ran without a device argument.
function run_test() {
    local dev max

    # initially tiny, grow via incrementals
    dev=$(sudo rbd map img)
    for max in 8 60 600 6000 60000; do
        ceph osd setmaxosd "$max"
        # Mapping a nonexistent image forces the kernel to fetch the
        # latest osdmap; the map attempt itself is expected to fail.
        expect_false sudo rbd map wait_for/latest_osdmap
        # Confirm I/O on the already-mapped device still works.
        xfs_io -c 'pwrite -w 0 12M' "$dev"
    done
    # Round-trip the crushmap to bump the epoch with a full osdmap.
    ceph osd getcrushmap -o /dev/stdout | ceph osd setcrushmap -i /dev/stdin
    expect_false sudo rbd map wait_for/latest_osdmap
    xfs_io -c 'pwrite -w 0 12M' "$dev"
    sudo rbd unmap "$dev"

    # initially huge, shrink via incrementals
    dev=$(sudo rbd map img)
    for max in 60000 6000 600 60 8; do
        ceph osd setmaxosd "$max"
        expect_false sudo rbd map wait_for/latest_osdmap
        xfs_io -c 'pwrite -w 0 12M' "$dev"
    done
    ceph osd getcrushmap -o /dev/stdout | ceph osd setcrushmap -i /dev/stdin
    expect_false sudo rbd map wait_for/latest_osdmap
    xfs_io -c 'pwrite -w 0 12M' "$dev"
    sudo rbd unmap "$dev"
}

# Create the test image sized to match the 12M pwrite in run_test.
rbd create --size 12M img
run_test
# repeat with primary affinity (adds an extra array)
ceph osd primary-affinity osd.0 0.5
run_test

# Reached only if every command above succeeded (set -e aborts otherwise).
echo OK