#!/bin/sh
# storage-tools - Additional utilities to manage storage related tasks
# Copyright (C) 2014-2017 Daniel Baumann <daniel.baumann@open-infrastructure.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
set -e

# Name of this script, for use in messages.
# Quote "${0}" so a script path containing spaces does not word-split.
PROGRAM="$(basename "${0}")"

# All positional arguments: the OSD ids to remove, joined into one string.
# "${*}" is the correct spelling for a scalar assignment (the original
# "${@}" has unspecified semantics in an assignment); the main loop
# deliberately relies on unquoted word splitting of this value.
OSDS="${*}"
# Print the command-line synopsis on stdout and abort with status 1.
Usage ()
{
	printf '%s\n' "Usage: ${PROGRAM} OSD1 OSD2.. OSDn"

	exit 1
}
# Require at least one OSD id on the command line.
if [ -z "${OSDS}" ]
then
	Usage
fi

# The main loop parses 'ceph osd find' output with jq; fail early if it is
# not installed. Fix: the original error message said '/usr/bin/jp', which
# did not match the path actually tested ('/usr/bin/jq').
if [ ! -x /usr/bin/jq ]
then
	echo "'${PROGRAM}': /usr/bin/jq - no such file." >&2
	exit 1
fi
# Run
#
# For each OSD id given on the command line: find the storage host that
# owns it via the CRUSH map, drain it, stop and unmount it on that host,
# and finally delete it from the cluster. Every step mutates a live Ceph
# cluster; with 'set -e' a failing step aborts the script mid-removal.
for OSD in ${OSDS}
do
# Resolve the owning host from the 'crush_location' field of the
# 'ceph osd find' JSON output.
HOST="$(ceph osd find ${OSD} | jq -r '.["crush_location"]["host"]')"
# remove the failed OSD: mark it 'out' so data rebalances off it
ceph osd out ${OSD}
# stop the osd.${OSD} daemon on the respective storage host
# NOTE(review): assumes the 'ceph-deploy' user has passwordless sudo on
# ${HOST} — confirm against the deployment setup.
ssh ceph-deploy@${HOST} sudo service ceph-osd@${OSD} stop
# unmount the disk
ssh ceph-deploy@${HOST} sudo umount /var/lib/ceph/osd/ceph-${OSD}
# remove osd from crush map
ceph osd crush remove osd.${OSD}
# remove cephx key
ceph auth del osd.${OSD}
# mark osd as down
# NOTE(review): this uses the 'osd.N' form while 'ceph osd out' above uses
# the bare id, and it runs after the CRUSH removal — verify both against
# the Ceph manual-removal procedure for the deployed release.
ceph osd down osd.${OSD}
# remove the osd
ceph osd rm osd.${OSD}
done