summaryrefslogtreecommitdiffstats
path: root/src/script/kubejacker/kubejacker.sh
blob: e013669e993e7dd87a0a4694e4785c5747e52b70 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
#!/bin/bash

# Kubejacker: repackage locally-built Ceph artifacts into a container
# image, push it to a registry, and bounce the Rook test cluster's pods
# so they pick up the new image.

set -x
# -e: abort on error; -u: catch unset vars; pipefail: fail broken pipelines.
set -euo pipefail
SCRIPT=$(readlink -f "$0")
SCRIPTPATH=$(dirname "$SCRIPT")

# Run me from your build dir!  I look for binaries in bin/, lib/ etc.
BUILDPATH=$(pwd)


# PREREQUISITE: a repo that you can push to.  You are probably running
# a local docker registry that your kubelet nodes also have access to.
# ${1-} (not $1) so an absent positional arg doesn't trip `set -u`.
REPO=${REPO:-"${1-}"}

if [ -z "$REPO" ]
then
    # Diagnostics belong on stderr; exit status must be 0-255 (POSIX),
    # so use 1 rather than the former non-portable `exit -1`.
    echo "ERROR: no \$REPO set!" >&2
    echo "Run a docker repository and set REPO to <hostname>:<port>" >&2
    exit 1
fi

# The output image name: this should match whatever is configured as
# the image name in your Rook cluster CRD object.
IMAGE=ceph/ceph
TAG=latest

# The namespace where ceph containers are running in your
# test cluster: used for bouncing the containers.
NAMESPACE=rook-ceph

# Stage the Dockerfile next to the tarballs we are about to create.
mkdir -p kubejacker
# Quoted: $SCRIPTPATH may contain spaces (SC2086).
cp "$SCRIPTPATH/Dockerfile" kubejacker

# TODO: let user specify which daemon they're interested
# in -- doing all bins all the time is too slow and bloaty
#BINS="ceph-mgr ceph-mon ceph-mds ceph-osd rados radosgw-admin radosgw"
#pushd bin
#strip $BINS  #TODO: make stripping optional
#tar czf $BUILDPATH/kubejacker/bin.tar.gz $BINS
#popd

# We need ceph-common to support the binaries
# We need librados/rbd to support mgr modules
# that import the python bindings
#LIBS="libceph-common.so.0 libceph-common.so librados.so.2 librados.so librados.so.2.0.0 librbd.so librbd.so.1 librbd.so.1.12.0"
#pushd lib
#strip $LIBS  #TODO: make stripping optional
#tar czf $BUILDPATH/kubejacker/lib.tar.gz $LIBS
#popd

# Package the python-common sources (paths are relative to the build dir).
pushd ../src/python-common/ceph
tar --exclude=__pycache__ --exclude=tests -czf "$BUILDPATH/kubejacker/python_common.tar.gz" *
popd

pushd ../src/pybind/mgr
# Drop stale bytecode before packaging; `+` batches files into few rm calls
# instead of forking one rm per file.
find ./ -name "*.pyc" -exec rm -f {} +
# Exclude node_modules because it's the huge sources in dashboard/frontend
tar --exclude=node_modules --exclude=tests --exclude-backups -czf "$BUILDPATH/kubejacker/mgr_plugins.tar.gz" *
popd

#ECLIBS="libec_*.so*"
#pushd lib
#strip $ECLIBS  #TODO: make stripping optional
#tar czf $BUILDPATH/kubejacker/eclib.tar.gz $ECLIBS
#popd

#CLSLIBS="libcls_*.so*"
#pushd lib
#strip $CLSLIBS  #TODO: make stripping optional
#tar czf $BUILDPATH/kubejacker/clslib.tar.gz $CLSLIBS
#popd

# Build the image from the staged Dockerfile and tarballs, then push it.
# Use $IMAGE/$TAG (declared above) instead of hard-coding ceph/ceph:latest,
# so changing the config vars actually takes effect.
pushd kubejacker
docker build -t "$REPO/$IMAGE:$TAG" .
popd

# Push the image to the repository
docker push "$REPO/$IMAGE:$TAG"
# With a plain HTTP registry
#podman push "$REPO/$IMAGE:$TAG" --tls-verify=false

# Finally, bounce the containers to pick up the new image
for daemon in mds mgr mon; do
    kubectl -n "$NAMESPACE" delete pod -l "app=rook-ceph-$daemon"
done