author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-23 16:45:17 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-23 16:45:44 +0000
commit     17d6a993fc17d533460c5f40f3908c708e057c18 (patch)
tree       1a3bd93e0ecd74fa02f93a528fe2f87e5314c4b5 /qa/suites/orch
parent     Releasing progress-linux version 18.2.2-0progress7.99u1. (diff)
download   ceph-17d6a993fc17d533460c5f40f3908c708e057c18.tar.xz
           ceph-17d6a993fc17d533460c5f40f3908c708e057c18.zip
Merging upstream version 18.2.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'qa/suites/orch')
-rw-r--r--  qa/suites/orch/cephadm/no-agent-workunits/%                                                                                   |  0
l---------  qa/suites/orch/cephadm/no-agent-workunits/.qa                                                                                 |  1
l---------  qa/suites/orch/cephadm/no-agent-workunits/0-distro                                                                            |  1
l---------  qa/suites/orch/cephadm/no-agent-workunits/mon_election                                                                        |  1
-rw-r--r--  qa/suites/orch/cephadm/no-agent-workunits/task/test_adoption.yaml (renamed from qa/suites/orch/cephadm/workunits/task/test_adoption.yaml)          |  0
-rw-r--r--  qa/suites/orch/cephadm/no-agent-workunits/task/test_cephadm_timeout.yaml                                                      | 13
-rw-r--r--  qa/suites/orch/cephadm/no-agent-workunits/task/test_orch_cli.yaml (renamed from qa/suites/orch/cephadm/workunits/task/test_orch_cli.yaml)          |  0
-rw-r--r--  qa/suites/orch/cephadm/no-agent-workunits/task/test_orch_cli_mon.yaml (renamed from qa/suites/orch/cephadm/workunits/task/test_orch_cli_mon.yaml)  |  0
-rw-r--r--  qa/suites/orch/cephadm/workunits/task/test_extra_daemon_features.yaml                                                         | 74
-rw-r--r--  qa/suites/orch/cephadm/workunits/task/test_host_drain.yaml                                                                    | 72
10 files changed, 162 insertions, 0 deletions
diff --git a/qa/suites/orch/cephadm/no-agent-workunits/% b/qa/suites/orch/cephadm/no-agent-workunits/%
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/orch/cephadm/no-agent-workunits/%
diff --git a/qa/suites/orch/cephadm/no-agent-workunits/.qa b/qa/suites/orch/cephadm/no-agent-workunits/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/orch/cephadm/no-agent-workunits/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/orch/cephadm/no-agent-workunits/0-distro b/qa/suites/orch/cephadm/no-agent-workunits/0-distro
new file mode 120000
index 000000000..4b341719d
--- /dev/null
+++ b/qa/suites/orch/cephadm/no-agent-workunits/0-distro
@@ -0,0 +1 @@
+.qa/distros/container-hosts
\ No newline at end of file
diff --git a/qa/suites/orch/cephadm/no-agent-workunits/mon_election b/qa/suites/orch/cephadm/no-agent-workunits/mon_election
new file mode 120000
index 000000000..3f331e621
--- /dev/null
+++ b/qa/suites/orch/cephadm/no-agent-workunits/mon_election
@@ -0,0 +1 @@
+.qa/mon_election
\ No newline at end of file
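
The '%' file, the '.qa' link, and the '0-distro'/'mon_election' symlinks added above are the usual teuthology suite-building pieces: '%' tells the suite builder to convolve the sibling fragment directories, while the symlinks pull in the shared container-hosts distro and mon-election fragments. A minimal sketch of inspecting the resulting job matrix with teuthology-suite's dry-run mode; this assumes a configured teuthology checkout, and the branch and machine type are placeholder values:

    # List the jobs the new no-agent-workunits sub-suite would generate,
    # without scheduling anything.
    teuthology-suite --dry-run \
        --suite orch/cephadm/no-agent-workunits \
        --ceph reef \
        --machine-type smithi
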
diff --git a/qa/suites/orch/cephadm/workunits/task/test_adoption.yaml b/qa/suites/orch/cephadm/no-agent-workunits/task/test_adoption.yaml
index e04fc1eea..e04fc1eea 100644
--- a/qa/suites/orch/cephadm/workunits/task/test_adoption.yaml
+++ b/qa/suites/orch/cephadm/no-agent-workunits/task/test_adoption.yaml
diff --git a/qa/suites/orch/cephadm/no-agent-workunits/task/test_cephadm_timeout.yaml b/qa/suites/orch/cephadm/no-agent-workunits/task/test_cephadm_timeout.yaml
new file mode 100644
index 000000000..24b53d029
--- /dev/null
+++ b/qa/suites/orch/cephadm/no-agent-workunits/task/test_cephadm_timeout.yaml
@@ -0,0 +1,13 @@
+roles:
+- - host.a
+ - mon.a
+ - mgr.a
+ - osd.0
+ - client.0
+tasks:
+- install:
+- cephadm:
+- workunit:
+ clients:
+ client.0:
+ - cephadm/test_cephadm_timeout.py
\ No newline at end of file
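
The fragment above pairs a single-node role set with the workunit task, which fetches qa/workunits from ceph.git and runs the listed script on client.0. A rough manual equivalent, as a sketch only; the clone path and invocation are illustrative and assume a node that already has a cephadm-deployed cluster:

    # Run the same workunit script by hand.
    git clone https://github.com/ceph/ceph.git
    cd ceph/qa/workunits
    python3 cephadm/test_cephadm_timeout.py
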
diff --git a/qa/suites/orch/cephadm/workunits/task/test_orch_cli.yaml b/qa/suites/orch/cephadm/no-agent-workunits/task/test_orch_cli.yaml
index ec65fb116..ec65fb116 100644
--- a/qa/suites/orch/cephadm/workunits/task/test_orch_cli.yaml
+++ b/qa/suites/orch/cephadm/no-agent-workunits/task/test_orch_cli.yaml
diff --git a/qa/suites/orch/cephadm/workunits/task/test_orch_cli_mon.yaml b/qa/suites/orch/cephadm/no-agent-workunits/task/test_orch_cli_mon.yaml
index 2a33dc839..2a33dc839 100644
--- a/qa/suites/orch/cephadm/workunits/task/test_orch_cli_mon.yaml
+++ b/qa/suites/orch/cephadm/no-agent-workunits/task/test_orch_cli_mon.yaml
diff --git a/qa/suites/orch/cephadm/workunits/task/test_extra_daemon_features.yaml b/qa/suites/orch/cephadm/workunits/task/test_extra_daemon_features.yaml
new file mode 100644
index 000000000..b5e0ec98f
--- /dev/null
+++ b/qa/suites/orch/cephadm/workunits/task/test_extra_daemon_features.yaml
@@ -0,0 +1,74 @@
+roles:
+- - host.a
+ - mon.a
+ - mgr.a
+ - osd.0
+- - host.b
+ - mon.b
+ - mgr.b
+ - osd.1
+tasks:
+- install:
+- cephadm:
+- exec:
+ all-hosts:
+ - mkdir /etc/cephadm_testing
+- cephadm.apply:
+ specs:
+ - service_type: mon
+ placement:
+ host_pattern: '*'
+ extra_container_args:
+ - "--cpus=2"
+ extra_entrypoint_args:
+ - "--debug_ms 10"
+ - service_type: container
+ service_id: foo
+ placement:
+ host_pattern: '*'
+ spec:
+ image: "quay.io/fedora/fedora:latest"
+ entrypoint: "bash"
+ extra_container_args:
+ - "-v"
+ - "/etc/cephadm_testing:/root/cephadm_testing"
+ extra_entrypoint_args:
+ - "/root/write_thing_to_file.sh"
+ - "-c"
+ - "testing_custom_containers"
+ - "-o"
+ - "/root/cephadm_testing/testing.txt"
+ custom_configs:
+ - mount_path: "/root/write_thing_to_file.sh"
+ content: |
+ while getopts "o:c:" opt; do
+ case ${opt} in
+ o )
+ OUT_FILE=${OPTARG}
+ ;;
+ c )
+ CONTENT=${OPTARG}
+ esac
+ done
+ echo $CONTENT > $OUT_FILE
+ sleep infinity
+- cephadm.wait_for_service:
+ service: mon
+- cephadm.wait_for_service:
+ service: container.foo
+- exec:
+ host.a:
+ - |
+ set -ex
+ FSID=$(/home/ubuntu/cephtest/cephadm shell -- ceph fsid)
+ sleep 60
+ # check extra container and entrypoint args written to mon unit run file
+ grep "\-\-cpus=2" /var/lib/ceph/$FSID/mon.*/unit.run
+ grep "\-\-debug_ms 10" /var/lib/ceph/$FSID/mon.*/unit.run
+ # check that custom container properly wrote content to file.
+ # This requires the custom config, extra container args, and
+ # entrypoint args to all be working in order for this to have
+ # been written. The container entrypoint was set up with custom_configs,
+ # the content and where to write to with the entrypoint args, and the mounting
+ # of the /etc/cephadm_testing dir with extra container args
+ grep "testing_custom_containers" /etc/cephadm_testing/testing.txt
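
The checks in this test double as a recipe for verifying extra_container_args and extra_entrypoint_args on a live cluster: the extra arguments should appear in each daemon's unit.run file under /var/lib/ceph/<fsid>/. A minimal stand-alone sketch of the same spot-checks, run as root on a cephadm host; the fsid lookup mirrors the test above and the paths assume the spec from this change was applied:

    # Confirm the extra container/entrypoint args reached the mon's unit.run,
    # and that the custom container wrote its marker file.
    FSID=$(cephadm shell -- ceph fsid)
    grep -- "--cpus=2" /var/lib/ceph/$FSID/mon.*/unit.run
    grep -- "--debug_ms 10" /var/lib/ceph/$FSID/mon.*/unit.run
    cat /etc/cephadm_testing/testing.txt   # expected to contain "testing_custom_containers"
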
diff --git a/qa/suites/orch/cephadm/workunits/task/test_host_drain.yaml b/qa/suites/orch/cephadm/workunits/task/test_host_drain.yaml
new file mode 100644
index 000000000..c195bc052
--- /dev/null
+++ b/qa/suites/orch/cephadm/workunits/task/test_host_drain.yaml
@@ -0,0 +1,72 @@
+roles:
+- - host.a
+ - mon.a
+ - mgr.a
+ - osd.0
+ - osd.1
+- - host.b
+ - mon.b
+ - mgr.b
+ - osd.2
+ - osd.3
+- - host.c
+ - mon.c
+ - osd.4
+ - osd.5
+tasks:
+- install:
+- cephadm:
+- cephadm.shell:
+ host.a:
+ - |
+ set -ex
+ HOSTNAMES=$(ceph orch host ls --format json | jq -r '.[] | .hostname')
+ for host in $HOSTNAMES; do
+ # find the hostname for "host.c" which will have no mgr
+ HAS_MGRS=$(ceph orch ps --hostname ${host} --format json | jq 'any(.daemon_type == "mgr")')
+ if [ "$HAS_MGRS" == "false" ]; then
+ HOST_C="${host}"
+ fi
+ done
+ # One last thing to worry about before draining the host
+ # is that the teuthology test tends to put the explicit
+ # hostnames in the placement for the mon service.
+ # We want to make sure we can drain without providing
+ # --force and there is a check for the host being removed
+ # being listed explicitly in the placements. Therefore,
+ # we should remove it from the mon placement.
+ ceph orch ls mon --export > mon.yaml
+ sed /"$HOST_C"/d mon.yaml > mon_adjusted.yaml
+ ceph orch apply -i mon_adjusted.yaml
+ # now drain that host
+ ceph orch host drain $HOST_C --zap-osd-devices
+ # wait for drain to complete
+ HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)
+ while [ "$HOST_C_DAEMONS" != "No daemons reported" ]; do
+ sleep 15
+ HOST_C_DAEMONS=$(ceph orch ps --hostname $HOST_C)
+ done
+ # we want to check the ability to remove the host from
+ # the CRUSH map, so we should first verify the host is in
+ # the CRUSH map.
+ ceph osd getcrushmap -o compiled-crushmap
+ crushtool -d compiled-crushmap -o crushmap.txt
+ CRUSH_MAP=$(cat crushmap.txt)
+ if ! grep -q "$HOST_C" <<< "$CRUSH_MAP"; then
+ printf "Expected to see $HOST_C in CRUSH map. Saw:\n\n$CRUSH_MAP"
+ exit 1
+ fi
+ # If the drain was successful, we should be able to remove the
+ # host without force with no issues. If there are still daemons
+ # we will get a response telling us to drain the host and a
+ # non-zero return code
+ ceph orch host rm $HOST_C --rm-crush-entry
+ # verify we've successfully removed the host from the CRUSH map
+ sleep 30
+ ceph osd getcrushmap -o compiled-crushmap
+ crushtool -d compiled-crushmap -o crushmap.txt
+ CRUSH_MAP=$(cat crushmap.txt)
+ if grep -q "$HOST_C" <<< "$CRUSH_MAP"; then
+ printf "Saw $HOST_C in CRUSH map after it should have been removed.\n\n$CRUSH_MAP"
+ exit 1
+ fi
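
Stripped of the test scaffolding, the drain sequence exercised above is the same one an operator would run by hand; a condensed sketch, with the hostname as a placeholder:

    # Drain a host, wait until it reports no daemons, then remove it and its CRUSH entry.
    ceph orch host drain <host> --zap-osd-devices
    ceph orch ps --hostname <host>              # repeat until "No daemons reported"
    ceph orch host rm <host> --rm-crush-entry
    # Optional: confirm the host bucket is gone from the CRUSH map.
    ceph osd getcrushmap -o compiled-crushmap && crushtool -d compiled-crushmap -o crushmap.txt
    grep <host> crushmap.txt || echo "host removed from CRUSH map"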