From e6918187568dbd01842d8d1d2c808ce16a894239 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 21 Apr 2024 13:54:28 +0200
Subject: Adding upstream version 18.2.2.

Signed-off-by: Daniel Baumann
---
 qa/suites/orch/cephadm/smoke-roleless/%            |  0
 qa/suites/orch/cephadm/smoke-roleless/.qa          |  1 +
 qa/suites/orch/cephadm/smoke-roleless/0-distro     |  1 +
 .../orch/cephadm/smoke-roleless/0-nvme-loop.yaml   |  1 +
 qa/suites/orch/cephadm/smoke-roleless/1-start.yaml | 24 ++++++
 .../orch/cephadm/smoke-roleless/2-services/.qa     |  1 +
 .../cephadm/smoke-roleless/2-services/basic.yaml   |  0
 .../smoke-roleless/2-services/client-keyring.yaml  | 40 ++++++++++
 .../cephadm/smoke-roleless/2-services/iscsi.yaml   |  8 ++
 .../cephadm/smoke-roleless/2-services/jaeger.yaml  | 12 +++
 .../cephadm/smoke-roleless/2-services/mirror.yaml  |  9 +++
 .../2-services/nfs-haproxy-proto.yaml              | 35 +++++++++
 .../2-services/nfs-ingress-rgw-bucket.yaml         | 89 +++++++++++++++++++++
 .../2-services/nfs-ingress-rgw-user.yaml           | 90 ++++++++++++++++++++++
 .../smoke-roleless/2-services/nfs-ingress.yaml     | 68 ++++++++++++++++
 .../smoke-roleless/2-services/nfs-ingress2.yaml    | 70 +++++++++++++++++
 .../2-services/nfs-keepalive-only.yaml             | 55 +++++++++++++
 .../cephadm/smoke-roleless/2-services/nfs.yaml     | 13 ++++
 .../cephadm/smoke-roleless/2-services/nfs2.yaml    | 12 +++
 .../cephadm/smoke-roleless/2-services/nvmeof.yaml  |  8 ++
 .../smoke-roleless/2-services/rgw-ingress.yaml     | 60 +++++++++++++++
 .../cephadm/smoke-roleless/2-services/rgw.yaml     | 12 +++
 qa/suites/orch/cephadm/smoke-roleless/3-final.yaml | 10 +++
 23 files changed, 619 insertions(+)
 create mode 100644 qa/suites/orch/cephadm/smoke-roleless/%
 create mode 120000 qa/suites/orch/cephadm/smoke-roleless/.qa
 create mode 120000 qa/suites/orch/cephadm/smoke-roleless/0-distro
 create mode 120000 qa/suites/orch/cephadm/smoke-roleless/0-nvme-loop.yaml
 create mode 100644 qa/suites/orch/cephadm/smoke-roleless/1-start.yaml
 create mode 120000 qa/suites/orch/cephadm/smoke-roleless/2-services/.qa
 create mode 100644 qa/suites/orch/cephadm/smoke-roleless/2-services/basic.yaml
 create mode 100644 qa/suites/orch/cephadm/smoke-roleless/2-services/client-keyring.yaml
 create mode 100644 qa/suites/orch/cephadm/smoke-roleless/2-services/iscsi.yaml
 create mode 100644 qa/suites/orch/cephadm/smoke-roleless/2-services/jaeger.yaml
 create mode 100644 qa/suites/orch/cephadm/smoke-roleless/2-services/mirror.yaml
 create mode 100644 qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-haproxy-proto.yaml
 create mode 100644 qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-bucket.yaml
 create mode 100644 qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-user.yaml
 create mode 100644 qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress.yaml
 create mode 100644 qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress2.yaml
 create mode 100644 qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-keepalive-only.yaml
 create mode 100644 qa/suites/orch/cephadm/smoke-roleless/2-services/nfs.yaml
 create mode 100644 qa/suites/orch/cephadm/smoke-roleless/2-services/nfs2.yaml
 create mode 100644 qa/suites/orch/cephadm/smoke-roleless/2-services/nvmeof.yaml
 create mode 100644 qa/suites/orch/cephadm/smoke-roleless/2-services/rgw-ingress.yaml
 create mode 100644 qa/suites/orch/cephadm/smoke-roleless/2-services/rgw.yaml
 create mode 100644 qa/suites/orch/cephadm/smoke-roleless/3-final.yaml

(limited to 'qa/suites/orch/cephadm/smoke-roleless')

diff --git a/qa/suites/orch/cephadm/smoke-roleless/% b/qa/suites/orch/cephadm/smoke-roleless/%
new file mode 100644
index 000000000..e69de29bb
diff --git a/qa/suites/orch/cephadm/smoke-roleless/.qa b/qa/suites/orch/cephadm/smoke-roleless/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/orch/cephadm/smoke-roleless/0-distro b/qa/suites/orch/cephadm/smoke-roleless/0-distro
new file mode 120000
index 000000000..4b341719d
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/0-distro
@@ -0,0 +1 @@
+.qa/distros/container-hosts
\ No newline at end of file
diff --git a/qa/suites/orch/cephadm/smoke-roleless/0-nvme-loop.yaml b/qa/suites/orch/cephadm/smoke-roleless/0-nvme-loop.yaml
new file mode 120000
index 000000000..5206b6edd
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/0-nvme-loop.yaml
@@ -0,0 +1 @@
+.qa/overrides/nvme_loop.yaml
\ No newline at end of file
diff --git a/qa/suites/orch/cephadm/smoke-roleless/1-start.yaml b/qa/suites/orch/cephadm/smoke-roleless/1-start.yaml
new file mode 100644
index 000000000..018356f8f
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/1-start.yaml
@@ -0,0 +1,24 @@
+tasks:
+- cephadm:
+    roleless: true
+- cephadm.shell:
+    host.a:
+      - ceph orch status
+      - ceph orch ps
+      - ceph orch ls
+      - ceph orch host ls
+      - ceph orch device ls
+roles:
+- - host.a
+  - client.0
+- - host.b
+  - client.1
+openstack:
+- volumes: # attached to each instance
+    count: 4
+    size: 10 # GB
+overrides:
+  ceph:
+    conf:
+      osd:
+        osd shutdown pgref assert: true
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/.qa b/qa/suites/orch/cephadm/smoke-roleless/2-services/.qa
new file mode 120000
index 000000000..a602a0353
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/basic.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/basic.yaml
new file mode 100644
index 000000000..e69de29bb
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/client-keyring.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/client-keyring.yaml
new file mode 100644
index 000000000..f00800471
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/client-keyring.yaml
@@ -0,0 +1,40 @@
+tasks:
+- cephadm.shell:
+    host.a:
+      - ceph orch host label add `hostname` foo
+      - ceph auth get-or-create client.foo mon 'allow r'
+      - ceph orch client-keyring set client.foo label:foo --mode 770 --owner 11111:22222
+- exec:
+    host.a:
+      - while ! test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
+      - ls -al /etc/ceph/ceph.client.foo.keyring | grep rwxrwx---
+      - ls -al /etc/ceph/ceph.client.foo.keyring | grep 11111
+      - ls -al /etc/ceph/ceph.client.foo.keyring | grep 22222
+      - test -e /etc/ceph/ceph.conf
+- exec:
+    host.b:
+      - test ! -e /etc/ceph/ceph.client.foo.keyring
+- cephadm.shell:
+    host.b:
+      - ceph orch host label add `hostname` foo
+- exec:
+    host.b:
+      - while ! test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
+      - ls -al /etc/ceph/ceph.client.foo.keyring | grep rwxrwx---
+      - ls -al /etc/ceph/ceph.client.foo.keyring | grep 11111
+      - ls -al /etc/ceph/ceph.client.foo.keyring | grep 22222
+- cephadm.shell:
+    host.b:
+      - ceph orch host label rm `hostname` foo
+- exec:
+    host.b:
+      - while test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
+- exec:
+    host.a:
+      - test -e /etc/ceph/ceph.client.foo.keyring
+- cephadm.shell:
+    host.a:
+      - ceph orch client-keyring rm client.foo
+- exec:
+    host.a:
+      - while test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/iscsi.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/iscsi.yaml
new file mode 100644
index 000000000..7f57076db
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/iscsi.yaml
@@ -0,0 +1,8 @@
+tasks:
+- cephadm.shell:
+    host.a:
+      - ceph osd pool create foo
+      - rbd pool init foo
+      - ceph orch apply iscsi foo u p
+- cephadm.wait_for_service:
+    service: iscsi.foo
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/jaeger.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/jaeger.yaml
new file mode 100644
index 000000000..ad102fedd
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/jaeger.yaml
@@ -0,0 +1,12 @@
+tasks:
+- cephadm.shell:
+    host.a:
+      - ceph orch apply jaeger
+- cephadm.wait_for_service:
+    service: elasticsearch
+- cephadm.wait_for_service:
+    service: jaeger-collector
+- cephadm.wait_for_service:
+    service: jaeger-query
+- cephadm.wait_for_service:
+    service: jaeger-agent
\ No newline at end of file
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/mirror.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/mirror.yaml
new file mode 100644
index 000000000..681e1e04a
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/mirror.yaml
@@ -0,0 +1,9 @@
+tasks:
+- cephadm.shell:
+    host.a:
+      - ceph orch apply rbd-mirror "--placement=*"
+      - ceph orch apply cephfs-mirror "--placement=*"
+- cephadm.wait_for_service:
+    service: rbd-mirror
+- cephadm.wait_for_service:
+    service: cephfs-mirror
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-haproxy-proto.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-haproxy-proto.yaml
new file mode 100644
index 000000000..477e5c443
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-haproxy-proto.yaml
@@ -0,0 +1,35 @@
+tasks:
+- vip:
+
+# make sure cephadm notices the new IP
+- cephadm.shell:
+    host.a:
+      - ceph orch device ls --refresh
+
+# stop kernel nfs server, if running
+- vip.exec:
+    all-hosts:
+      - systemctl stop nfs-server
+
+# use nfs module to create cluster and export
+- cephadm.shell:
+    host.a:
+      - ceph fs volume create fs1
+      - ceph nfs cluster create happy --ingress --virtual-ip={{VIP0}} --ingress-mode=haproxy-protocol
+      - ceph nfs export create cephfs --fsname fs1 --cluster-id happy --pseudo-path /d1
+
+# wait for services to start
+- cephadm.wait_for_service:
+    service: nfs.happy
+- cephadm.wait_for_service:
+    service: ingress.nfs.happy
+
+# make sure mount can be reached over VIP, ensuring both that
+# keepalived is maintaining the VIP and that the nfs has bound to it
+- vip.exec:
+    host.a:
+      - mkdir /mnt/happy
+      - sleep 1
+      - mount -t nfs {{VIP0}}:/d1 /mnt/happy
+      - echo test > /mnt/happy/testfile
+      - sync
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-bucket.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-bucket.yaml
new file mode 100644
index 000000000..3f4964978
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-bucket.yaml
@@ -0,0 +1,89 @@
+tasks:
+- vip:
+
+# make sure cephadm notices the new IP
+- cephadm.shell:
+    host.a:
+      - ceph orch device ls --refresh
+
+# stop kernel nfs server, if running
+- vip.exec:
+    all-hosts:
+      - systemctl stop nfs-server
+
+- cephadm.shell:
+    host.a:
+      - ceph orch apply rgw foorgw --port 8800
+      - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}}
+
+- vip.exec:
+    host.a:
+      - dnf install -y python3-boto3 || apt install -y python3-boto3
+      - /home/ubuntu/cephtest/cephadm shell radosgw-admin user create --uid foouser --display-name foo > /tmp/user.json
+
+- python:
+    host.a: |
+      import boto3
+      import json
+
+      with open('/tmp/user.json', 'rt') as f:
+          info = json.loads(f.read())
+      s3 = boto3.resource(
+          's3',
+          aws_access_key_id=info['keys'][0]['access_key'],
+          aws_secret_access_key=info['keys'][0]['secret_key'],
+          endpoint_url='http://localhost:8800',
+      )
+      bucket = s3.Bucket('foobucket')
+      bucket.create()
+      bucket.put_object(Key='myobject', Body='thebody')
+
+- cephadm.shell:
+    host.a:
+      - ceph nfs export create rgw --bucket foobucket --cluster-id foo --pseudo-path /foobucket
+
+- cephadm.wait_for_service:
+    service: nfs.foo
+- cephadm.wait_for_service:
+    service: ingress.nfs.foo
+
+## export and mount
+
+- vip.exec:
+    host.a:
+      - mkdir /mnt/foo
+      - sleep 5
+      - mount -t nfs {{VIP0}}:/foobucket /mnt/foo
+      - find /mnt/foo -ls
+      - grep thebody /mnt/foo/myobject
+      - echo test > /mnt/foo/newobject
+      - sync
+
+- python:
+    host.a: |
+      import boto3
+      import json
+      from io import BytesIO
+
+      with open('/tmp/user.json', 'rt') as f:
+          info = json.loads(f.read())
+      s3 = boto3.resource(
+          's3',
+          aws_access_key_id=info['keys'][0]['access_key'],
+          aws_secret_access_key=info['keys'][0]['secret_key'],
+          endpoint_url='http://localhost:8800',
+      )
+      bucket = s3.Bucket('foobucket')
+      data = BytesIO()
+      bucket.download_fileobj(Fileobj=data, Key='newobject')
+      print(data.getvalue())
+      assert data.getvalue().decode() == 'test\n'
+
+- vip.exec:
+    host.a:
+      - umount /mnt/foo
+
+- cephadm.shell:
+    host.a:
+      - ceph nfs export rm foo /foobucket
+      - ceph nfs cluster rm foo
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-user.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-user.yaml
new file mode 100644
index 000000000..721aecfc3
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-user.yaml
@@ -0,0 +1,90 @@
+tasks:
+- vip:
+
+# make sure cephadm notices the new IP
+- cephadm.shell:
+    host.a:
+      - ceph orch device ls --refresh
+
+# stop kernel nfs server, if running
+- vip.exec:
+    all-hosts:
+      - systemctl stop nfs-server
+
+- cephadm.shell:
+    host.a:
+      - ceph orch apply rgw foorgw --port 8800
+      - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}}
+
+- vip.exec:
+    host.a:
+      - dnf install -y python3-boto3 || apt install -y python3-boto3
+      - /home/ubuntu/cephtest/cephadm shell radosgw-admin user create --uid foouser --display-name foo > /tmp/user.json
+
+- python:
+    host.a: |
+      import boto3
+      import json
+
+      with open('/tmp/user.json', 'rt') as f:
+          info = json.loads(f.read())
+      s3 = boto3.resource(
+          's3',
+          aws_access_key_id=info['keys'][0]['access_key'],
+          aws_secret_access_key=info['keys'][0]['secret_key'],
+          endpoint_url='http://localhost:8800',
+      )
+      bucket = s3.Bucket('foobucket')
+      bucket.create()
+      bucket.put_object(Key='myobject', Body='thebody')
+
+- cephadm.shell:
+    host.a:
+      - ceph nfs export create rgw --cluster-id foo --pseudo-path /foouser --user-id foouser
+
+- cephadm.wait_for_service:
+    service: nfs.foo
+- cephadm.wait_for_service:
+    service: ingress.nfs.foo
+
+## export and mount
+
+- vip.exec:
+    host.a:
+      - mkdir /mnt/foo
+      - sleep 5
+      - mount -t nfs {{VIP0}}:/foouser /mnt/foo
+      - test -d /mnt/foo/foobucket
+      - find /mnt/foo -ls
+      - grep thebody /mnt/foo/foobucket/myobject
+      - echo test > /mnt/foo/foobucket/newobject
+      - sync
+
+- python:
+    host.a: |
+      import boto3
+      import json
+      from io import BytesIO
+
+      with open('/tmp/user.json', 'rt') as f:
+          info = json.loads(f.read())
+      s3 = boto3.resource(
+          's3',
+          aws_access_key_id=info['keys'][0]['access_key'],
+          aws_secret_access_key=info['keys'][0]['secret_key'],
+          endpoint_url='http://localhost:8800',
+      )
+      bucket = s3.Bucket('foobucket')
+      data = BytesIO()
+      bucket.download_fileobj(Fileobj=data, Key='newobject')
+      print(data.getvalue())
+      assert data.getvalue().decode() == 'test\n'
+
+- vip.exec:
+    host.a:
+      - umount /mnt/foo
+
+- cephadm.shell:
+    host.a:
+      - ceph nfs export rm foo /foouser
+      - ceph nfs cluster rm foo
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress.yaml
new file mode 100644
index 000000000..b4e843df2
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress.yaml
@@ -0,0 +1,68 @@
+tasks:
+- vip:
+
+# make sure cephadm notices the new IP
+- cephadm.shell:
+    host.a:
+      - ceph orch device ls --refresh
+
+# stop kernel nfs server, if running
+- vip.exec:
+    all-hosts:
+      - systemctl stop nfs-server
+
+- cephadm.shell:
+    host.a:
+      - ceph fs volume create foofs
+
+# deploy nfs + ingress
+- cephadm.apply:
+    specs:
+      - service_type: nfs
+        service_id: foo
+        placement:
+          count: 2
+        spec:
+          port: 12049
+      - service_type: ingress
+        service_id: nfs.foo
+        spec:
+          backend_service: nfs.foo
+          frontend_port: 2049
+          monitor_port: 9002
+          virtual_ip: "{{VIP0}}/{{VIPPREFIXLEN}}"
+- cephadm.wait_for_service:
+    service: nfs.foo
+- cephadm.wait_for_service:
+    service: ingress.nfs.foo
+
+## export and mount
+
+- cephadm.shell:
+    host.a:
+      - ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake
+
+- vip.exec:
+    host.a:
+      - mkdir /mnt/foo
+      - sleep 5
+      - mount -t nfs {{VIP0}}:/fake /mnt/foo
+      - echo test > /mnt/foo/testfile
+      - sync
+
+# take each gateway down in turn and ensure things still work
+- cephadm.shell:
+    volumes:
+      - /mnt/foo:/mnt/foo
+    host.a:
+      - |
+        echo "Check with each haproxy down in turn..."
+        for haproxy in `ceph orch ps | grep ^haproxy.nfs.foo. | awk '{print $1}'`; do
+          ceph orch daemon stop $haproxy
+          while ! ceph orch ps | grep $haproxy | grep stopped; do sleep 1 ; done
+          cat /mnt/foo/testfile
+          echo $haproxy > /mnt/foo/testfile
+          sync
+          ceph orch daemon start $haproxy
+          while ! ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done
+        done
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress2.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress2.yaml
new file mode 100644
index 000000000..a47dd9d76
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress2.yaml
@@ -0,0 +1,70 @@
+tasks:
+- vip:
+
+# make sure cephadm notices the new IP
+- cephadm.shell:
+    host.a:
+      - ceph orch device ls --refresh
+
+# stop kernel nfs server, if running
+- vip.exec:
+    all-hosts:
+      - systemctl stop nfs-server
+
+- cephadm.shell:
+    host.a:
+      - ceph fs volume create foofs
+      - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}} --port 2999
+      - ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake
+
+- cephadm.wait_for_service:
+    service: nfs.foo
+- cephadm.wait_for_service:
+    service: ingress.nfs.foo
+
+## export and mount
+
+- vip.exec:
+    host.a:
+      - mkdir /mnt/foo
+      - sleep 5
+      - mount -t nfs {{VIP0}}:/fake /mnt/foo -o port=2999
+      - echo test > /mnt/foo/testfile
+      - sync
+
+# take each gateway down in turn and ensure things still work
+- cephadm.shell:
+    volumes:
+      - /mnt/foo:/mnt/foo
+    host.a:
+      - |
+        echo "Check with each haproxy down in turn..."
+        for haproxy in `ceph orch ps | grep ^haproxy.nfs.foo. | awk '{print $1}'`; do
+          ceph orch daemon stop $haproxy
+          while ! ceph orch ps | grep $haproxy | grep stopped; do sleep 1 ; done
+          cat /mnt/foo/testfile
+          echo $haproxy > /mnt/foo/testfile
+          sync
+          ceph orch daemon start $haproxy
+          while ! ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done
+        done
+
+# take each ganesha down in turn.
+# simulate "failure" by deleting the container
+- vip.exec:
+    all-hosts:
+      - |
+        echo "Check with $(hostname) ganesha(s) down..."
+        for c in `systemctl | grep ceph- | grep @nfs | awk '{print $1}'`; do
+          cid=`echo $c | sed 's/@/-/'`
+          id=`echo $c | cut -d @ -f 2 | sed 's/.service$//'`
+          fsid=`echo $c | cut -d @ -f 1 | cut -d - -f 2-`
+          echo "Removing daemon $id fsid $fsid..."
+          sudo $TESTDIR/cephadm rm-daemon --fsid $fsid --name $id
+
+          echo "Waking up cephadm..."
+          sudo $TESTDIR/cephadm shell -- ceph orch ps --refresh
+
+          while ! timeout 1 cat /mnt/foo/testfile ; do true ; done
+          echo "Mount is back!"
+        done
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-keepalive-only.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-keepalive-only.yaml
new file mode 100644
index 000000000..ba5afed47
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-keepalive-only.yaml
@@ -0,0 +1,55 @@
+tasks:
+- vip:
+
+# make sure cephadm notices the new IP
+- cephadm.shell:
+    host.a:
+      - ceph orch device ls --refresh
+
+# stop kernel nfs server, if running
+- vip.exec:
+    all-hosts:
+      - systemctl stop nfs-server
+
+- cephadm.shell:
+    host.a:
+      - ceph fs volume create foofs
+
+# deploy nfs + keepalive-only ingress service
+- cephadm.apply:
+    specs:
+      - service_type: nfs
+        service_id: foo
+        placement:
+          count: 1
+        spec:
+          port: 2049
+          virtual_ip: "{{VIP0}}"
+      - service_type: ingress
+        service_id: nfs.foo
+        placement:
+          count: 1
+        spec:
+          backend_service: nfs.foo
+          monitor_port: 9002
+          virtual_ip: "{{VIP0}}/{{VIPPREFIXLEN}}"
+          keepalive_only: true
+- cephadm.wait_for_service:
+    service: nfs.foo
+- cephadm.wait_for_service:
+    service: ingress.nfs.foo
+
+# export and mount
+- cephadm.shell:
+    host.a:
+      - ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake
+
+# make sure mount can be reached over VIP, ensuring both that
+# keepalived is maintaining the VIP and that the nfs has bound to it
+- vip.exec:
+    host.a:
+      - mkdir /mnt/foo
+      - sleep 5
+      - mount -t nfs {{VIP0}}:/fake /mnt/foo
+      - echo test > /mnt/foo/testfile
+      - sync
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs.yaml
new file mode 100644
index 000000000..194f4e9de
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs.yaml
@@ -0,0 +1,13 @@
+tasks:
+
+# stop kernel nfs server, if running
+- vip.exec:
+    all-hosts:
+      - systemctl stop nfs-server
+
+- cephadm.apply:
+    specs:
+      - service_type: nfs
+        service_id: foo
+- cephadm.wait_for_service:
+    service: nfs.foo
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs2.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs2.yaml
new file mode 100644
index 000000000..959c5aa77
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs2.yaml
@@ -0,0 +1,12 @@
+tasks:
+
+# stop kernel nfs server, if running
+- vip.exec:
+    all-hosts:
+      - systemctl stop nfs-server
+
+- cephadm.shell:
+    host.a:
+      - ceph nfs cluster create foo
+- cephadm.wait_for_service:
+    service: nfs.foo
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/nvmeof.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/nvmeof.yaml
new file mode 100644
index 000000000..4c5e26740
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/nvmeof.yaml
@@ -0,0 +1,8 @@
+tasks:
+- cephadm.shell:
+    host.a:
+      - ceph osd pool create foo
+      - rbd pool init foo
+      - ceph orch apply nvmeof foo
+- cephadm.wait_for_service:
+    service: nvmeof.foo
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/rgw-ingress.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/rgw-ingress.yaml
new file mode 100644
index 000000000..710edab73
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/rgw-ingress.yaml
@@ -0,0 +1,60 @@
+tasks:
+- vip:
+
+# make sure cephadm notices the new IP
+- cephadm.shell:
+    host.a:
+      - ceph orch device ls --refresh
+
+# deploy rgw + ingress
+- cephadm.apply:
+    specs:
+      - service_type: rgw
+        service_id: foo
+        placement:
+          count: 4
+          host_pattern: "*"
+        spec:
+          rgw_frontend_port: 8000
+      - service_type: ingress
+        service_id: rgw.foo
+        placement:
+          count: 2
+        spec:
+          backend_service: rgw.foo
+          frontend_port: 9000
+          monitor_port: 9001
+          virtual_ip: "{{VIP0}}/{{VIPPREFIXLEN}}"
+- cephadm.wait_for_service:
+    service: rgw.foo
+- cephadm.wait_for_service:
+    service: ingress.rgw.foo
+
+# take each component down in turn and ensure things still work
+- cephadm.shell:
+    host.a:
+      - |
+        echo "Check while healthy..."
+        curl http://{{VIP0}}:9000/
+
+        # stop each rgw in turn
+        echo "Check with each rgw stopped in turn..."
+        for rgw in `ceph orch ps | grep ^rgw.foo. | awk '{print $1}'`; do
+          ceph orch daemon stop $rgw
+          while ! ceph orch ps | grep $rgw | grep stopped; do sleep 1 ; done
+          while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done
+          ceph orch daemon start $rgw
+          while ! ceph orch ps | grep $rgw | grep running; do sleep 1 ; done
+        done
+
+        # stop each haproxy in turn
+        echo "Check with each haproxy down in turn..."
+        for haproxy in `ceph orch ps | grep ^haproxy.rgw.foo. | awk '{print $1}'`; do
+          ceph orch daemon stop $haproxy
+          while ! ceph orch ps | grep $haproxy | grep stopped; do sleep 1 ; done
+          while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done
+          ceph orch daemon start $haproxy
+          while ! ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done
+        done
+
+        while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/rgw.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/rgw.yaml
new file mode 100644
index 000000000..cb2c6f4b6
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/rgw.yaml
@@ -0,0 +1,12 @@
+tasks:
+- cephadm.apply:
+    specs:
+      - service_type: rgw
+        service_id: foo
+        placement:
+          count_per_host: 4
+          host_pattern: "*"
+        spec:
+          rgw_frontend_port: 8000
+- cephadm.wait_for_service:
+    service: rgw.foo
diff --git a/qa/suites/orch/cephadm/smoke-roleless/3-final.yaml b/qa/suites/orch/cephadm/smoke-roleless/3-final.yaml
new file mode 100644
index 000000000..bb938848c
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/3-final.yaml
@@ -0,0 +1,10 @@
+tasks:
+- cephadm.shell:
+    host.a:
+      - stat -c '%u %g' /var/log/ceph | grep '167 167'
+      - ceph orch status
+      - ceph orch ps
+      - ceph orch ls
+      - ceph orch host ls
+      - ceph orch device ls
+      - ceph orch ls | grep '^osd.all-available-devices '
--
cgit v1.2.3