Diffstat (limited to 'qa/suites/orch/cephadm/smoke-roleless/2-services')
-rw-r--r--  qa/suites/orch/cephadm/smoke-roleless/2-services/basic.yaml                    0
-rw-r--r--  qa/suites/orch/cephadm/smoke-roleless/2-services/client-keyring.yaml          40
-rw-r--r--  qa/suites/orch/cephadm/smoke-roleless/2-services/iscsi.yaml                     8
-rw-r--r--  qa/suites/orch/cephadm/smoke-roleless/2-services/mirror.yaml                    9
-rw-r--r--  qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-bucket.yaml   89
-rw-r--r--  qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-user.yaml     90
-rw-r--r--  qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress.yaml              68
-rw-r--r--  qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress2.yaml             70
-rw-r--r--  qa/suites/orch/cephadm/smoke-roleless/2-services/nfs.yaml                      13
-rw-r--r--  qa/suites/orch/cephadm/smoke-roleless/2-services/nfs2.yaml                     12
-rw-r--r--  qa/suites/orch/cephadm/smoke-roleless/2-services/rgw-ingress.yaml              60
-rw-r--r--  qa/suites/orch/cephadm/smoke-roleless/2-services/rgw.yaml                      12
12 files changed, 471 insertions(+), 0 deletions(-)
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/basic.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/basic.yaml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/basic.yaml
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/client-keyring.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/client-keyring.yaml
new file mode 100644
index 000000000..f00800471
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/client-keyring.yaml
@@ -0,0 +1,40 @@
+tasks:
+- cephadm.shell:
+    host.a:
+      - ceph orch host label add `hostname` foo
+      - ceph auth get-or-create client.foo mon 'allow r'
+      - ceph orch client-keyring set client.foo label:foo --mode 770 --owner 11111:22222
+- exec:
+    host.a:
+      - while ! test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
+      - ls -al /etc/ceph/ceph.client.foo.keyring | grep rwxrwx---
+      - ls -al /etc/ceph/ceph.client.foo.keyring | grep 11111
+      - ls -al /etc/ceph/ceph.client.foo.keyring | grep 22222
+      - test -e /etc/ceph/ceph.conf
+- exec:
+    host.b:
+      - test ! -e /etc/ceph/ceph.client.foo.keyring
+- cephadm.shell:
+    host.b:
+      - ceph orch host label add `hostname` foo
+- exec:
+    host.b:
+      - while ! test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
+      - ls -al /etc/ceph/ceph.client.foo.keyring | grep rwxrwx---
+      - ls -al /etc/ceph/ceph.client.foo.keyring | grep 11111
+      - ls -al /etc/ceph/ceph.client.foo.keyring | grep 22222
+- cephadm.shell:
+    host.b:
+      - ceph orch host label rm `hostname` foo
+- exec:
+    host.b:
+      - while test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
+- exec:
+    host.a:
+      - test -e /etc/ceph/ceph.client.foo.keyring
+- cephadm.shell:
+    host.a:
+      - ceph orch client-keyring rm client.foo
+- exec:
+    host.a:
+      - while test -e /etc/ceph/ceph.client.foo.keyring ; do sleep 1 ; done
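Note: the `ls -al` greps in this test rely on the mode and ownership set by `ceph orch client-keyring set ... --mode 770 --owner 11111:22222`. A quick manual check on host.a might look like the sketch below; the size and timestamp are made up, only the mode and owner/group columns are expected to match.

ls -al /etc/ceph/ceph.client.foo.keyring
# -rwxrwx---. 1 11111 22222 151 Jan  1 00:00 /etc/ceph/ceph.client.foo.keyring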
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/iscsi.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/iscsi.yaml
new file mode 100644
index 000000000..7f57076db
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/iscsi.yaml
@@ -0,0 +1,8 @@
+tasks:
+- cephadm.shell:
+    host.a:
+      - ceph osd pool create foo
+      - rbd pool init foo
+      - ceph orch apply iscsi foo u p
+- cephadm.wait_for_service:
+    service: iscsi.foo
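For reference, `ceph orch apply iscsi foo u p` takes the pool name and the gateway API credentials as positional arguments. A hedged sketch of the equivalent declarative form is below (field names assumed from the cephadm iscsi service spec; `u`/`p` stand in for a real api_user/api_password):

cat > iscsi.yaml <<EOF
service_type: iscsi
service_id: foo
spec:
  pool: foo
  api_user: u
  api_password: p
EOF
ceph orch apply -i iscsi.yaml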
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/mirror.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/mirror.yaml
new file mode 100644
index 000000000..681e1e04a
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/mirror.yaml
@@ -0,0 +1,9 @@
+tasks:
+- cephadm.shell:
+    host.a:
+      - ceph orch apply rbd-mirror "--placement=*"
+      - ceph orch apply cephfs-mirror "--placement=*"
+- cephadm.wait_for_service:
+    service: rbd-mirror
+- cephadm.wait_for_service:
+    service: cephfs-mirror
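Here `--placement=*` asks cephadm to schedule one daemon on every managed host. A hypothetical spec-file equivalent for the rbd-mirror case would be roughly:

cat > rbd-mirror.yaml <<EOF
service_type: rbd-mirror
placement:
  host_pattern: "*"
EOF
ceph orch apply -i rbd-mirror.yaml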
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-bucket.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-bucket.yaml
new file mode 100644
index 000000000..3f4964978
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-bucket.yaml
@@ -0,0 +1,89 @@
+tasks:
+- vip:
+
+# make sure cephadm notices the new IP
+- cephadm.shell:
+    host.a:
+      - ceph orch device ls --refresh
+
+# stop kernel nfs server, if running
+- vip.exec:
+    all-hosts:
+      - systemctl stop nfs-server
+
+- cephadm.shell:
+    host.a:
+      - ceph orch apply rgw foorgw --port 8800
+      - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}}
+
+- vip.exec:
+    host.a:
+      - dnf install -y python3-boto3 || apt install -y python3-boto3
+      - /home/ubuntu/cephtest/cephadm shell radosgw-admin user create --uid foouser --display-name foo > /tmp/user.json
+
+- python:
+    host.a: |
+      import boto3
+      import json
+
+      with open('/tmp/user.json', 'rt') as f:
+          info = json.loads(f.read())
+      s3 = boto3.resource(
+          's3',
+          aws_access_key_id=info['keys'][0]['access_key'],
+          aws_secret_access_key=info['keys'][0]['secret_key'],
+          endpoint_url='http://localhost:8800',
+      )
+      bucket = s3.Bucket('foobucket')
+      bucket.create()
+      bucket.put_object(Key='myobject', Body='thebody')
+
+- cephadm.shell:
+    host.a:
+      - ceph nfs export create rgw --bucket foobucket --cluster-id foo --pseudo-path /foobucket
+
+- cephadm.wait_for_service:
+    service: nfs.foo
+- cephadm.wait_for_service:
+    service: ingress.nfs.foo
+
+## export and mount
+
+- vip.exec:
+    host.a:
+      - mkdir /mnt/foo
+      - sleep 5
+      - mount -t nfs {{VIP0}}:/foobucket /mnt/foo
+      - find /mnt/foo -ls
+      - grep thebody /mnt/foo/myobject
+      - echo test > /mnt/foo/newobject
+      - sync
+
+- python:
+    host.a: |
+      import boto3
+      import json
+      from io import BytesIO
+
+      with open('/tmp/user.json', 'rt') as f:
+          info = json.loads(f.read())
+      s3 = boto3.resource(
+          's3',
+          aws_access_key_id=info['keys'][0]['access_key'],
+          aws_secret_access_key=info['keys'][0]['secret_key'],
+          endpoint_url='http://localhost:8800',
+      )
+      bucket = s3.Bucket('foobucket')
+      data = BytesIO()
+      bucket.download_fileobj(Fileobj=data, Key='newobject')
+      print(data.getvalue())
+      assert data.getvalue().decode() == 'test\n'
+
+- vip.exec:
+    host.a:
+      - umount /mnt/foo
+
+- cephadm.shell:
+    host.a:
+      - ceph nfs export rm foo /foobucket
+      - ceph nfs cluster rm foo
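If the export needs to be inspected while the test is running, the NFS mgr module exposes listing commands; a hedged example (the exact output format varies by release):

ceph nfs cluster info foo    # ingress virtual IP and backend endpoints
ceph nfs export ls foo       # should list /foobucket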
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-user.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-user.yaml
new file mode 100644
index 000000000..721aecfc3
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-user.yaml
@@ -0,0 +1,90 @@
+tasks:
+- vip:
+
+# make sure cephadm notices the new IP
+- cephadm.shell:
+    host.a:
+      - ceph orch device ls --refresh
+
+# stop kernel nfs server, if running
+- vip.exec:
+    all-hosts:
+      - systemctl stop nfs-server
+
+- cephadm.shell:
+    host.a:
+      - ceph orch apply rgw foorgw --port 8800
+      - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}}
+
+- vip.exec:
+    host.a:
+      - dnf install -y python3-boto3 || apt install -y python3-boto3
+      - /home/ubuntu/cephtest/cephadm shell radosgw-admin user create --uid foouser --display-name foo > /tmp/user.json
+
+- python:
+    host.a: |
+      import boto3
+      import json
+
+      with open('/tmp/user.json', 'rt') as f:
+          info = json.loads(f.read())
+      s3 = boto3.resource(
+          's3',
+          aws_access_key_id=info['keys'][0]['access_key'],
+          aws_secret_access_key=info['keys'][0]['secret_key'],
+          endpoint_url='http://localhost:8800',
+      )
+      bucket = s3.Bucket('foobucket')
+      bucket.create()
+      bucket.put_object(Key='myobject', Body='thebody')
+
+- cephadm.shell:
+    host.a:
+      - ceph nfs export create rgw --cluster-id foo --pseudo-path /foouser --user-id foouser
+
+- cephadm.wait_for_service:
+    service: nfs.foo
+- cephadm.wait_for_service:
+    service: ingress.nfs.foo
+
+## export and mount
+
+- vip.exec:
+    host.a:
+      - mkdir /mnt/foo
+      - sleep 5
+      - mount -t nfs {{VIP0}}:/foouser /mnt/foo
+      - test -d /mnt/foo/foobucket
+      - find /mnt/foo -ls
+      - grep thebody /mnt/foo/foobucket/myobject
+      - echo test > /mnt/foo/foobucket/newobject
+      - sync
+
+- python:
+    host.a: |
+      import boto3
+      import json
+      from io import BytesIO
+
+      with open('/tmp/user.json', 'rt') as f:
+          info = json.loads(f.read())
+      s3 = boto3.resource(
+          's3',
+          aws_access_key_id=info['keys'][0]['access_key'],
+          aws_secret_access_key=info['keys'][0]['secret_key'],
+          endpoint_url='http://localhost:8800',
+      )
+      bucket = s3.Bucket('foobucket')
+      data = BytesIO()
+      bucket.download_fileobj(Fileobj=data, Key='newobject')
+      print(data.getvalue())
+      assert data.getvalue().decode() == 'test\n'
+
+- vip.exec:
+    host.a:
+      - umount /mnt/foo
+
+- cephadm.shell:
+    host.a:
+      - ceph nfs export rm foo /foouser
+      - ceph nfs cluster rm foo
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress.yaml
new file mode 100644
index 000000000..b4e843df2
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress.yaml
@@ -0,0 +1,68 @@
+tasks:
+- vip:
+
+# make sure cephadm notices the new IP
+- cephadm.shell:
+    host.a:
+      - ceph orch device ls --refresh
+
+# stop kernel nfs server, if running
+- vip.exec:
+    all-hosts:
+      - systemctl stop nfs-server
+
+- cephadm.shell:
+    host.a:
+      - ceph fs volume create foofs
+
+# deploy nfs + ingress
+- cephadm.apply:
+    specs:
+      - service_type: nfs
+        service_id: foo
+        placement:
+          count: 2
+        spec:
+          port: 12049
+      - service_type: ingress
+        service_id: nfs.foo
+        spec:
+          backend_service: nfs.foo
+          frontend_port: 2049
+          monitor_port: 9002
+          virtual_ip: "{{VIP0}}/{{VIPPREFIXLEN}}"
+- cephadm.wait_for_service:
+    service: nfs.foo
+- cephadm.wait_for_service:
+    service: ingress.nfs.foo
+
+## export and mount
+
+- cephadm.shell:
+    host.a:
+      - ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake
+
+- vip.exec:
+    host.a:
+      - mkdir /mnt/foo
+      - sleep 5
+      - mount -t nfs {{VIP0}}:/fake /mnt/foo
+      - echo test > /mnt/foo/testfile
+      - sync
+
+# take each gateway down in turn and ensure things still work
+- cephadm.shell:
+    volumes:
+      - /mnt/foo:/mnt/foo
+    host.a:
+      - |
+        echo "Check with each haproxy down in turn..."
+        for haproxy in `ceph orch ps | grep ^haproxy.nfs.foo. | awk '{print $1}'`; do
+          ceph orch daemon stop $haproxy
+          while ! ceph orch ps | grep $haproxy | grep stopped; do sleep 1 ; done
+          cat /mnt/foo/testfile
+          echo $haproxy > /mnt/foo/testfile
+          sync
+          ceph orch daemon start $haproxy
+          while ! ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done
+        done
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress2.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress2.yaml
new file mode 100644
index 000000000..a47dd9d76
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress2.yaml
@@ -0,0 +1,70 @@
+tasks:
+- vip:
+
+# make sure cephadm notices the new IP
+- cephadm.shell:
+    host.a:
+      - ceph orch device ls --refresh
+
+# stop kernel nfs server, if running
+- vip.exec:
+    all-hosts:
+      - systemctl stop nfs-server
+
+- cephadm.shell:
+    host.a:
+      - ceph fs volume create foofs
+      - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}} --port 2999
+      - ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake
+
+- cephadm.wait_for_service:
+    service: nfs.foo
+- cephadm.wait_for_service:
+    service: ingress.nfs.foo
+
+## export and mount
+
+- vip.exec:
+    host.a:
+      - mkdir /mnt/foo
+      - sleep 5
+      - mount -t nfs {{VIP0}}:/fake /mnt/foo -o port=2999
+      - echo test > /mnt/foo/testfile
+      - sync
+
+# take each gateway down in turn and ensure things still work
+- cephadm.shell:
+    volumes:
+      - /mnt/foo:/mnt/foo
+    host.a:
+      - |
+        echo "Check with each haproxy down in turn..."
+        for haproxy in `ceph orch ps | grep ^haproxy.nfs.foo. | awk '{print $1}'`; do
+          ceph orch daemon stop $haproxy
+          while ! ceph orch ps | grep $haproxy | grep stopped; do sleep 1 ; done
+          cat /mnt/foo/testfile
+          echo $haproxy > /mnt/foo/testfile
+          sync
+          ceph orch daemon start $haproxy
+          while ! ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done
+        done
+
+# take each ganesha down in turn.
+# simulate "failure" by deleting the container
+- vip.exec:
+    all-hosts:
+      - |
+        echo "Check with $(hostname) ganesha(s) down..."
+        for c in `systemctl | grep ceph- | grep @nfs | awk '{print $1}'`; do
+          cid=`echo $c | sed 's/@/-/'`
+          id=`echo $c | cut -d @ -f 2 | sed 's/.service$//'`
+          fsid=`echo $c | cut -d @ -f 1 | cut -d - -f 2-`
+          echo "Removing daemon $id fsid $fsid..."
+          sudo $TESTDIR/cephadm rm-daemon --fsid $fsid --name $id
+
+          echo "Waking up cephadm..."
+          sudo $TESTDIR/cephadm shell -- ceph orch ps --refresh
+
+          while ! timeout 1 cat /mnt/foo/testfile ; do true ; done
+          echo "Mount is back!"
+        done
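The cut/sed pipeline above decomposes a cephadm systemd unit name into the daemon name and the cluster fsid. A self-contained sketch with a made-up fsid and daemon name shows what each step yields:

c=ceph-4b5c8c0a-ff60-454b-a1b4-9747aa737fcd@nfs.foo.0.0.host1.xyzabc.service
echo $c | cut -d @ -f 2 | sed 's/.service$//'   # daemon name: nfs.foo.0.0.host1.xyzabc
echo $c | cut -d @ -f 1 | cut -d - -f 2-        # fsid: 4b5c8c0a-ff60-454b-a1b4-9747aa737fcd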
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs.yaml
new file mode 100644
index 000000000..194f4e9de
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs.yaml
@@ -0,0 +1,13 @@
+tasks:
+
+# stop kernel nfs server, if running
+- vip.exec:
+    all-hosts:
+      - systemctl stop nfs-server
+
+- cephadm.apply:
+    specs:
+      - service_type: nfs
+        service_id: foo
+- cephadm.wait_for_service:
+    service: nfs.foo
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs2.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs2.yaml
new file mode 100644
index 000000000..959c5aa77
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs2.yaml
@@ -0,0 +1,12 @@
+tasks:
+
+# stop kernel nfs server, if running
+- vip.exec:
+    all-hosts:
+      - systemctl stop nfs-server
+
+- cephadm.shell:
+    host.a:
+      - ceph nfs cluster create foo
+- cephadm.wait_for_service:
+    service: nfs.foo
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/rgw-ingress.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/rgw-ingress.yaml
new file mode 100644
index 000000000..710edab73
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/rgw-ingress.yaml
@@ -0,0 +1,60 @@
+tasks:
+- vip:
+
+# make sure cephadm notices the new IP
+- cephadm.shell:
+    host.a:
+      - ceph orch device ls --refresh
+
+# deploy rgw + ingress
+- cephadm.apply:
+    specs:
+      - service_type: rgw
+        service_id: foo
+        placement:
+          count: 4
+          host_pattern: "*"
+        spec:
+          rgw_frontend_port: 8000
+      - service_type: ingress
+        service_id: rgw.foo
+        placement:
+          count: 2
+        spec:
+          backend_service: rgw.foo
+          frontend_port: 9000
+          monitor_port: 9001
+          virtual_ip: "{{VIP0}}/{{VIPPREFIXLEN}}"
+- cephadm.wait_for_service:
+    service: rgw.foo
+- cephadm.wait_for_service:
+    service: ingress.rgw.foo
+
+# take each component down in turn and ensure things still work
+- cephadm.shell:
+    host.a:
+      - |
+        echo "Check while healthy..."
+        curl http://{{VIP0}}:9000/
+
+        # stop each rgw in turn
+        echo "Check with each rgw stopped in turn..."
+        for rgw in `ceph orch ps | grep ^rgw.foo. | awk '{print $1}'`; do
+          ceph orch daemon stop $rgw
+          while ! ceph orch ps | grep $rgw | grep stopped; do sleep 1 ; done
+          while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done
+          ceph orch daemon start $rgw
+          while ! ceph orch ps | grep $rgw | grep running; do sleep 1 ; done
+        done
+
+        # stop each haproxy in turn
+        echo "Check with each haproxy down in turn..."
+        for haproxy in `ceph orch ps | grep ^haproxy.rgw.foo. | awk '{print $1}'`; do
+          ceph orch daemon stop $haproxy
+          while ! ceph orch ps | grep $haproxy | grep stopped; do sleep 1 ; done
+          while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done
+          ceph orch daemon start $haproxy
+          while ! ceph orch ps | grep $haproxy | grep running; do sleep 1 ; done
+        done
+
+        while ! curl http://{{VIP0}}:9000/ ; do sleep 1 ; done
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/rgw.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/rgw.yaml
new file mode 100644
index 000000000..cb2c6f4b6
--- /dev/null
+++ b/qa/suites/orch/cephadm/smoke-roleless/2-services/rgw.yaml
@@ -0,0 +1,12 @@
+tasks:
+- cephadm.apply:
+    specs:
+      - service_type: rgw
+        service_id: foo
+        placement:
+          count_per_host: 4
+          host_pattern: "*"
+        spec:
+          rgw_frontend_port: 8000
+- cephadm.wait_for_service:
+    service: rgw.foo
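The cephadm.apply task feeds the spec above to the orchestrator; doing the same by hand would amount to something like the sketch below, writing the spec to a file, applying it, and watching the service converge (file name is arbitrary):

cat > rgw.yaml <<EOF
service_type: rgw
service_id: foo
placement:
  count_per_host: 4
  host_pattern: "*"
spec:
  rgw_frontend_port: 8000
EOF
ceph orch apply -i rgw.yaml
ceph orch ls rgw   # wait until all rgw.foo daemons report running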