# src/ceph-volume/ceph_volume/tests/functional/batch/tox.ini
[tox]
envlist = centos-bluestore-{single_type,single_type_dmcrypt,mixed_type,mixed_type_dmcrypt,mixed_type_explicit,mixed_type_dmcrypt_explicit}
skipsdist = True
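
# each env brings up vagrant vms, deploys a ceph cluster on them with ceph-ansible,
# and runs the ceph-volume batch functional tests via testinfra.
# CEPH_ANSIBLE_BRANCH, CEPH_DEV_BRANCH and CEPH_DEV_SHA1 can be exported to pin
# the ceph-ansible and ceph versions under test.
#
# example invocation (posargs default to --provider=virtualbox; override after --,
# e.g. with libvirt if that vagrant provider is available):
#   tox -e centos-bluestore-single_type -- --provider=libvirt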

[testenv]
deps = mock
allowlist_externals =
    vagrant
    bash
    git
    cp
    sleep
passenv=*
setenv=
  ANSIBLE_CONFIG = {envdir}/tmp/ceph-ansible/ansible.cfg
  ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config -o ControlMaster=auto -o ControlPersist=600s -o PreferredAuthentications=publickey
  ANSIBLE_STDOUT_CALLBACK = debug
  VAGRANT_CWD = {changedir}
  CEPH_VOLUME_DEBUG = 1
  DEBIAN_FRONTEND=noninteractive
  ANSIBLE_COLLECTIONS_PATH = {envdir}/ansible_collections
  CEPH_ANSIBLE_VAGRANT_BOX = centos/stream8
changedir=
  centos-bluestore-single_type: {toxinidir}/centos/bluestore/single-type
  centos-bluestore-single_type_dmcrypt: {toxinidir}/centos/bluestore/single-type-dmcrypt
  centos-bluestore-mixed_type: {toxinidir}/centos/bluestore/mixed-type
  centos-bluestore-mixed_type_dmcrypt: {toxinidir}/centos/bluestore/mixed-type-dmcrypt
  centos-bluestore-mixed_type_explicit: {toxinidir}/centos/bluestore/mixed-type-explicit
  centos-bluestore-mixed_type_dmcrypt_explicit: {toxinidir}/centos/bluestore/mixed-type-dmcrypt-explicit
commands=
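  # fetch ceph-ansible at the requested branch and install its test requirements
  # and the ansible collections it depends on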
  git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch {env:CEPH_ANSIBLE_CLONE:"https://github.com/ceph/ceph-ansible.git"} {envdir}/tmp/ceph-ansible
  python -m pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt
  ansible-galaxy collection install -r {envdir}/tmp/ceph-ansible/requirements.yml -v -p {envdir}/ansible_collections

  # bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:""} {posargs:--provider=virtualbox}
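  # bring up the vagrant vms and write the ssh config consumed by ANSIBLE_SSH_ARGS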
  bash {toxinidir}/../scripts/vagrant_up.sh {posargs:--provider=virtualbox}
  bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir}

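  # stage the shared deploy playbook inside the ceph-ansible checkout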
  cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible

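  # update the rpm package to the latest version on all hosts before deploying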
  ansible -vv -i {changedir}/hosts all -b -m package -a 'name=rpm state=latest'

  # individual scenario setup
  ansible-playbook -vv -i {changedir}/hosts {changedir}/setup.yml

  # use ceph-ansible to deploy a ceph cluster on the vms
  ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}"

  # test cluster state using testinfra
  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

  # attempt to reboot all vms
  bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox}

  # after a reboot, OSDs may take about 20 seconds to come back up
  sleep 30

  # retest to ensure cluster came back up correctly after rebooting
  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

  # destroy an OSD, zap its device and recreate it using its ID
  ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml

  # retest to ensure cluster came back up correctly
  py.test --reruns 5 --reruns-delay 10 -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests

  # test zap OSDs by ID
  ansible-playbook -vv -i {changedir}/hosts {changedir}/test_zap.yml

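  # tear down the vms once all scenario tests have passed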
  vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"}