author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 18:24:20 +0000
commit     483eb2f56657e8e7f419ab1a4fab8dce9ade8609
tree       e5d88d25d870d5dedacb6bbdbe2a966086a0a5cf /src/ceph-volume/ceph_volume/tests/functional/lvm
parent     Initial commit.
Adding upstream version 14.2.21.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/ceph-volume/ceph_volume/tests/functional/lvm')
52 files changed, 1623 insertions, 0 deletions
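Note: the scenario directories added below (centos7/xenial × filestore/bluestore × create/dmcrypt) are selected by the tox.ini at the end of this tree, which clones ceph-ansible, brings the Vagrant VMs up, deploys a test cluster, and runs the testinfra suite plus the per-scenario test.yml. As a minimal sketch of how one scenario is exercised locally — assuming tox, git, and Vagrant with the virtualbox provider are installed, and using the environment names and variables from the tox.ini in this diff:

    # Sketch only: env name and variables taken from the tox.ini added by this commit;
    # local tooling (tox, Vagrant/virtualbox, git) is assumed to be available.
    cd src/ceph-volume/ceph_volume/tests/functional/lvm

    # Optionally pin the ceph-ansible branch and the Ceph dev build under test;
    # the tox commands read these with master/latest defaults.
    export CEPH_ANSIBLE_BRANCH=master
    export CEPH_DEV_BRANCH=master

    # Run one scenario from the matrix, e.g. bluestore "create" on CentOS 7.
    tox -e centos7-bluestore-create

The diff content for the new files follows.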
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/group_vars/all new file mode 100644 index 00000000..01ae1dae --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/group_vars/all @@ -0,0 +1,29 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +lvm_volumes: + - data: data-lv1 + data_vg: test_group + crush_device_class: test + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals + - data: /dev/sdd1 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/setup.yml new file mode 120000 index 00000000..1c1a3ce8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_partitions.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/test.yml new file mode 120000 index 00000000..165d9da2 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/test.yml @@ -0,0 +1 @@ +../../../playbooks/test_bluestore.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/vagrant_variables.yml new file mode 100644 index 00000000..7d1a4449 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/create/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/group_vars/all new file mode 100644 index 00000000..9d4f50de --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/group_vars/all @@ -0,0 +1,30 @@ +--- + +dmcrypt: True +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +lvm_volumes: + - data: data-lv1 + data_vg: test_group + crush_device_class: test + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals + - data: /dev/sdd1 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/setup.yml new file mode 120000 index 00000000..1c1a3ce8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_partitions.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/test.yml new file mode 100644 index 00000000..bbd5b45d --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/test.yml @@ -0,0 +1,104 @@ +- hosts: osds + become: yes + tasks: + + - name: stop ceph-osd@2 daemon + service: + name: ceph-osd@2 + state: stopped + + - name: stop ceph-osd@0 daemon + service: + name: ceph-osd@0 + state: stopped + +- hosts: mons + become: yes + tasks: + + - name: destroy osd.2 + command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it" + + - name: destroy osd.0 + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" + +- hosts: osds + become: yes + tasks: + + # osd.2 device + - name: zap /dev/sdd1 + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy" + environment: + CEPH_VOLUME_DEBUG: 1 + + # partitions have been completely removed, so re-create them again + - name: re-create partition /dev/sdd for lvm data usage + parted: + device: /dev/sdd + number: 1 + part_start: 0% + part_end: 50% + unit: '%' + label: gpt + state: present + + - name: redeploy osd.2 using /dev/sdd1 + command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2" + environment: + CEPH_VOLUME_DEBUG: 1 + + # osd.0 lv + - name: zap test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: redeploy osd.0 using test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: stop ceph-osd@0 daemon + service: + name: ceph-osd@0 + state: stopped + + +- hosts: mons + become: yes + tasks: + + - name: destroy osd.0 + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" + + +- hosts: osds + become: yes + tasks: + + + - name: zap test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: prepare osd.0 using test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: activate all to start the previously prepared osd.0 + command: "ceph-volume lvm activate --all" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: node inventory + command: "ceph-volume inventory" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: list all OSDs + command: "ceph-volume lvm list" + environment: + CEPH_VOLUME_DEBUG: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/vagrant_variables.yml new file mode 100644 index 00000000..7d1a4449 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/bluestore/dmcrypt/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu 
precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/group_vars/all new file mode 100644 index 00000000..5af1b7ac --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/group_vars/all @@ -0,0 +1,32 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "filestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda +lvm_volumes: + - data: data-lv1 + journal: /dev/sdc1 + data_vg: test_group + crush_device_class: test + - data: data-lv2 + journal: journal1 + data_vg: test_group + journal_vg: journals + - data: /dev/sdd1 + journal: /dev/sdd2 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/setup.yml new file mode 120000 index 00000000..1c1a3ce8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_partitions.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/test.yml new file mode 120000 index 00000000..1a8c37c1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/test.yml @@ -0,0 +1 @@ +../../../playbooks/test_filestore.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/vagrant_variables.yml new file mode 100644 index 00000000..7d1a4449 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/create/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/group_vars/all new file mode 100644 index 00000000..7544678b --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/group_vars/all @@ -0,0 +1,33 @@ +--- + +dmcrypt: True +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "filestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda +lvm_volumes: + - data: data-lv1 + journal: /dev/sdc1 + data_vg: test_group + crush_device_class: test + - data: data-lv2 + journal: journal1 + data_vg: test_group + journal_vg: journals + - data: /dev/sdd1 + journal: /dev/sdd2 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/setup.yml new file mode 120000 index 00000000..1c1a3ce8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_partitions.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/test.yml new file mode 100644 index 00000000..91c9a1b8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/test.yml @@ -0,0 +1,108 @@ + +- hosts: osds + become: yes + tasks: + + - name: stop ceph-osd@2 daemon + service: + name: ceph-osd@2 + state: stopped + + - name: stop ceph-osd@0 daemon + service: + name: ceph-osd@0 + state: stopped + + +- hosts: mons + become: yes + tasks: + + - name: destroy osd.2 + command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it" + + - name: destroy osd.0 + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" + + +- hosts: osds + become: yes + tasks: + + # osd.2 device + - name: zap /dev/sdd1 + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: zap /dev/sdd2 + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy" + environment: + CEPH_VOLUME_DEBUG: 1 + + # partitions have been completely removed, so re-create them again + - name: re-create partition /dev/sdd for lvm data usage + parted: + device: /dev/sdd + number: 1 + part_start: 0% + part_end: 50% + unit: '%' + label: gpt + state: present + + - name: re-create partition /dev/sdd lvm journals + parted: + device: /dev/sdd + number: 2 + part_start: 50% + part_end: 100% + unit: '%' + state: present + label: gpt + + - name: redeploy osd.2 using /dev/sdd1 + command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2" + environment: + CEPH_VOLUME_DEBUG: 1 + + # osd.0 lv + - name: zap test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: zap /dev/sdc1 + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1 --destroy" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: re-create partition /dev/sdc1 + parted: + device: /dev/sdc + number: 1 + part_start: 0% + part_end: 50% + unit: '%' + state: present + label: gpt + + - name: prepare osd.0 again using test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: activate all to start the previously prepared osd.0 + command: "ceph-volume lvm activate --filestore --all" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: node inventory + command: "ceph-volume inventory" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: list all OSDs + command: "ceph-volume lvm list" + environment: + CEPH_VOLUME_DEBUG: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/vagrant_variables.yml new file mode 100644 index 00000000..7d1a4449 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/centos7/filestore/dmcrypt/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet 
interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: centos/7 +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml new file mode 100644 index 00000000..37a48949 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/setup_partitions.yml @@ -0,0 +1,27 @@ +--- + +- hosts: osds + gather_facts: false + become: yes + tasks: + + - name: partition /dev/sdd for lvm data usage + parted: + device: /dev/sdd + number: 1 + part_start: 0% + part_end: 50% + unit: '%' + label: gpt + state: present + + - name: partition /dev/sdd lvm journals + parted: + device: /dev/sdd + number: 2 + part_start: 50% + part_end: 100% + unit: '%' + state: present + label: gpt + diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml new file mode 100644 index 00000000..1e9b8c3e --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_bluestore.yml @@ -0,0 +1,148 @@ + +- hosts: osds + become: yes + tasks: + + - name: stop ceph-osd@2 daemon + service: + name: ceph-osd@2 + state: stopped + + - name: stop ceph-osd@0 daemon + service: + name: ceph-osd@0 + state: stopped + + +- hosts: mons + become: yes + tasks: + + - name: destroy osd.2 + command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it" + + - name: destroy osd.0 + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" + + +- hosts: osds + become: yes + tasks: + + # osd.2 device + - name: zap 
/dev/sdd1 + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy" + environment: + CEPH_VOLUME_DEBUG: 1 + + # partitions have been completely removed, so re-create them again + - name: re-create partition /dev/sdd for lvm data usage + parted: + device: /dev/sdd + number: 1 + part_start: 0% + part_end: 50% + unit: '%' + label: gpt + state: present + + - name: redeploy osd.2 using /dev/sdd1 + command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2" + environment: + CEPH_VOLUME_DEBUG: 1 + + # osd.0 device (zap without --destroy that removes the LV) + - name: zap test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: prepare osd.0 again using test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: find all OSD directories + find: + paths: /var/lib/ceph/osd + recurse: no + file_type: directory + register: osd_directories + + - name: find all OSD symlinks + find: + paths: /var/lib/ceph/osd + recurse: yes + depth: 2 + file_type: link + register: osd_symlinks + + # set the OSD dir and the block/block.db links to root:root permissions, to + # ensure that the OSD will be able to activate regardless + - file: + path: "{{ item.path }}" + owner: root + group: root + with_items: + - "{{ osd_directories.files }}" + + - file: + path: "{{ item.path }}" + owner: root + group: root + with_items: + - "{{ osd_symlinks.files }}" + + - name: activate all to start the previously prepared osd.0 + command: "ceph-volume lvm activate --all" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: node inventory + command: "ceph-volume inventory" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: list all OSDs + command: "ceph-volume lvm list" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: create temporary directory + tempfile: + state: directory + suffix: sparse + register: tmpdir + + - name: create a 5GB sparse file + command: fallocate -l 5G {{ tmpdir.path }}/sparse.file + + - name: find an empty loop device + command: losetup -f + register: losetup_list + + - name: setup loop device with sparse file + command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file + + - name: create volume group + command: vgcreate test_zap {{ losetup_list.stdout }} + failed_when: false + + - name: create logical volume 1 + command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap + failed_when: false + + - name: create logical volume 2 + command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap + failed_when: false + + # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed + - name: zap test_zap/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: zap test_zap/data-lv2 + command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2" + environment: + CEPH_VOLUME_DEBUG: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml new file mode 100644 index 00000000..4e43839e --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/playbooks/test_filestore.yml @@ -0,0 +1,169 @@ + +- hosts: osds + become: yes + tasks: + + - name: stop ceph-osd@2 daemon + service: + name: ceph-osd@2 + state: stopped + + 
- name: stop ceph-osd@0 daemon + service: + name: ceph-osd@0 + state: stopped + + +- hosts: mons + become: yes + tasks: + + - name: destroy osd.2 + command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it" + + - name: destroy osd.0 + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" + + +- hosts: osds + become: yes + tasks: + + # osd.2 device + - name: zap /dev/sdd1 + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy" + environment: + CEPH_VOLUME_DEBUG: 1 + + # osd.2 journal + - name: zap /dev/sdd2 + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy" + environment: + CEPH_VOLUME_DEBUG: 1 + + # partitions have been completely removed, so re-create them again + - name: re-create partition /dev/sdd for lvm data usage + parted: + device: /dev/sdd + number: 1 + part_start: 0% + part_end: 50% + unit: '%' + label: gpt + state: present + + - name: re-create partition /dev/sdd lvm journals + parted: + device: /dev/sdd + number: 2 + part_start: 50% + part_end: 100% + unit: '%' + state: present + label: gpt + + - name: redeploy osd.2 using /dev/sdd1 + command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2" + environment: + CEPH_VOLUME_DEBUG: 1 + + # osd.0 data lv + # note: we don't use --destroy here to test this works without that flag. + # --destroy is used in the bluestore tests + - name: zap test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + # osd.0 journal device (zap without --destroy that removes the LV) + - name: zap /dev/sdc1 + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: prepare osd.0 again using test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: find all OSD paths + find: + paths: /var/lib/ceph/osd + recurse: no + file_type: directory + register: osd_paths + + # set all OSD paths to root:rootto ensure that the OSD will be able to + # activate regardless + - name: mangle permissions to root + file: + path: "{{ item.path }}" + owner: root + group: root + recurse: yes + with_items: + - "{{ osd_paths.files }}" + + - name: stop ceph-osd@2 daemon + service: + name: ceph-osd@2 + state: stopped + + - name: stop ceph-osd@1 daemon + service: + name: ceph-osd@1 + state: stopped + + - name: activate all to start the previously prepared osd.0 + command: "ceph-volume lvm activate --filestore --all" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: node inventory + command: "ceph-volume inventory" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: list all OSDs + command: "ceph-volume lvm list" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: create temporary directory + tempfile: + state: directory + suffix: sparse + register: tmpdir + + - name: create a 5GB sparse file + command: fallocate -l 5G {{ tmpdir.path }}/sparse.file + + - name: find an empty loop device + command: losetup -f + register: losetup_list + + - name: setup loop device with sparse file + command: losetup {{ losetup_list.stdout }} {{ tmpdir.path }}/sparse.file + + - name: create volume group + command: vgcreate test_zap {{ losetup_list.stdout }} + failed_when: false + + - name: create logical volume 1 + command: lvcreate --yes -l 50%FREE -n data-lv1 test_zap + 
failed_when: false + + - name: create logical volume 2 + command: lvcreate --yes -l 50%FREE -n data-lv2 test_zap + failed_when: false + + # zapping the first lv shouldn't remove the vg, allowing the second zap to succeed + - name: zap test_zap/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: zap test_zap/data-lv2 + command: "ceph-volume --cluster {{ cluster }} lvm zap --destroy test_zap/data-lv2" + environment: + CEPH_VOLUME_DEBUG: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini new file mode 100644 index 00000000..0b38c85b --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/tox.ini @@ -0,0 +1,79 @@ +[tox] +envlist = {centos7,xenial}-{filestore,bluestore}-{create,prepare_activate,dmcrypt} +skipsdist = True + +[testenv] +deps = mock +whitelist_externals = + vagrant + bash + git + cp + sleep +passenv=* +setenv= + ANSIBLE_SSH_ARGS = -F {changedir}/vagrant_ssh_config + ANSIBLE_ACTION_PLUGINS = {envdir}/tmp/ceph-ansible/plugins/actions + ANSIBLE_STDOUT_CALLBACK = debug + ANSIBLE_RETRY_FILES_ENABLED = False + ANSIBLE_SSH_RETRIES = 5 + VAGRANT_CWD = {changedir} + CEPH_VOLUME_DEBUG = 1 + DEBIAN_FRONTEND=noninteractive +changedir= + # plain/unencrypted + centos7-filestore-create: {toxinidir}/centos7/filestore/create + centos7-bluestore-create: {toxinidir}/centos7/bluestore/create + xenial-filestore-create: {toxinidir}/xenial/filestore/create + xenial-bluestore-create: {toxinidir}/xenial/bluestore/create + # dmcrypt + centos7-filestore-dmcrypt: {toxinidir}/centos7/filestore/dmcrypt + centos7-bluestore-dmcrypt: {toxinidir}/centos7/bluestore/dmcrypt + xenial-filestore-dmcrypt: {toxinidir}/xenial/filestore/dmcrypt + xenial-bluestore-dmcrypt: {toxinidir}/xenial/bluestore/dmcrypt + # TODO: these are placeholders for now, eventually we want to + # test the prepare/activate workflow of ceph-volume as well + xenial-filestore-prepare_activate: {toxinidir}/xenial/filestore/prepare_activate + xenial-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate + centos7-filestore-prepare_activate: {toxinidir}/xenial/filestore/prepare_activate + centos7-bluestore-prepare_activate: {toxinidir}/xenial/bluestore/prepare_activate +commands= + git clone -b {env:CEPH_ANSIBLE_BRANCH:master} --single-branch https://github.com/ceph/ceph-ansible.git {envdir}/tmp/ceph-ansible + pip install -r {envdir}/tmp/ceph-ansible/tests/requirements.txt + + bash {toxinidir}/../scripts/vagrant_up.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox} + bash {toxinidir}/../scripts/generate_ssh_config.sh {changedir} + + # create logical volumes to test with on the vms + ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/lvm_setup.yml + + # ad-hoc/local test setup for lvm + ansible-playbook -vv -i {changedir}/hosts {changedir}/setup.yml + + cp {toxinidir}/../playbooks/deploy.yml {envdir}/tmp/ceph-ansible + + # use ceph-ansible to deploy a ceph cluster on the vms + ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/deploy.yml --extra-vars "fetch_directory={changedir}/fetch ceph_dev_branch={env:CEPH_DEV_BRANCH:master} ceph_dev_sha1={env:CEPH_DEV_SHA1:latest} toxinidir={toxinidir}" + + # prepare nodes for testing with testinfra + ansible-playbook -vv -i {changedir}/hosts {envdir}/tmp/ceph-ansible/tests/functional/setup.yml + + # test cluster state using testinfra + 
py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests + + # reboot all vms - attempt + bash {toxinidir}/../scripts/vagrant_reload.sh {env:VAGRANT_UP_FLAGS:"--no-provision"} {posargs:--provider=virtualbox} + + # after a reboot, osds may take about 20 seconds to come back up + sleep 30 + + # retest to ensure cluster came back up correctly after rebooting + py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests + + # destroy an OSD, zap it's device and recreate it using it's ID + ansible-playbook -vv -i {changedir}/hosts {changedir}/test.yml + + # retest to ensure cluster came back up correctly + py.test -n 4 --sudo -v --connection=ansible --ssh-config={changedir}/vagrant_ssh_config --ansible-inventory={changedir}/hosts {toxinidir}/../tests + + vagrant destroy {env:VAGRANT_DESTROY_FLAGS:"--force"} diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/group_vars/all new file mode 100644 index 00000000..01ae1dae --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/group_vars/all @@ -0,0 +1,29 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +lvm_volumes: + - data: data-lv1 + data_vg: test_group + crush_device_class: test + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals + - data: /dev/sdd1 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/setup.yml new file mode 120000 index 00000000..1c1a3ce8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_partitions.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/test.yml new file mode 120000 index 00000000..165d9da2 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/test.yml @@ -0,0 +1 @@ +../../../playbooks/test_bluestore.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/vagrant_variables.yml new file mode 100644 index 00000000..7252344d --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/create/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: ceph/ubuntu-xenial +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/group_vars/all new file mode 100644 index 00000000..9d4f50de --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/group_vars/all @@ -0,0 +1,30 @@ +--- + +dmcrypt: True +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "bluestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +lvm_volumes: + - data: data-lv1 + data_vg: test_group + crush_device_class: test + - data: data-lv2 + data_vg: test_group + db: journal1 + db_vg: journals + - data: /dev/sdd1 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/setup.yml new file mode 120000 index 00000000..1c1a3ce8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_partitions.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/test.yml new file mode 100644 index 00000000..27290d93 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/test.yml @@ -0,0 +1,104 @@ +- hosts: osds + become: yes + tasks: + + - name: stop ceph-osd@2 daemon + service: + name: ceph-osd@2 + state: stopped + + - name: stop ceph-osd@0 daemon + service: + name: ceph-osd@0 + state: stopped + +- hosts: mons + become: yes + tasks: + + - name: destroy osd.2 + command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it" + + - name: destroy osd.0 + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" + + +- hosts: osds + become: yes + tasks: + + # osd.2 device + - name: zap /dev/sdd1 + command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy" + environment: + CEPH_VOLUME_DEBUG: 1 + + # partitions have been completely removed, so re-create them again + - name: re-create partition /dev/sdd for lvm data usage + parted: + device: /dev/sdd + number: 1 + part_start: 0% + part_end: 50% + unit: '%' + label: gpt + state: present + + - name: redeploy osd.2 using /dev/sdd1 + command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data /dev/sdd1 --osd-id 2" + environment: + CEPH_VOLUME_DEBUG: 1 + + # osd.0 lv + - name: zap test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: redeploy osd.0 using test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm create --bluestore --data test_group/data-lv1 --osd-id 0" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: stop ceph-osd@0 daemon + service: + name: ceph-osd@0 + state: stopped + + +- hosts: mons + become: yes + tasks: + + - name: destroy osd.0 + command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it" + + +- hosts: osds + become: yes + tasks: + + - name: zap test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: prepare osd.0 using test_group/data-lv1 + command: "ceph-volume --cluster {{ cluster }} lvm prepare --bluestore --data test_group/data-lv1 --osd-id 0" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: activate all to start the previously prepared osd.0 + command: "ceph-volume lvm activate --all" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: node inventory + command: "ceph-volume inventory" + environment: + CEPH_VOLUME_DEBUG: 1 + + - name: list all OSDs + command: "ceph-volume lvm list" + environment: + CEPH_VOLUME_DEBUG: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/vagrant_variables.yml new file mode 100644 index 00000000..7252344d --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/bluestore/dmcrypt/vagrant_variables.yml @@ -0,0 +1,56 @@ +--- + +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, 
enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: ceph/ubuntu-xenial +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/group_vars/all new file mode 100644 index 00000000..5af1b7ac --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/group_vars/all @@ -0,0 +1,32 @@ +--- + +ceph_dev: True +cluster: test +public_network: "192.168.3.0/24" +cluster_network: "192.168.4.0/24" +monitor_interface: eth1 +journal_size: 100 +osd_objectstore: "filestore" +osd_scenario: lvm +ceph_origin: 'repository' +ceph_repository: 'dev' +copy_admin_key: false +# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda +lvm_volumes: + - data: data-lv1 + journal: /dev/sdc1 + data_vg: test_group + crush_device_class: test + - data: data-lv2 + journal: journal1 + data_vg: test_group + journal_vg: journals + - data: /dev/sdd1 + journal: /dev/sdd2 +os_tuning_params: + - { name: kernel.pid_max, value: 4194303 } + - { name: fs.file-max, value: 26234859 } +ceph_conf_overrides: + global: + osd_pool_default_pg_num: 8 + osd_pool_default_size: 1 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/hosts new file mode 100644 index 00000000..e1c1de6f --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/hosts @@ -0,0 +1,8 @@ +[mons] +mon0 + +[osds] +osd0 + +[mgrs] +mon0 diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/setup.yml new file mode 120000 index 00000000..1c1a3ce8 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/setup.yml @@ -0,0 +1 @@ +../../../playbooks/setup_partitions.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/test.yml new file mode 120000 index 00000000..1a8c37c1 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/test.yml @@ -0,0 +1 @@ +../../../playbooks/test_filestore.yml
\ No newline at end of file diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/vagrant_variables.yml new file mode 100644 index 00000000..82b330ef --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/create/vagrant_variables.yml @@ -0,0 +1,54 @@ +--- +# DEFINE THE NUMBER OF VMS TO RUN +mon_vms: 1 +osd_vms: 1 +mds_vms: 0 +rgw_vms: 0 +nfs_vms: 0 +rbd_mirror_vms: 0 +client_vms: 0 +iscsi_gw_vms: 0 +mgr_vms: 0 + +# SUBNETS TO USE FOR THE VMS +public_subnet: 192.168.3 +cluster_subnet: 192.168.4 + +# MEMORY +# set 1024 for CentOS +memory: 512 + +# Ethernet interface name +# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial +eth: 'eth1' + +# VAGRANT BOX +# Ceph boxes are *strongly* suggested. They are under better control and will +# not get updated frequently unless required for build systems. These are (for +# now): +# +# * ceph/ubuntu-xenial +# +# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64 +# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet +# libvirt CentOS: centos/7 +# parallels Ubuntu: parallels/ubuntu-14.04 +# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller' +# For more boxes have a look at: +# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q= +# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/ +vagrant_box: ceph/ubuntu-xenial +#ssh_private_key_path: "~/.ssh/id_rsa" +# The sync directory changes based on vagrant box +# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant +#vagrant_sync_dir: /home/vagrant/sync +#vagrant_sync_dir: / +# Disables synced folder creation. Not needed for testing, will skip mounting +# the vagrant directory on the remote box regardless of the provider. +vagrant_disable_synced_folder: true +# VAGRANT URL +# This is a URL to download an image from an alternate location. vagrant_box +# above should be set to the filename of the image. +# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box +# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box +# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/Vagrantfile b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/Vagrantfile new file mode 120000 index 00000000..16076e42 --- /dev/null +++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/Vagrantfile @@ -0,0 +1 @@ +../../../../Vagrantfile
\ No newline at end of file
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/group_vars/all b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/group_vars/all
new file mode 100644
index 00000000..7544678b
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/group_vars/all
@@ -0,0 +1,33 @@
+---
+
+dmcrypt: True
+ceph_dev: True
+cluster: test
+public_network: "192.168.3.0/24"
+cluster_network: "192.168.4.0/24"
+monitor_interface: eth1
+journal_size: 100
+osd_objectstore: "filestore"
+osd_scenario: lvm
+ceph_origin: 'repository'
+ceph_repository: 'dev'
+copy_admin_key: false
+# test-volume is created by tests/functional/lvm_setup.yml from /dev/sda
+lvm_volumes:
+  - data: data-lv1
+    journal: /dev/sdc1
+    data_vg: test_group
+    crush_device_class: test
+  - data: data-lv2
+    journal: journal1
+    data_vg: test_group
+    journal_vg: journals
+  - data: /dev/sdd1
+    journal: /dev/sdd2
+os_tuning_params:
+  - { name: kernel.pid_max, value: 4194303 }
+  - { name: fs.file-max, value: 26234859 }
+ceph_conf_overrides:
+  global:
+    osd_pool_default_pg_num: 8
+    osd_pool_default_size: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/hosts b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/hosts
new file mode 100644
index 00000000..e1c1de6f
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/hosts
@@ -0,0 +1,8 @@
+[mons]
+mon0
+
+[osds]
+osd0
+
+[mgrs]
+mon0
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/setup.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/setup.yml
new file mode 120000
index 00000000..1c1a3ce8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/setup.yml
@@ -0,0 +1 @@
+../../../playbooks/setup_partitions.yml
\ No newline at end of file
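The dmcrypt variant's group_vars above is identical to the create scenario's except for the added dmcrypt: True. The expectation (an assumption about how ceph-ansible's lvm scenario consumes that flag, not something spelled out in this patch) is that the same lvm_volumes entries are then prepared encrypted, i.e. the equivalent ceph-volume call gains --dmcrypt:

    # sketch only; --dmcrypt is a real ceph-volume flag, but this exact
    # invocation is assumed rather than taken from this commit
    ceph-volume --cluster test lvm create --filestore --dmcrypt \
        --data test_group/data-lv1 --journal /dev/sdc1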
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/test.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/test.yml
new file mode 100644
index 00000000..91c9a1b8
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/test.yml
@@ -0,0 +1,108 @@
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    - name: stop ceph-osd@2 daemon
+      service:
+        name: ceph-osd@2
+        state: stopped
+
+    - name: stop ceph-osd@0 daemon
+      service:
+        name: ceph-osd@0
+        state: stopped
+
+
+- hosts: mons
+  become: yes
+  tasks:
+
+    - name: destroy osd.2
+      command: "ceph --cluster {{ cluster }} osd destroy osd.2 --yes-i-really-mean-it"
+
+    - name: destroy osd.0
+      command: "ceph --cluster {{ cluster }} osd destroy osd.0 --yes-i-really-mean-it"
+
+
+- hosts: osds
+  become: yes
+  tasks:
+
+    # osd.2 device
+    - name: zap /dev/sdd1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd1 --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: zap /dev/sdd2
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdd2 --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    # partitions have been completely removed, so re-create them again
+    - name: re-create partition /dev/sdd for lvm data usage
+      parted:
+        device: /dev/sdd
+        number: 1
+        part_start: 0%
+        part_end: 50%
+        unit: '%'
+        label: gpt
+        state: present
+
+    - name: re-create partition /dev/sdd lvm journals
+      parted:
+        device: /dev/sdd
+        number: 2
+        part_start: 50%
+        part_end: 100%
+        unit: '%'
+        state: present
+        label: gpt
+
+    - name: redeploy osd.2 using /dev/sdd1
+      command: "ceph-volume --cluster {{ cluster }} lvm create --filestore --data /dev/sdd1 --journal /dev/sdd2 --osd-id 2"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    # osd.0 lv
+    - name: zap test_group/data-lv1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap test_group/data-lv1"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: zap /dev/sdc1
+      command: "ceph-volume --cluster {{ cluster }} lvm zap /dev/sdc1 --destroy"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: re-create partition /dev/sdc1
+      parted:
+        device: /dev/sdc
+        number: 1
+        part_start: 0%
+        part_end: 50%
+        unit: '%'
+        state: present
+        label: gpt
+
+    - name: prepare osd.0 again using test_group/data-lv1
+      command: "ceph-volume --cluster {{ cluster }} lvm prepare --filestore --data test_group/data-lv1 --journal /dev/sdc1 --osd-id 0"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: activate all to start the previously prepared osd.0
+      command: "ceph-volume lvm activate --filestore --all"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: node inventory
+      command: "ceph-volume inventory"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
+
+    - name: list all OSDs
+      command: "ceph-volume lvm list"
+      environment:
+        CEPH_VOLUME_DEBUG: 1
diff --git a/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/vagrant_variables.yml b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/vagrant_variables.yml
new file mode 100644
index 00000000..82b330ef
--- /dev/null
+++ b/src/ceph-volume/ceph_volume/tests/functional/lvm/xenial/filestore/dmcrypt/vagrant_variables.yml
@@ -0,0 +1,54 @@
+---
+# DEFINE THE NUMBER OF VMS TO RUN
+mon_vms: 1
+osd_vms: 1
+mds_vms: 0
+rgw_vms: 0
+nfs_vms: 0
+rbd_mirror_vms: 0
+client_vms: 0
+iscsi_gw_vms: 0
+mgr_vms: 0
+
+# SUBNETS TO USE FOR THE VMS
+public_subnet: 192.168.3
+cluster_subnet: 192.168.4
+
+# MEMORY
+# set 1024 for CentOS
+memory: 512
+
+# Ethernet interface name
+# use eth1 for libvirt and ubuntu precise, enp0s8 for CentOS and ubuntu xenial
+eth: 'eth1'
+
+# VAGRANT BOX
+# Ceph boxes are *strongly* suggested. They are under better control and will
+# not get updated frequently unless required for build systems. These are (for
+# now):
+#
+# * ceph/ubuntu-xenial
+#
+# Ubuntu: ceph/ubuntu-xenial bento/ubuntu-16.04 or ubuntu/trusty64 or ubuntu/wily64
+# CentOS: bento/centos-7.1 or puppetlabs/centos-7.0-64-puppet
+# libvirt CentOS: centos/7
+# parallels Ubuntu: parallels/ubuntu-14.04
+# Debian: deb/jessie-amd64 - be careful the storage controller is named 'SATA Controller'
+# For more boxes have a look at:
+# - https://atlas.hashicorp.com/boxes/search?utf8=✓&sort=&provider=virtualbox&q=
+# - https://download.gluster.org/pub/gluster/purpleidea/vagrant/
+vagrant_box: ceph/ubuntu-xenial
+#ssh_private_key_path: "~/.ssh/id_rsa"
+# The sync directory changes based on vagrant box
+# Set to /home/vagrant/sync for Centos/7, /home/{ user }/vagrant for openstack and defaults to /vagrant
+#vagrant_sync_dir: /home/vagrant/sync
+#vagrant_sync_dir: /
+# Disables synced folder creation. Not needed for testing, will skip mounting
+# the vagrant directory on the remote box regardless of the provider.
+vagrant_disable_synced_folder: true
+# VAGRANT URL
+# This is a URL to download an image from an alternate location. vagrant_box
+# above should be set to the filename of the image.
+# Fedora virtualbox: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
+# Fedora libvirt: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-libvirt.box
+# vagrant_box_url: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Base-Vagrant-22-20150521.x86_64.vagrant-virtualbox.box
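Note that the test_group and journals volume groups referenced by lvm_volumes are not created anywhere in this patch; the group_vars comments point at tests/functional/lvm_setup.yml for that. A minimal sketch of the layout those entries appear to assume (only /dev/sda is mentioned in the comments; the journal device and the 50/50 split are illustrative assumptions):

    # sketch of the expected LVM layout, not part of this commit
    vgcreate test_group /dev/sda          # backing device per the group_vars comment
    lvcreate -n data-lv1 -l 50%VG test_group      # sizes are assumptions
    lvcreate -n data-lv2 -l 100%FREE test_group
    vgcreate journals /dev/sdb            # journal device is an assumption
    lvcreate -n journal1 -l 100%FREE journals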